author    txh18 <cloudygooseg@gmail.com>  2015-10-23 19:36:31 +0800
committer txh18 <cloudygooseg@gmail.com>  2015-10-23 19:36:31 +0800
commit    1234c026869ab052e898cc2541143fe4a22312b6 (patch)
tree      bd4b980ae12340b4ea3a8aa6259d43dc891b5568
parent    f0937ae6e6401f25f15bb0e83e764ca888e81f11 (diff)
parent    64fce92b7845b716f3c168036691c37b2467d99b (diff)
Just come back, let's merge the new master
Merge branch 'master' into txh18/rnnlm
-rw-r--r--  .gitmodules                           |   6
-rw-r--r--  Makefile                              |   1
-rw-r--r--  README.md                             |   4
-rw-r--r--  embedding_example/.gitignore          |   2
-rw-r--r--  embedding_example/Makefile            |  20
-rw-r--r--  embedding_example/main.c              |  93
-rwxr-xr-x  embedding_example/run.sh              |   4
-rw-r--r--  embedding_example/setup_nerv.lua      |  25
-rw-r--r--  nerv/Makefile                         |  10
-rw-r--r--  nerv/examples/asr_trainer.lua         |  37
-rw-r--r--  nerv/examples/mmi_chime3.lua          | 183
-rw-r--r--  nerv/examples/mpe_chime3.lua          | 186
-rw-r--r--  nerv/examples/seq_trainer.lua         |  87
-rw-r--r--  nerv/examples/swb_baseline.lua        |  79
-rw-r--r--  nerv/examples/swb_baseline_basic.lua  | 162
-rw-r--r--  nerv/init.lua                         |  14
-rw-r--r--  nerv/io/sgd_buffer.lua                |  50
-rw-r--r--  nerv/layer/affine.lua                 |   6
-rw-r--r--  nerv/layer/affine_recurrent.lua       |   4
-rw-r--r--  nerv/layer/bias.lua                   |   4
-rw-r--r--  nerv/layer/combiner.lua               |   6
-rw-r--r--  nerv/layer/init.lua                   |   1
-rw-r--r--  nerv/layer/mse.lua                    |  10
-rw-r--r--  nerv/layer/sigmoid.lua                |   4
-rw-r--r--  nerv/layer/softmax.lua                |  35
-rw-r--r--  nerv/layer/softmax_ce.lua             |   7
-rw-r--r--  nerv/layer/window.lua                 |   4
-rw-r--r--  nerv/lib/matrix/cukernel.h            |   2
-rw-r--r--  nerv/lib/matrix/cumatrix.c            |   1
-rw-r--r--  nerv/lib/matrix/cumatrix.h            |   1
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu   |  20
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c    |  21
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h    |   2
-rw-r--r--  nerv/lib/matrix/generic/matrix.c      |   5
-rw-r--r--  nerv/lib/matrix/generic/matrix.h      |   2
-rw-r--r--  nerv/lib/matrix/mmatrix.c             |  37
-rw-r--r--  nerv/lib/matrix/mmatrix.h             |   3
-rw-r--r--  nerv/matrix/generic/cukernel.cu       | 592
-rw-r--r--  nerv/matrix/generic/cumatrix.c        |  32
-rw-r--r--  nerv/matrix/init.lua                  |   4
-rw-r--r--  nerv/matrix/mmatrix.c                 |  46
-rw-r--r--  nerv/nn/layer_dag.lua                 |  76
-rw-r--r--  nerv/nn/layer_repo.lua                |   8
m---------  speech                                |   0
44 files changed, 1174 insertions(+), 722 deletions(-)
diff --git a/.gitmodules b/.gitmodules
index 1432de9..9f556c5 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +1,6 @@
[submodule "luajit-2.0"]
path = luajit-2.0
url = http://luajit.org/git/luajit-2.0.git
-[submodule "nerv-speech"]
- path = nerv-speech
- url = https://github.com/Determinant/nerv-speech.git
-[submodule "speech"]
- path = speech
- url = https://github.com/Determinant/nerv-speech.git
[submodule "luarocks"]
path = luarocks
url = https://github.com/keplerproject/luarocks.git
diff --git a/Makefile b/Makefile
index fa888c3..664a83b 100644
--- a/Makefile
+++ b/Makefile
@@ -9,6 +9,7 @@ luarocks:
install:
cd nerv; $(PREFIX)/bin/luarocks make
speech:
+ cd speech/speech_utils; $(PREFIX)/bin/luarocks make
cd speech/htk_io; $(PREFIX)/bin/luarocks make
clean:
cd nerv && make clean
diff --git a/README.md b/README.md
index 10d531c..8c21bd9 100644
--- a/README.md
+++ b/README.md
@@ -8,10 +8,12 @@ First make sure you have __lua__ and __CUDA__ installed on your computer.
__Nerv__ is currently developed via GitHub. You can download and make __Nerv__ by doing the following:
```
cd ~
-git clone https://github.com/Determinant/nerv.git
+git clone https://github.com/Nerv-SJTU/nerv.git
cd nerv
+git clone https://github.com/Nerv-SJTU/nerv-speech.git speech
git submodule init && git submodule update
make
+make speech
```
The `git submodule` command is for the __luajit__ repository inside __Nerv__.
Now, you can try to run some example scripts.
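
Once `make` and `make speech` finish, a quick smoke test is to load the installed rock from Lua. A minimal sketch, assuming `luarocks make` has installed the `nerv` rock and `LUA_PATH`/`LUA_CPATH` are set up via `luarocks path`; the loader preamble mirrors the one in embedding_example/setup_nerv.lua below:

```
-- smoke test (sketch): verify the installed nerv rock loads
local k, l, _ = pcall(require, "luarocks.loader")
_ = k and l.add_context("nerv", "scm-1")
require 'nerv'
nerv.info("nerv loaded")
```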
diff --git a/embedding_example/.gitignore b/embedding_example/.gitignore
new file mode 100644
index 0000000..8e68213
--- /dev/null
+++ b/embedding_example/.gitignore
@@ -0,0 +1,2 @@
+main
+main.o
diff --git a/embedding_example/Makefile b/embedding_example/Makefile
new file mode 100644
index 0000000..73287f4
--- /dev/null
+++ b/embedding_example/Makefile
@@ -0,0 +1,20 @@
+CFLAG += -I ../install/include/luajit-2.0/ -I ../install/include/nerv/
+LDFLAG += -L../install/lib/ -lluajit-5.1 -Wl,-rpath=../install/lib/ -lluaT -lnervcore
+GCC := gcc
+
+.PHONY: FORCE
+
+all: main FORCE
+clean:
+ -rm -f *.o
+ -rm main
+
+FORCE: ../install/bin/luarocks
+ echo "#!/bin/bash" > run.sh
+ $< path >> run.sh
+ echo "./main" >> run.sh
+main: main.o
+ $(GCC) -o $@ $< $(LDFLAG)
+
+main.o: main.c
+ $(GCC) $(CFLAG) -o $@ $< -c
diff --git a/embedding_example/main.c b/embedding_example/main.c
new file mode 100644
index 0000000..8856d58
--- /dev/null
+++ b/embedding_example/main.c
@@ -0,0 +1,93 @@
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "matrix/matrix.h"
+#include "common.h"
+#include "luaT/luaT.h"
+#include <stdio.h>
+
+const char *nerv_matrix_host_float_tname = "nerv.MMatrixFloat";
+const char *input_name = "_nerv_embed_input";
+const char *output_name = "_nerv_embed_output";
+extern Matrix *nerv_matrix_host_float_create(long nrow, long ncol, Status *status);
+extern void nerv_matrix_host_float_data_retain(Matrix *self);
+extern void nerv_matrix_host_float_data_free(Matrix *self, Status *status);
+
+lua_State *L;
+Matrix *input, *output;
+Status status;
+
+void setup_nerv() {
+ L = lua_open();
+ luaL_openlibs(L);
+ luaL_loadfile(L, "setup_nerv.lua");
+ /* network configuration */
+ lua_pushstring(L, "../nerv/examples/swb_baseline.lua");
+ if (lua_pcall(L, 1, LUA_MULTRET, 0))
+ {
+ printf("%s\n", luaL_checkstring(L, 1));
+ exit(1);
+ }
+ /* lua stack now: input width, output width, propagator */
+ input = nerv_matrix_host_float_create(1, luaL_checkinteger(L, 1), &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ output = nerv_matrix_host_float_create(1, luaL_checkinteger(L, 2), &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ /* add reference to avoid gc */
+ luaT_pushudata(L, output, nerv_matrix_host_float_tname);
+ luaT_pushudata(L, input, nerv_matrix_host_float_tname);
+ lua_setfield(L, LUA_GLOBALSINDEX, input_name);
+ lua_setfield(L, LUA_GLOBALSINDEX, output_name);
+}
+
+
+void propagate(float for_fun) {
+ int i, j;
+ printf("ok: %d\n", lua_gettop(L));
+ lua_pushvalue(L, 3);
+ /* lua stack now: input width, output width, propagator, propagator */
+ for (i = 0; i < input->nrow; i++) /* nrow is actually 1 */
+ {
+ float *nerv_row = (float *)((char *)input->data.f + i * input->stride);
+ for (j = 0; j < input->ncol; j++)
+ {
+ nerv_row[j] = j * for_fun;
+ }
+ }
+ lua_getfield(L, LUA_GLOBALSINDEX, input_name);
+ lua_getfield(L, LUA_GLOBALSINDEX, output_name);
+ /* lua stack now: input width, output width, propagator, propagator, input, output */
+ if (lua_pcall(L, 2, 0, 0)) /* call propagator with two parameters */
+ {
+ printf("%s\n", luaL_checkstring(L, -1));
+ exit(-1);
+ }
+ /* lua stack now: input width, output width, propagator */
+ printf("## output: %ld %ld ##\n", output->nrow, output->ncol);
+ for (i = 0; i < output->nrow; i++) /* nrow is actually 1 */
+ {
+ float *nerv_row = (float *)((char *)output->data.f + i * output->stride);
+ for (j = 0; j < output->ncol; j++)
+ {
+ printf("%.8f ", nerv_row[j]);
+ }
+ }
+}
+
+void teardown_nerv() {
+ lua_pushnil(L);
+ lua_pushnil(L);
+ lua_setfield(L, LUA_GLOBALSINDEX, input_name);
+ lua_setfield(L, LUA_GLOBALSINDEX, output_name);
+ lua_gc(L, LUA_GCCOLLECT, 0);
+}
+
+int main() {
+ setup_nerv();
+ propagate(1.0);
+ propagate(2.0);
+ propagate(2.0);
+ propagate(3.0);
+ teardown_nerv();
+ return 0;
+}
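
The driver above fixes a small contract with the Lua side: the script passed to `luaL_loadfile` receives the network config path as its single argument and must return three values: the input width, the output width, and a propagator function taking a host input matrix and a host output matrix. setup_nerv.lua (below) implements this against a real network; a hypothetical stub satisfying the same contract, handy for testing the embedding alone, could be (the 440/2011 widths are borrowed from the chime3 configs and otherwise arbitrary):

```
-- hypothetical stub with the same return contract as setup_nerv.lua:
-- (input width, output width, propagator); this propagator only reports
-- shapes and leaves the output matrix untouched
local function propagator(input, output)
    print(input:nrow(), input:ncol(), output:nrow(), output:ncol())
end
return 440, 2011, propagator
```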
diff --git a/embedding_example/run.sh b/embedding_example/run.sh
new file mode 100755
index 0000000..e919263
--- /dev/null
+++ b/embedding_example/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+export LUA_PATH='/home/slhome/mfy43/.luarocks/share/lua/5.1/?.lua;/home/slhome/mfy43/.luarocks/share/lua/5.1/?/init.lua;/home/slhome/mfy43/nerv/install/share/lua/5.1/?.lua;/home/slhome/mfy43/nerv/install/share/lua/5.1/?/init.lua;./?.lua;/usr/local/share/luajit-2.0.4/?.lua;/usr/local/share/lua/5.1/?.lua;/usr/local/share/lua/5.1/?/init.lua'
+export LUA_CPATH='/home/slhome/mfy43/.luarocks/lib/lua/5.1/?.so;/home/slhome/mfy43/nerv/install/lib/lua/5.1/?.so;./?.so;/usr/local/lib/lua/5.1/?.so;/usr/local/lib/lua/5.1/loadall.so'
+./main
diff --git a/embedding_example/setup_nerv.lua b/embedding_example/setup_nerv.lua
new file mode 100644
index 0000000..d80c306
--- /dev/null
+++ b/embedding_example/setup_nerv.lua
@@ -0,0 +1,25 @@
+local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+require 'nerv'
+local arg = {...}
+dofile(arg[1])
+local param_repo = nerv.ParamRepo()
+param_repo:import(gconf.initialized_param, nil, gconf)
+local layer_repo = make_layer_repo(param_repo)
+local network = get_decode_network(layer_repo)
+local global_transf = get_global_transf(layer_repo)
+local batch_size = 1
+network:init(batch_size)
+
+function propagator(input, output)
+ local transformed = nerv.speech_utils.global_transf(
+ gconf.cumat_type.new_from_host(input),
+ global_transf, 0, 0, gconf) -- preprocessing
+ local gpu_input = transformed
+ local gpu_output = nerv.CuMatrixFloat(output:nrow(), output:ncol())
+ network:propagate({gpu_input}, {gpu_output})
+ gpu_output:copy_toh(output)
+ -- collect garbage in-time to save GPU memory
+ collectgarbage("collect")
+end
+
+return network.dim_in[1], network.dim_out[1], propagator
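
The three values on the last line are exactly what main.c reads off the stack after `lua_pcall`. Driven from Lua instead of C, the same script can be exercised directly; a sketch, assuming the swb_baseline config (and the parameter files it names) is reachable and that `gconf.mmat_type` is the host matrix constructor set by the config:

```
-- driving setup_nerv.lua from Lua for a quick check (sketch; the shipped
-- driver is embedding_example/main.c)
local w_in, w_out, prop = loadfile("setup_nerv.lua")(
    "../nerv/examples/swb_baseline.lua")
local input  = gconf.mmat_type(1, w_in)   -- gconf comes from the config file
local output = gconf.mmat_type(1, w_out)
prop(input, output)                       -- fills `output` on the host
```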
diff --git a/nerv/Makefile b/nerv/Makefile
index 022e2fb..b449f82 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -30,14 +30,14 @@ LUAT_OBJS := $(addprefix $(OBJ_DIR)/,$(LUAT_OBJS))
OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
- layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua \
- layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua\
+ layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua layer/softmax.lua \
+ layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
io/sgd_buffer.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
-CUDA_BASE := /usr/local/cuda-6.5
-#CUDA_BASE := /usr/local/cuda-5.0
+#CUDA_BASE := /usr/local/cuda-7.0
+CUDA_BASE := /usr/local/cuda
CUDA_INCLUDE := -I $(CUDA_BASE)/include/
INCLUDE += $(CUDA_INCLUDE)
@@ -66,7 +66,7 @@ $(LIB_PATH)/libluaT.so: $(LUAT_OBJS)
$(INST_LIBDIR)/libnerv.so: $(NERV_OBJS) $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
gcc -shared -o $@ $(NERV_OBJS) $(LDFLAGS) -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -lluaT
-$(OBJ_DIR)/matrix/cumatrix.o: matrix/generic/cumatrix.c matrix/generic/matrix.c matrix/generic/cukernel.cu
+$(OBJ_DIR)/matrix/cumatrix.o: matrix/generic/cumatrix.c matrix/generic/matrix.c
$(OBJ_DIR)/matrix/mmatrix.o: matrix/generic/mmatrix.c matrix/generic/matrix.c
$(OBJ_DIR)/lib/matrix/cumatrix.o: lib/matrix/generic/cumatrix.c lib/matrix/generic/matrix.c lib/matrix/generic/cukernel.cu
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 4fa4096..69cfeed 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -1,9 +1,9 @@
function build_trainer(ifname)
local param_repo = nerv.ParamRepo()
param_repo:import(ifname, nil, gconf)
- local sublayer_repo = make_sublayer_repo(param_repo)
- local layer_repo = make_layer_repo(sublayer_repo, param_repo)
+ local layer_repo = make_layer_repo(param_repo)
local network = get_network(layer_repo)
+ local global_transf = get_global_transf(layer_repo)
local input_order = get_input_order()
local iterative_trainer = function (prefix, scp_file, bp)
gconf.randomize = bp
@@ -12,28 +12,41 @@ function build_trainer(ifname)
-- initialize the network
network:init(gconf.batch_size)
gconf.cnt = 0
- err_input = {nerv.CuMatrixFloat(256, 1)}
+ err_input = {nerv.CuMatrixFloat(gconf.batch_size, 1)}
err_input[1]:fill(1)
for data in buffer.get_data, buffer do
-- print stat periodically
gconf.cnt = gconf.cnt + 1
if gconf.cnt == 1000 then
- print_stat(sublayer_repo)
+ print_stat(layer_repo)
nerv.CuMatrix.print_profile()
nerv.CuMatrix.clear_profile()
gconf.cnt = 0
-- break
end
local input = {}
--- if gconf.cnt == 100 then break end
- for i, id in ipairs(input_order) do
+-- if gconf.cnt == 1000 then break end
+ for i, e in ipairs(input_order) do
+ local id = e.id
if data[id] == nil then
nerv.error("input data %s not found", id)
end
- table.insert(input, data[id])
+ local transformed
+ if e.global_transf then
+ transformed = nerv.speech_utils.global_transf(data[id],
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf)
+ else
+ transformed = data[id]
+ end
+ table.insert(input, transformed)
+ end
+ local output = {nerv.CuMatrixFloat(gconf.batch_size, 1)}
+ err_output = {}
+ for i = 1, #input do
+ table.insert(err_output, input[i]:create())
end
- local output = {nerv.CuMatrixFloat(256, 1)}
- err_output = {input[1]:create()}
network:propagate(input, output)
if bp then
network:back_propagate(err_input, err_output, input, output)
@@ -42,16 +55,16 @@ function build_trainer(ifname)
-- collect garbage in-time to save GPU memory
collectgarbage("collect")
end
- print_stat(sublayer_repo)
+ print_stat(layer_repo)
nerv.CuMatrix.print_profile()
nerv.CuMatrix.clear_profile()
if (not bp) and prefix ~= nil then
nerv.info("writing back...")
local fname = string.format("%s_cv%.3f.nerv",
- prefix, get_accuracy(sublayer_repo))
+ prefix, get_accuracy(layer_repo))
network:get_params():export(fname, nil)
end
- return get_accuracy(sublayer_repo)
+ return get_accuracy(layer_repo)
end
return iterative_trainer
end
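
Two behavioural changes accompany the repo cleanup in this hunk: the batch size is now taken from `gconf.batch_size` rather than hard-coded to 256, and `get_input_order` is expected to return a list of tables rather than bare id strings, so each input stream can opt in to the global transform individually. Configs must follow the shape used by mmi_chime3.lua below; the stream ids here are illustrative:

```
-- the get_input_order() contract asr_trainer.lua now expects: one entry
-- per reader stream, with an optional global_transf flag
function get_input_order()
    return {{id = "main_scp", global_transf = true},
            {id = "phone_state"}}   -- ids are config-specific
end
```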
diff --git a/nerv/examples/mmi_chime3.lua b/nerv/examples/mmi_chime3.lua
new file mode 100644
index 0000000..6ac7f28
--- /dev/null
+++ b/nerv/examples/mmi_chime3.lua
@@ -0,0 +1,183 @@
+require 'kaldi_io'
+require 'kaldi_seq'
+gconf = {lrate = 0.00001, wcost = 0, momentum = 0.0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ frm_ext = 5,
+ tr_scp = "ark,o:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_mmi/train.scp ark:- |",
+ initialized_param = {"/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_init_mmi.nerv",
+ "/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_global_transf_mmi.nerv"},
+ debug = false}
+
+function make_layer_repo(param_repo)
+ local layer_repo = nerv.LayerRepo(
+ {
+ -- global transf
+ ["nerv.BiasLayer"] =
+ {
+ blayer1 = {{bias = "bias1"}, {dim_in = {440}, dim_out = {440}}},
+ blayer2 = {{bias = "bias2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ ["nerv.WindowLayer"] =
+ {
+ wlayer1 = {{window = "window1"}, {dim_in = {440}, dim_out = {440}}},
+ wlayer2 = {{window = "window2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ -- biased linearity
+ ["nerv.AffineLayer"] =
+ {
+ affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
+ {dim_in = {440}, dim_out = {2048}}},
+ affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
+ {dim_in = {2048}, dim_out = {2011}}}
+ },
+ ["nerv.SigmoidLayer"] =
+ {
+ sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
+ },
+ ["nerv.MMILayer"] =
+ {
+ mmi_crit = {{}, {dim_in = {2011, -1}, dim_out = {1},
+ cmd = {
+ arg = "--class-frame-counts=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/ali_train_pdf.counts --acoustic-scale=0.1 --lm-scale=1.0 --learn-rate=0.00001 --drop-frames=true --verbose=1",
+ mdl = "/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/final.mdl",
+ lat = "scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_denlats/lat.scp",
+ ali = "ark:gunzip -c /slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/ali.*.gz |"
+ }
+ }
+ }
+ },
+ ["nerv.SoftmaxLayer"] = -- softmax for decode output
+ {
+ softmax = {{}, {dim_in = {2011}, dim_out = {2011}}}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ global_transf = {{}, {
+ dim_in = {440}, dim_out = {440},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "blayer1[1]",
+ ["blayer1[1]"] = "wlayer1[1]",
+ ["wlayer1[1]"] = "blayer2[1]",
+ ["blayer2[1]"] = "wlayer2[1]",
+ ["wlayer2[1]"] = "<output>[1]"
+ }
+ }},
+ main = {{}, {
+ dim_in = {440}, dim_out = {2011},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "affine0[1]",
+ ["affine0[1]"] = "sigmoid0[1]",
+ ["sigmoid0[1]"] = "affine1[1]",
+ ["affine1[1]"] = "sigmoid1[1]",
+ ["sigmoid1[1]"] = "affine2[1]",
+ ["affine2[1]"] = "sigmoid2[1]",
+ ["sigmoid2[1]"] = "affine3[1]",
+ ["affine3[1]"] = "sigmoid3[1]",
+ ["sigmoid3[1]"] = "affine4[1]",
+ ["affine4[1]"] = "sigmoid4[1]",
+ ["sigmoid4[1]"] = "affine5[1]",
+ ["affine5[1]"] = "sigmoid5[1]",
+ ["sigmoid5[1]"] = "affine6[1]",
+ ["affine6[1]"] = "sigmoid6[1]",
+ ["sigmoid6[1]"] = "affine7[1]",
+ ["affine7[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ mmi_output = {{}, {
+ dim_in = {440, -1}, dim_out = {1},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "mmi_crit[1]",
+ ["<input>[2]"] = "mmi_crit[2]",
+ ["mmi_crit[1]"] = "<output>[1]"
+ }
+ }},
+ softmax_output = {{}, {
+ dim_in = {440}, dim_out = {2011},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "softmax[1]",
+ ["softmax[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ return layer_repo
+end
+
+function get_network(layer_repo)
+ return layer_repo:get_layer("mmi_output")
+end
+
+function get_decode_network(layer_repo)
+ return layer_repo:get_layer("softmax_output")
+end
+
+function get_global_transf(layer_repo)
+ return layer_repo:get_layer("global_transf")
+end
+
+function make_readers(feature_rspecifier, layer_repo)
+ return {
+ {reader = nerv.KaldiReader(gconf,
+ {
+ id = "main_scp",
+ feature_rspecifier = feature_rspecifier,
+ frm_ext = gconf.frm_ext,
+ global_transf = layer_repo:get_layer("global_transf"),
+ mlfs = {}
+ })
+ }
+ }
+end
+
+function get_input_order()
+ return {{id = "main_scp", global_transf = true},
+ {id = "key"}}
+end
+
+function get_accuracy(layer_repo)
+ return 0
+end
+
+function print_stat(layer_repo)
+ local mmi_crit = layer_repo:get_layer("mmi_crit")
+ nerv.info("*** training stat begin ***")
+ nerv.printf("frames:\t\t\t%d\n", mmi_crit.total_frames)
+ nerv.info("*** training stat end ***")
+end
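
For a sequence-discriminative setup like this there is no per-frame accuracy, so `get_accuracy` returns a constant 0 and `print_stat` only reports the frame count accumulated by `mmi_crit`. The `-1` in `dim_in = {2011, -1}` marks the criterion's second port as variable-width; it carries the utterance key rather than a fixed-size matrix, which is why `get_input_order` pairs the feature stream with `{id = "key"}`. A sketch of how a trainer loop might consume these hooks (cf. the new nerv/examples/seq_trainer.lua in the diffstat; details here are assumed):

```
-- sketch (assumptions noted) of a loop consuming the hooks defined above
local param_repo = nerv.ParamRepo()
param_repo:import(gconf.initialized_param, nil, gconf)
local layer_repo = make_layer_repo(param_repo)
local network = get_network(layer_repo)   -- the "mmi_output" DAG
network:init(gconf.batch_size or 1)       -- batch size of 1 per utterance assumed
-- ... propagate/back_propagate per utterance ...
print_stat(layer_repo)
```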
diff --git a/nerv/examples/mpe_chime3.lua b/nerv/examples/mpe_chime3.lua
new file mode 100644
index 0000000..ec095b0
--- /dev/null
+++ b/nerv/examples/mpe_chime3.lua
@@ -0,0 +1,186 @@
+require 'kaldi_io'
+require 'kaldi_seq'
+gconf = {lrate = 0.00001, wcost = 0, momentum = 0.0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ frm_ext = 5,
+ tr_scp = "ark,s,cs:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_smbr/train.scp ark:- |",
+ initialized_param = {"/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_init.nerv",
+ "/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_global_transf.nerv"},
+ debug = false}
+
+function make_layer_repo(param_repo)
+ local layer_repo = nerv.LayerRepo(
+ {
+ -- global transf
+ ["nerv.BiasLayer"] =
+ {
+ blayer1 = {{bias = "bias1"}, {dim_in = {440}, dim_out = {440}}},
+ blayer2 = {{bias = "bias2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ ["nerv.WindowLayer"] =
+ {
+ wlayer1 = {{window = "window1"}, {dim_in = {440}, dim_out = {440}}},
+ wlayer2 = {{window = "window2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ -- biased linearity
+ ["nerv.AffineLayer"] =
+ {
+ affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
+ {dim_in = {440}, dim_out = {2048}}},
+ affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
+ {dim_in = {2048}, dim_out = {2011}}}
+ },
+ ["nerv.SigmoidLayer"] =
+ {
+ sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
+ },
+ ["nerv.MPELayer"] =
+ {
+ mpe_crit = {{}, {dim_in = {2011, -1}, dim_out = {1},
+ cmd = {
+ arg = "--class-frame-counts=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/ali_train_pdf.counts --acoustic-scale=0.1 --lm-scale=1.0 --learn-rate=0.00001 --do-smbr=true --verbose=1",
+ mdl = "/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/final.mdl",
+ lat = "scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_denlats/lat.scp",
+ ali = "ark:gunzip -c /slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/ali.*.gz |"
+