-rw-r--r--  htk_io/Makefile                                 |    9
-rw-r--r--  htk_io/init.c                                   |    2
-rw-r--r--  htk_io/init.lua                                 |   20
-rw-r--r--  htk_io/src/cwrapper.cpp                         |   15
-rw-r--r--  htk_io/src/cwrapper.h                           |    9
-rw-r--r--  htk_io/src/init.c                               |   12
-rw-r--r--  htk_io/src/test.c                               |    7
-rw-r--r--  kaldi_decode/Makefile                           |   43
-rwxr-xr-x  kaldi_decode/README                             |   13
-rwxr-xr-x  kaldi_decode/README.timit                       |   15
-rwxr-xr-x  kaldi_decode/cmd.sh                             |   36
-rw-r--r--  kaldi_decode/conf/decode_dnn.config             |    2
-rwxr-xr-x  kaldi_decode/decode_with_nerv.sh (renamed from kaldi_decode/decode.sh) | 26
-rw-r--r--  kaldi_decode/kaldi_decode-scm-1.rockspec        |   36
-rwxr-xr-x  kaldi_decode/local/score.sh                     |   67
-rw-r--r--  kaldi_decode/nnet-forward-with-nerv.sh          |    2
-rwxr-xr-x  kaldi_decode/path.sh                            |    5
-rw-r--r--  kaldi_decode/src/Makefile                       |   12
-rw-r--r--  kaldi_decode/src/asr_propagator.lua             |   84
-rw-r--r--  kaldi_decode/src/nerv4decode.lua                |   79
-rw-r--r--  kaldi_decode/src/nnet-forward.cc                |   18
-rwxr-xr-x  kaldi_decode/utils/int2sym.pl                   |   71
-rwxr-xr-x  kaldi_decode/utils/parse_options.sh             |   97
-rwxr-xr-x  kaldi_decode/utils/queue.pl                     |  580
-rwxr-xr-x  kaldi_decode/utils/run.pl                       |  264
-rwxr-xr-x  kaldi_decode/utils/split_data.sh                |  135
-rw-r--r--  kaldi_io/Makefile                               |   18
-rw-r--r--  kaldi_io/init.c                                 |    2
-rw-r--r--  kaldi_io/init.lua                               |   25
-rw-r--r--  kaldi_io/kaldi_io-scm-1.rockspec                |    2
-rw-r--r--  kaldi_io/src/cwrapper_kaldi.cpp                 |   52
-rw-r--r--  kaldi_io/src/cwrapper_kaldi.h                   |   15
-rw-r--r--  kaldi_io/src/init.c                             |   17
-rw-r--r--  kaldi_io/src/test.c                             |    7
-rwxr-xr-x  kaldi_io/tools/convert_from_kaldi_pretrain.sh   |   64
-rwxr-xr-x  kaldi_io/tools/kaldi_to_nerv (binary)           |    0 -> 18719 bytes
-rw-r--r--  kaldi_io/tools/kaldi_to_nerv.cpp                |   57
-rw-r--r--  kaldi_io/tools/nerv_to_kaldi.lua                |    4
-rw-r--r--  kaldi_seq/Makefile                              |   10
-rw-r--r--  kaldi_seq/init.c                                |    2
-rw-r--r--  kaldi_seq/src/init.c                            |    2
-rw-r--r--  kaldi_seq/src/kaldi_mmi.cpp                     |    5
-rw-r--r--  kaldi_seq/src/kaldi_mmi.h                       |    4
-rw-r--r--  kaldi_seq/src/kaldi_mpe.cpp                     |    5
-rw-r--r--  kaldi_seq/src/kaldi_mpe.h                       |    4
-rw-r--r--  speech_utils/init.lua                           |    9
-rw-r--r--  tutorial/howto_pretrain_from_kaldi.rst          |  117
47 files changed, 601 insertions, 1479 deletions
diff --git a/htk_io/Makefile b/htk_io/Makefile
index d32d17a..6a5f529 100644
--- a/htk_io/Makefile
+++ b/htk_io/Makefile
@@ -1,3 +1,7 @@
+ifndef LUA_BINDIR
+$(error Please build the package via luarocks: `luarocks make`)
+endif
+
.PHONY: tnet
SHELL := /bin/bash
BUILD_DIR := $(CURDIR)/build
@@ -17,6 +21,7 @@ OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
LUA_SUBDIR := $(addprefix $(LUA_DIR)/,$(SUBDIR))
LUA_LIBS := $(addprefix $(LUA_DIR)/,$(LUA_LIBS))
LIB_PATH := $(LUA_BINDIR)/../lib
+LUALIB_PATH := $(LUA_BINDIR)/../lib/lua/5.1/
build: $(OBJ_DIR) $(OBJ_SUBDIR) $(OBJS) tnet $(OBJ_DIR)/src/test
install: $(LUA_DIR) $(LUA_SUBDIR) $(LUA_LIBS) $(LIBS)
@@ -26,7 +31,7 @@ $(OBJ_DIR) $(LUA_DIR) $(OBJ_SUBDIR) $(LUA_SUBDIR):
$(LUA_DIR)/%.lua: %.lua
cp $< $@
$(LIBS): $(OBJ_DIR)/src/cwrapper.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/init.o $(OBJ_DIR)/src/libKaldiLib.a
- gcc -shared -o $@ $(OBJ_DIR)/src/cwrapper.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/libKaldiLib.a $(OBJ_DIR)/src/init.o -lstdc++ -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -lluaT
+ gcc -shared -o $@ $(OBJ_DIR)/src/cwrapper.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/libKaldiLib.a $(OBJ_DIR)/src/init.o -lstdc++ -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -Wl,-rpath=$(LUALIB_PATH) -L$(LUALIB_PATH) -lnerv -lluaT
$(OBJ_DIR)/src/test: $(OBJ_DIR)/src/cwrapper.o $(OBJ_DIR)/src/test.o $(OBJ_DIR)/src/libKaldiLib.a
gcc -o $@ $^ -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -Wl,-rpath=$(LUA_LIBDIR) -L$(LUA_LIBDIR) -lluajit-5.1 -lstdc++ -lm
$(OBJ_DIR)/src/cwrapper.o: src/cwrapper.cpp
@@ -34,7 +39,7 @@ $(OBJ_DIR)/src/cwrapper.o: src/cwrapper.cpp
$(OBJ_DIR)/%.o: %.c
gcc -o $@ -c $< -g $(INCLUDE) -fPIC
clean:
- -rm $(OBJ_DIR)/src/*.o
+ -rm -r $(OBJ_DIR)
$(MAKE) -C src/KaldiLib/ clean
tnet:
$(MAKE) -C src/KaldiLib/ OBJ_DIR=$(OBJ_DIR)/src
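The link line above now also pulls in -lnerv from the luarocks Lua tree ($(LUA_BINDIR)/../lib/lua/5.1/), so the compiled htk_io module only loads once nerv is installed in the same tree. A minimal load-order sketch in Lua; the loader idiom is copied from asr_propagator.lua further down, and the `htk_io` module name is an assumption based on the install layout:

-- sketch: make sure nerv (and libnerv.so) is loaded before the htk_io module
local ok, loader = pcall(require, "luarocks.loader")
if ok then loader.add_context("nerv", "scm-1") end
require 'nerv'    -- pulls in libnerv.so, which htk_io now links against
require 'htk_io'  -- assumed module name
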
diff --git a/htk_io/init.c b/htk_io/init.c
index edd454f..5b373f9 100644
--- a/htk_io/init.c
+++ b/htk_io/init.c
@@ -1,4 +1,4 @@
-#include "../nerv/common.h"
+#include "nerv/lib/common.h"
#include <stdio.h>
extern void tnet_io_init(lua_State *L);
diff --git a/htk_io/init.lua b/htk_io/init.lua
index b360b67..1cdabf1 100644
--- a/htk_io/init.lua
+++ b/htk_io/init.lua
@@ -6,6 +6,11 @@ function TNetReader:__init(global_conf, reader_conf)
self.feat_id = reader_conf.id
self.frm_ext = reader_conf.frm_ext
self.gconf = global_conf
+ if self.gconf.use_cpu then
+ self.mat_type = self.gconf.mmat_type
+ else
+ self.mat_type = self.gconf.cumat_type
+ end
self.debug = global_conf.debug
if self.debug == nil then
self.debug = false
@@ -31,12 +36,15 @@ function TNetReader:get_data()
end
local res = {}
-- read HTK feature
- local raw = self.gconf.cumat_type.new_from_host(self.feat_repo:cur_utter(self.debug))
+ local raw = self.feat_repo:cur_utter(self.debug)
+ if not self.gconf.use_cpu then
+ raw = self.gconf.cumat_type.new_from_host(raw)
+ end
local rearranged
if self.frm_ext and self.frm_ext > 0 then
local step = self.frm_ext * 2 + 1
-- expand the feature
- local expanded = self.gconf.cumat_type(raw:nrow(), raw:ncol() * step)
+ local expanded = self.mat_type(raw:nrow(), raw:ncol() * step)
expanded:expand_frm(raw, self.frm_ext)
-- rearrange the feature (``transpose'' operation in TNet)
if self.gconf.rearrange then
@@ -53,8 +61,12 @@ function TNetReader:get_data()
feat_utter = self.gconf.mmat_type(rearranged:nrow() - self.gconf.frm_trim * 2, rearranged:ncol())
rearranged:copy_toh(feat_utter, self.gconf.frm_trim, rearranged:nrow() - self.gconf.frm_trim)
else
- feat_utter = self.gconf.mmat_type(rearranged:nrow(), rearranged:ncol())
- rearranged:copy_toh(feat_utter)
+ if self.gconf.use_cpu then
+ feat_utter = rearranged
+ else
+ feat_utter = self.gconf.mmat_type(rearranged:nrow(), rearranged:ncol())
+ rearranged:copy_toh(feat_utter)
+ end
end
res[self.feat_id] = feat_utter
-- add corresponding labels
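The hunks above make TNetReader honor gconf.use_cpu: when it is set, features are allocated and kept as host matrices (mmat_type), and the new_from_host/copy_toh round trips are skipped. A hedged sketch of the configuration fields the reader now consults; the field names come from the diff, while the values and the nerv.TNetReader registration are assumptions:

-- sketch only: values are illustrative, not NERV defaults
local gconf = {
    use_cpu    = true,                -- keep features on the CPU
    mmat_type  = nerv.MMatrixFloat,   -- host matrix constructor
    cumat_type = nerv.CuMatrixFloat,  -- device constructor, unused when use_cpu is set
    frm_ext    = 5,                   -- context frames on each side
    rearrange  = false,
    debug      = false,
}
local reader = nerv.TNetReader(gconf, {id = "main_scp", frm_ext = gconf.frm_ext})
local data = reader:get_data()        -- data["main_scp"] stays a host matrix
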
diff --git a/htk_io/src/cwrapper.cpp b/htk_io/src/cwrapper.cpp
index b7ce2d5..efb5628 100644
--- a/htk_io/src/cwrapper.cpp
+++ b/htk_io/src/cwrapper.cpp
@@ -8,9 +8,10 @@
extern "C" {
#include "cwrapper.h"
#include "string.h"
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
+#include "nerv/lib/matrix/mmatrix.h"
- extern Matrix *nerv_matrix_host_float_create(long nrow, long ncol, Status *status);
+ extern Matrix *nerv_matrix_host_float_create(long nrow, long ncol, MContext *context, Status *status);
struct TNetFeatureRepo {
TNet::FeatureRepository feature_repo;
@@ -53,7 +54,8 @@ extern "C" {
return repo;
}
- Matrix *tnet_feature_repo_read_utterance(TNetFeatureRepo *repo, lua_State *L, int debug) {
+ Matrix *tnet_feature_repo_read_utterance(TNetFeatureRepo *repo, lua_State *L,
+ int debug, MContext *context) {
Matrix *mat; /* nerv implementation */
repo->feature_repo.ReadFullMatrix(repo->feats_host);
std::string utter_str = repo->feature_repo.Current().Logical();
@@ -61,7 +63,7 @@ extern "C" {
int n = repo->feats_host.Rows();
int m = repo->feats_host.Cols();
Status status;
- mat = nerv_matrix_host_float_create(n, m, &status);
+ mat = nerv_matrix_host_float_create(n, m, context, &status);
NERV_LUA_CHECK_STATUS(L, status);
size_t stride = mat->stride;
if (debug)
@@ -119,7 +121,8 @@ extern "C" {
size_t sample_rate,
const char *tag,
lua_State *L,
- int debug) {
+ int debug,
+ MContext *context) {
std::vector<TNet::Matrix<float> > labs_hosts; /* KaldiLib implementation */
Matrix *mat;
repo->label_repo.GenDesiredMatrixExt(labs_hosts, frames,
@@ -127,7 +130,7 @@ extern "C" {
int n = labs_hosts[0].Rows();
int m = labs_hosts[0].Cols();
Status status;
- mat = nerv_matrix_host_float_create(n, m, &status);
+ mat = nerv_matrix_host_float_create(n, m, context, &status);
NERV_LUA_CHECK_STATUS(L, status);
size_t stride = mat->stride;
if (debug)
diff --git a/htk_io/src/cwrapper.h b/htk_io/src/cwrapper.h
index e1bce6e..0469773 100644
--- a/htk_io/src/cwrapper.h
+++ b/htk_io/src/cwrapper.h
@@ -1,7 +1,7 @@
#ifndef NERV_TNET_IO_CWRAPPER
#define NERV_TNET_IO_CWRAPPER
-#include "nerv/matrix/matrix.h"
-#include "nerv/common.h"
+#include "nerv/lib/matrix/mmatrix.h"
+#include "nerv/lib/common.h"
#ifdef __cplusplus
extern "C" {
#endif
@@ -10,7 +10,7 @@ extern "C" {
TNetFeatureRepo *tnet_feature_repo_new(const char *scp,
const char *config, int context);
- Matrix *tnet_feature_repo_read_utterance(TNetFeatureRepo *repo, lua_State *L, int debug);
+ Matrix *tnet_feature_repo_read_utterance(TNetFeatureRepo *repo, lua_State *L, int debug, MContext *context);
size_t tnet_feature_repo_current_samplerate(TNetFeatureRepo *repo);
const char *tnet_feature_repo_current_tag(TNetFeatureRepo *repo);
void tnet_feature_repo_next(TNetFeatureRepo *repo);
@@ -28,7 +28,8 @@ extern "C" {
size_t sample_rate,
const char *tag,
lua_State *L,
- int debug);
+ int debug,
+ MContext *context);
void tnet_label_repo_destroy(TNetLabelRepo *repo);
#ifdef __cplusplus
diff --git a/htk_io/src/init.c b/htk_io/src/init.c
index 8a1ec3b..a5132ba 100644
--- a/htk_io/src/init.c
+++ b/htk_io/src/init.c
@@ -1,4 +1,5 @@
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
+#include "nerv/matrix/matrix.h"
#include "cwrapper.h"
#include <stdio.h>
@@ -28,12 +29,14 @@ static int feat_repo_current_tag(lua_State *L) {
}
static int feat_repo_current_utterance(lua_State *L) {
+ MContext *context;
+ MMATRIX_GET_CONTEXT(L, 3);
TNetFeatureRepo *repo = luaT_checkudata(L, 1, nerv_tnet_feat_repo_tname);
int debug;
if (!lua_isboolean(L, 2))
nerv_error(L, "debug flag should be a boolean");
debug = lua_toboolean(L, 2);
- Matrix *utter = tnet_feature_repo_read_utterance(repo, L, debug);
+ Matrix *utter = tnet_feature_repo_read_utterance(repo, L, debug, context);
luaT_pushudata(L, utter, nerv_matrix_host_float_tname);
return 1;
}
@@ -72,6 +75,8 @@ static int label_repo_new(lua_State *L) {
}
static int label_repo_read_utterance(lua_State *L) {
+ MContext *context;
+ MMATRIX_GET_CONTEXT(L, 5);
TNetLabelRepo *repo = luaT_checkudata(L, 1, nerv_tnet_label_repo_tname);
TNetFeatureRepo *feat_repo = luaT_checkudata(L, 2, nerv_tnet_feat_repo_tname);
size_t frames = luaL_checkinteger(L, 3);
@@ -82,7 +87,8 @@ static int label_repo_read_utterance(lua_State *L) {
Matrix *utter = tnet_label_repo_read_utterance(repo,
frames,
tnet_feature_repo_current_samplerate(feat_repo),
- tnet_feature_repo_current_tag(feat_repo), L, debug);
+ tnet_feature_repo_current_tag(feat_repo),
+ L, debug, context);
luaT_pushudata(L, utter, nerv_matrix_host_float_tname);
return 1;
}
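With MMATRIX_GET_CONTEXT reading stack slot 3 for features (and slot 5 for labels), the MContext becomes an optional trailing argument of these Lua bindings. A sketch of the Lua-side calls; cur_utter is the method name used in htk_io/init.lua, and the explicit-context form plus the default-context fallback are assumptions inferred from the argument positions above:

local raw  = feat_repo:cur_utter(false)           -- slot 3 empty: default MContext
local raw2 = feat_repo:cur_utter(false, my_mctx)  -- explicit nerv.MContext in slot 3
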
diff --git a/htk_io/src/test.c b/htk_io/src/test.c
index 6812ef1..1ced108 100644
--- a/htk_io/src/test.c
+++ b/htk_io/src/test.c
@@ -1,6 +1,9 @@
#include "cwrapper.h"
+#include "nerv/lib/matrix/mmatrix.h"
#include <stdio.h>
+MContext context;
+
void print_nerv_matrix(Matrix *mat) {
int n = mat->nrow;
int m = mat->ncol;
@@ -22,7 +25,7 @@ int main() {
"/slfs1/users/mfy43/swb_ivec/train_bp.scp",
"/slfs1/users/mfy43/swb_ivec/plp_0_d_a.conf", 5);
Matrix *feat_utter;
- feat_utter = tnet_feature_repo_read_utterance(feat_repo, NULL, 1);
+ feat_utter = tnet_feature_repo_read_utterance(feat_repo, NULL, 1, &context);
TNetLabelRepo *lab_repo = tnet_label_repo_new(
"/slfs1/users/mfy43/swb_ivec/ref.mlf",
@@ -34,7 +37,7 @@ int main() {
feat_utter->nrow - 5 * 2,
tnet_feature_repo_current_samplerate(feat_repo),
tnet_feature_repo_current_tag(feat_repo), NULL,
- 1);
+ 1, &context);
print_nerv_matrix(lab_utter);
return 0;
}
diff --git a/kaldi_decode/Makefile b/kaldi_decode/Makefile
new file mode 100644
index 0000000..e3a7c2d
--- /dev/null
+++ b/kaldi_decode/Makefile
@@ -0,0 +1,43 @@
+ifndef LUA_BINDIR
+$(error Please build the package via luarocks: `luarocks make`)
+endif
+
+ifndef KALDI_BASE
+$(error KALDI_BASE is not set)
+endif
+
+ifndef CUDA_BASE
+$(error CUDA_BASE is not set)
+endif
+
+KDIR := $(KALDI_BASE)
+BUILD_DIR := $(CURDIR)/build
+INC_PATH := $(LUA_BINDIR)/../include/
+OBJS := src/nnet-forward.o nnet-forward
+
+SUBDIR := src
+OBJ_DIR := $(BUILD_DIR)/objs
+LUA_DIR = $(INST_LUADIR)/kaldi_decode
+KALDIINCLUDE := -I $(KDIR)/tools/ATLAS/include/ -I $(KDIR)/tools/openfst/include/ -I $(KDIR)/src/
+
+OBJS := $(addprefix $(OBJ_DIR)/,$(OBJS))
+OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
+
+KL := $(KDIR)/src/feat/kaldi-feat.a $(KDIR)/src/cudamatrix/kaldi-cudamatrix.a $(KDIR)/src/matrix/kaldi-matrix.a $(KDIR)/src/base/kaldi-base.a $(KDIR)/src/util/kaldi-util.a $(KDIR)/src/hmm/kaldi-hmm.a $(KDIR)/src/tree/kaldi-tree.a $(KDIR)/src/nnet/kaldi-nnet.a $(BLAS_LDFLAGS)
+
+build: $(OBJ_DIR) $(LUA_DIR) $(OBJ_SUBDIR) $(OBJS)
+$(OBJ_DIR)/%.o: %.cc
+ g++ -c -o $@ $< -Wall $(KALDIINCLUDE) -DHAVE_ATLAS -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN -DLUA_USE_APICHECK -I $(LUA_INCDIR) -I $(INC_PATH) $(CFLAGS)
+$(OBJ_DIR)/nnet-forward: $(OBJ_DIR)/src/nnet-forward.o
+ g++ -o $@ $< $(KL) -L$(LUA_LIBDIR) -Wl,-rpath=$(LUA_LIBDIR) -lluajit-5.1 -L$(CUDA_BASE)/lib64/ -Wl,-rpath=$(CUDA_BASE)/lib64/ -lcudart -lcublas -ldl
+$(OBJ_DIR) $(LUA_DIR) $(OBJ_SUBDIR):
+ -mkdir -p $@
+install: $(LUA_DIR)
+ cp $(OBJ_DIR)/nnet-forward $(LUA_BINDIR)/nnet-forward-with-nerv
+ cp src/asr_propagator.lua $(LUA_DIR)/
+ sed 's*nnet_forward=*nnet_forward=$(LUA_BINDIR)/nnet-forward-with-nerv.sh*g;s*asr_propagator=*asr_propagator=$(LUA_BINDIR)/../share/lua/5.1/kaldi_decode/asr_propagator.lua*g' decode_with_nerv.sh > $(LUA_BINDIR)/decode_with_nerv.sh
+ echo '$(LUA_BINDIR)/nnet-forward-with-nerv "$$@"' | cat nnet-forward-with-nerv.sh - | sed 's*\.\./\.\./install/bin/luarocks*$(LUA_BINDIR)/luarocks*g' > $(LUA_BINDIR)/nnet-forward-with-nerv.sh
+ chmod +x $(LUA_BINDIR)/nnet-forward-with-nerv.sh
+ chmod +x $(LUA_BINDIR)/decode_with_nerv.sh
+clean:
+ -rm -r $(OBJ_DIR)
diff --git a/kaldi_decode/README b/kaldi_decode/README
deleted file mode 100755
index 8d0a95b..0000000
--- a/kaldi_decode/README
+++ /dev/null
@@ -1,13 +0,0 @@
-source path.sh
-source cmd.sh
-
-acwt=0.1
-dir=/slfs5/users/ymz09/chime/baseline/ASR/exp/nerv_seq/
-graph=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/graph_tgpr_5k
-data=/slfs5/users/ymz09/chime/baseline/ASR/data-fbank/et05_real_enhanced
-config=/slfs6/users/ymz09/nerv-project/nerv/nerv/examples/mpe_chime3.lua
-
-decode.sh --nj 4 --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \
- $graph $data $config \
- $dir/decode_tgpr_5k_et05_real_enhanced_nerv
-
diff --git a/kaldi_decode/README.timit b/kaldi_decode/README.timit
new file mode 100755
index 0000000..7fac918
--- /dev/null
+++ b/kaldi_decode/README.timit
@@ -0,0 +1,15 @@
+#!/bin/bash
+source path.sh
+source cmd.sh
+
+gmmdir=/speechlab/users/mfy43/timit/s5/exp/tri3/
+data_fmllr=/speechlab/users/mfy43/timit/s5/data-fmllr-tri3/
+dir=/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/
+nerv_config=/speechlab/users/mfy43/nerv/nerv/examples/timit_baseline2.lua
+decode=/speechlab/users/mfy43/nerv/install/bin/decode_with_nerv.sh
+
+# Decode (reuse HCLG graph)
+$decode --nj 20 --cmd "$decode_cmd" --acwt 0.2 \
+ $gmmdir/graph $data_fmllr/test $nerv_config $dir/decode_test || exit 1;
+$decode --nj 20 --cmd "$decode_cmd" --acwt 0.2 \
+ $gmmdir/graph $data_fmllr/dev $nerv_config $dir/decode_dev || exit 1;
diff --git a/kaldi_decode/cmd.sh b/kaldi_decode/cmd.sh
deleted file mode 100755
index e2e54e8..0000000
--- a/kaldi_decode/cmd.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-# "queue.pl" uses qsub. The options to it are
-# options to qsub. If you have GridEngine installed,
-# change this to a queue you have access to.
-# Otherwise, use "run.pl", which will run jobs locally
-# (make sure your --num-jobs options are no more than
-# the number of cpus on your machine.
-
-#a) JHU cluster options
-#export train_cmd="queue.pl -l arch=*64"
-#export decode_cmd="queue.pl -l arch=*64,mem_free=2G,ram_free=2G"
-#export mkgraph_cmd="queue.pl -l arch=*64,ram_free=4G,mem_free=4G"
-
-#export cuda_cmd="..."
-
-
-#b) BUT cluster options
-#export train_cmd="queue.pl -q all.q@@blade -l ram_free=1200M,mem_free=1200M"
-#export decode_cmd="queue.pl -q all.q@@blade -l ram_free=1700M,mem_free=1700M"
-#export decodebig_cmd="queue.pl -q all.q@@blade -l ram_free=4G,mem_free=4G"
-
-#export cuda_cmd="queue.pl -q long.q@@pco203 -l gpu=1"
-#export cuda_cmd="queue.pl -q long.q@pcspeech-gpu"
-#export mkgraph_cmd="queue.pl -q all.q@@servers -l ram_free=4G,mem_free=4G"
-
-#c) run it locally...
-export train_cmd=run.pl
-export decode_cmd=run.pl
-export cuda_cmd=run.pl
-export mkgraph_cmd=run.pl
-
-#export train_cmd='queue.pl'
-#export decode_cmd='queue.pl'
-#export cuda_cmd='queue.pl -l gpu=1 -l hostname="markov|date|hamming"'
-#export mkgraph_cmd='queue.pl"'
-
diff --git a/kaldi_decode/conf/decode_dnn.config b/kaldi_decode/conf/decode_dnn.config
deleted file mode 100644
index 89dd992..0000000
--- a/kaldi_decode/conf/decode_dnn.config
+++ /dev/null
@@ -1,2 +0,0 @@
-beam=18.0 # beam for decoding. Was 13.0 in the scripts.
-lattice_beam=10.0 # this has most effect on size of the lattices.
diff --git a/kaldi_decode/decode.sh b/kaldi_decode/decode_with_nerv.sh
index aa7e089..5554b2e 100755
--- a/kaldi_decode/decode.sh
+++ b/kaldi_decode/decode_with_nerv.sh
@@ -1,5 +1,4 @@
#!/bin/bash
-
# Copyright 2012-2013 Karel Vesely, Daniel Povey
# Apache 2.0
@@ -20,7 +19,7 @@ lattice_beam=8.0
min_active=200
max_active=7000 # limit of active tokens
max_mem=50000000 # approx. limit to memory consumption during minimization in bytes
-nnet_forward_opts="--prior-scale=1.0"
+nnet_forward_opts="--apply-log=true" # IMPORTANT: the log must be applied before the log-prior is subtracted; note that the modified 'nnet-forward' has dropped the '--no-softmax' option
skip_scoring=false
scoring_opts="--min-lmwt 4 --max-lmwt 15"
@@ -28,6 +27,13 @@ scoring_opts="--min-lmwt 4 --max-lmwt 15"
num_threads=1 # if >1, will use latgen-faster-parallel
parallel_opts= # Ignored now.
use_gpu="no" # yes|no|optionally
+
+cmvn_opts=
+splice_opts=
+delta_opts=
+
+asr_propagator=
+nnet_forward=
# End configuration section.
echo "$0 $@" # Print the command line for logging
@@ -36,7 +42,7 @@ echo "$0 $@" # Print the command line for logging
. parse_options.sh || exit 1;
if [ $# != 4 ]; then
- echo "Usage: $0 [options] <graph-dir> <data-dir> <nerv-config> <decode-dir>"
+ echo "Usage: $0 [options] <graph-dir> <data-dir> <nerv-model-config> <decode-dir>"
echo "... where <decode-dir> is assumed to be a sub-directory of the directory"
echo " where the DNN and transition model is."
echo "e.g.: $0 exp/dnn1/graph_tgpr data/test config.lua exp/dnn1/decode_tgpr"
@@ -62,8 +68,9 @@ fi
graphdir=$1
data=$2
-config=$3
+model_conf=$3
dir=$4
+
[ -z $srcdir ] && srcdir=`dirname $dir`; # Default model directory one level up from decoding directory.
sdata=$data/split$nj;
@@ -90,11 +97,10 @@ thread_string=
# PREPARE FEATURE EXTRACTION PIPELINE
# import config,
-cmvn_opts=
-delta_opts=
D=$srcdir
[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility,
[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts)
+[ -e $D/splice_opts ] && splice_opts=$(cat $D/splice_opts)
[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility,
[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts)
#
@@ -103,13 +109,17 @@ feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |"
# apply-cmvn (optional),
[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1
[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |"
+# splice-opts (optional),
+[ ! -z "$splice_opts" ] && feats="$feats splice-feats $splice_opts ark:- ark:- |"
# add-deltas (optional),
[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |"
#
# Run the decoding in the queue,
if [ $stage -le 0 ]; then
- $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \
- ./src/nnet-forward $nnet_forward_opts --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $config "$feats" ark:- \| \
+# $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \
+# multi-threading disabled to avoid the SMP queue requirement
+ $cmd --num-threads $((num_threads)) JOB=1:$nj $dir/log/decode.JOB.log \
+ $nnet_forward $nnet_forward_opts --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $model_conf "$feats" ark:- $asr_propagator \| \
latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \
--lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \
$model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
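The switch from --prior-scale to --apply-log=true reflects that the NERV propagator emits softmax posteriors: latgen-faster-mapped wants scaled pseudo log-likelihoods, log p(x|s) approximated by log p(s|x) - log p(s), so the log must be applied before the log-prior (estimated from --class-frame-counts) is subtracted. An illustrative Lua sketch of that conversion, not actual NERV code:

-- posterior: table of p(s|x); log_prior: table of log p(s) from frame counts
local function pseudo_loglik(posterior, log_prior)
    local out = {}
    for s = 1, #posterior do
        out[s] = math.log(posterior[s]) - log_prior[s]
    end
    return out
end
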
diff --git a/kaldi_decode/kaldi_decode-scm-1.rockspec b/kaldi_decode/kaldi_decode-scm-1.rockspec
new file mode 100644
index 0000000..cc533ae
--- /dev/null
+++ b/kaldi_decode/kaldi_decode-scm-1.rockspec
@@ -0,0 +1,36 @@
+package = "kaldi_decode"
+version = "scm-1"
+source = {
+ url = "https://github.com/Nerv-SJTU/nerv-speech.git"
+}
+description = {
+ summary = "Kaldi decode support for NERV",
+ detailed = [[
+ ]],
+ homepage = "https://github.com/Determinant/nerv-speech",
+ license = "BSD"
+}
+dependencies = {
+ "nerv >= scm-1",
+ "lua >= 5.1"
+}
+build = {
+ type = "make",
+ build_variables = {
+ CFLAGS="$(CFLAGS) -Wall -Wextra -g -O2",
+ --CFLAGS="$(CFLAGS) -Wall -Wextra -g",
+ LIBFLAG="$(LIBFLAG)",
+ LUA_LIBDIR="$(LUA_LIBDIR)",
+ LUA_BINDIR="$(LUA_BINDIR)",
+ LUA_INCDIR="$(LUA_INCDIR)",
+ LUA="$(LUA)",
+ },
+ install_variables = {
+ LUA_BINDIR="$(LUA_BINDIR)",
+ INST_PREFIX="$(PREFIX)",
+ INST_BINDIR="$(BINDIR)",
+ INST_LIBDIR="$(LIBDIR)",
+ INST_LUADIR="$(LUADIR)",
+ INST_CONFDIR="$(CONFDIR)",
+ },
+}
diff --git a/kaldi_decode/local/score.sh b/kaldi_decode/local/score.sh
deleted file mode 100755
index b18f350..0000000
--- a/kaldi_decode/local/score.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-# Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
-# Apache 2.0
-
-[ -f ./path.sh ] && . ./path.sh
-
-# begin configuration section.
-cmd=run.pl
-stage=0
-decode_mbr=true
-reverse=false
-word_ins_penalty=0.0
-min_lmwt=5
-max_lmwt=20
-#end configuration section.
-
-[ -f ./path.sh ] && . ./path.sh
-. parse_options.sh || exit 1;
-
-if [ $# -ne 3 ]; then
- echo "Usage: local/score.sh [--cmd (run.pl|queue.pl...)] <data-dir> <lang-dir|graph-dir> <decode-dir>"
- echo " Options:"
- echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
- echo " --stage (0|1|2) # start scoring script from part-way through."
- echo " --decode_mbr (true/false) # maximum bayes risk decoding (confusion network)."
- echo " --min_lmwt <int> # minumum LM-weight for lattice rescoring "
- echo " --max_lmwt <int> # maximum LM-weight for lattice rescoring "
- echo " --reverse (true/false) # score with time reversed features "
- exit 1;
-fi
-
-data=$1
-lang_or_graph=$2
-dir=$3
-
-symtab=$lang_or_graph/words.txt
-
-for f in $symtab $dir/lat.1.gz $data/text; do
- [ ! -f $f ] && echo "score.sh: no such file $f" && exit 1;
-done
-
-mkdir -p $dir/scoring/log
-
-cat $data/text | sed 's:<NOISE>::g' | sed 's:<SPOKEN_NOISE>::g' > $dir/scoring/test_filt.txt
-
-$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/best_path.LMWT.log \
- lattice-scale --inv-acoustic-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \
- lattice-add-penalty --word-ins-penalty=$word_ins_penalty ark:- ark:- \| \
- lattice-best-path --word-symbol-table=$symtab \
- ark:- ark,t:$dir/scoring/LMWT.tra || exit 1;
-
-if $reverse; then
- for lmwt in `seq $min_lmwt $max_lmwt`; do
- mv $dir/scoring/$lmwt.tra $dir/scoring/$lmwt.tra.orig
- awk '{ printf("%s ",$1); for(i=NF; i>1; i--){ printf("%s ",$i); } printf("\n"); }' \
- <$dir/scoring/$lmwt.tra.orig >$dir/scoring/$lmwt.tra
- done
-fi
-
-# Note: the double level of quoting for the sed command
-$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/score.LMWT.log \
- cat $dir/scoring/LMWT.tra \| \
- utils/int2sym.pl -f 2- $symtab \| sed 's:\<UNK\>::g' \| \
- compute-wer --text --mode=present \
- ark:$dir/scoring/test_filt.txt ark,p:- ">&" $dir/wer_LMWT || exit 1;
-
-exit 0;
diff --git a/kaldi_decode/nnet-forward-with-nerv.sh b/kaldi_decode/nnet-forward-with-nerv.sh
new file mode 100644
index 0000000..71bf239
--- /dev/null
+++ b/kaldi_decode/nnet-forward-with-nerv.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+source <(../../install/bin/luarocks path)
diff --git a/kaldi_decode/path.sh b/kaldi_decode/path.sh
deleted file mode 100755
index 5aebc72..0000000
--- a/kaldi_decode/path.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-export KALDI_ROOT=/slfs6/users/ymz09/kaldi/
-export PATH=$PWD/utils/:$KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin/:$KALDI_ROOT/src/kwsbin:$PWD:$PATH
-export LC_ALL=C
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/slhome/ymz09/mylibs/:.
-
diff --git a/kaldi_decode/src/Makefile b/kaldi_decode/src/Makefile
deleted file mode 100644
index 7cffbc2..0000000
--- a/kaldi_decode/src/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --share)
-KDIR := /slfs6/users/ymz09/kaldi/
-NERVDIR := /slfs6/users/ymz09/nerv-project/nerv/
-CUDADIR := /usr/local/cuda/
-
-nnet-forward:
- g++ -msse -msse2 -Wall -I $(KDIR)/src/ -pthread -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN -Wno-sign-compare -Wno-unused-local-typedefs -Winit-self -DHAVE_EXECINFO_H=1 -rdynamic -DHAVE_CXXABI_H -DHAVE_ATLAS -I $(KDIR)/tools/ATLAS/include -I $(KDIR)/tools/openfst/include -Wno-sign-compare -g -fPIC -DHAVE_CUDA -I $(CUDADIR)/include -DKALDI_NO_EXPF -I $(NERVDIR)/install//include/luajit-2.0/ -I $(NERVDIR)/install/include/ -DLUA_USE_APICHECK -c -o nnet-forward.o nnet-forward.cc
- g++ -rdynamic -Wl,-rpath=$(KDIR)/tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath=$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)/src/lib -L. -L$(KDIR)/src/nnet/ -L$(KDIR)/src/cudamatrix/ -L$(KDIR)/src/lat/ -L$(KDIR)/src/hmm/ -L$(KDIR)/src/tree/ -L$(KDIR)/src/matrix/ -L$(KDIR)/src/util/ -L$(KDIR)/src/base/ nnet-forward.o $(KDIR)/src/nnet//libkaldi-nnet.so $(KDIR)/src/cudamatrix//libkaldi-cudamatrix.so $(KDIR)/src/lat//libkaldi-lat.so $(KDIR)/src/hmm//libkaldi-hmm.so $(KDIR)/src/tree//libkaldi-tree.so $(KDIR)/src/matrix//libkaldi-matrix.so $(KDIR)/src/util//libkaldi-util.so $(KDIR)/src/base//libkaldi-base.so -L$(KDIR)/tools/openfst/lib -lfst /usr/lib/liblapack.so /usr/lib/libcblas.so /usr/lib/libatlas.so /usr/lib/libf77blas.so -lm -lpthread -ldl -lcublas -lcudart -lkaldi-nnet -lkaldi-cudamatrix -lkaldi-lat -lkaldi-hmm -lkaldi-tree -lkaldi-matrix -lkaldi-util -lkaldi-base -lstdc++ -L$(NERVDIR)/install/lib -Wl,-rpath=$(NERVDIR)/install/lib -lnervcore -lluaT -rdynamic -Wl,-rpath=$(KDIR)//tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath=$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)//src/lib -lfst -lm -lpthread -ldl -lcublas -lcudart -L $(NERVDIR)/luajit-2.0/src/ -lluajit -o nnet-forward
-
-clean:
- -rm nnet-forward.o nnet-forward
-
diff --git a/kaldi_decode/src/asr_propagator.lua b/kaldi_decode/src/asr_propagator.lua
new file mode 100644
index 0000000..5d0ad7c
--- /dev/null
+++ b/kaldi_decode/src/asr_propagator.lua
@@ -0,0 +1,84 @@
+print = function(...) io.write(table.concat({...}, "\t")) end
+io.output('/dev/null')
+-- path and cpath are correctly set by `path.sh`
+local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+require 'nerv'
+nerv.printf("*** NERV: A Lua-based toolkit for high-performance deep learning (alpha) ***\n")
+nerv.info("automatically initialize a default MContext...")
+nerv.MMatrix._default_context = nerv.MContext()
+nerv.info("the default MContext is ok")
+-- only for backward compatibility; will be removed in the future
+local function _add_profile_method(cls)
+ local c = cls._default_context
+ cls.print_profile = function () c:print_profile() end
+ cls.clear_profile = function () c:clear_profile() end
+end
+_add_profile_method(nerv.MMatrix)
+
+function build_propagator(ifname, feature)
+ local param_repo = nerv.ParamRepo()
+ param_repo:import(ifname, nil, gconf)
+ local layer_repo = make_layer_repo(param_repo)
+ local network = get_decode_network(layer_repo)
+ local global_transf = get_global_transf(layer_repo)
+ local input_order = get_decode_input_order()
+ local readers = make_decode_readers(feature, layer_repo)
+
+ local batch_propagator = function()
+ local data = nil
+ for ri = 1, #readers do
+ data = readers[ri].reader:get_data()
+ if data ~= nil then
+ break
+ end
+ end
+
+ if data == nil then
+ return "", nil
+ end
+
+ gconf.batch_size = data[input_order[1].id]:nrow()
+ network:init(gconf.batch_size)
+
+ local input = {}
+ for i, e in ipairs(input_order) do
+ local id = e.id
+ if data[id] == nil then
+ nerv.error("input data %s not found", id)
+ end
+ local transformed
+ if e.global_transf then
+ transformed = nerv.speech_utils.global_transf(data[id],
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf)
+ else
+ transformed = data[id]
+ end
+ table.insert(input, transformed)
+ end
+ local output = {nerv.MMatrixFloat(input[1]:nrow(), network.dim_out[1])}
+ network:propagate(input, output)
+
+ local utt = data["key"]
+ if utt == nil then
+ nerv.error("no key found.")
+ end
+
+ collectgarbage("collect")
+ return utt, output[1]
+ end
+
+ return batch_propagator
+end
+
+function init(config, feature)
+ dofile(config)
+ gconf.use_cpu = true -- use CPU to decode
+ trainer = build_propagator(gconf.decode_param, feature)
+end
+
+function feed()
+ local utt, mat = trainer()
+ return utt, mat
+end
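asr_propagator.lua exposes the two entry points that nnet-forward.cc drives through the Lua C API: init(config, feature) builds the batch propagator from gconf.decode_param, and feed() yields one utterance per call. A sketch of the driver loop, transcribed to Lua for illustration; the real loop is C++ in nnet-forward.cc, and the paths are assumptions:

init("timit_baseline2.lua", "ark,s,cs:copy-feats scp:feats.scp ark:- |")
while true do
    local utt, mat = feed()
    if mat == nil then break end  -- feed() returns "", nil once the readers run dry
    -- nnet-forward then applies the log / prior subtraction and writes mat
    -- (an MMatrixFloat of posteriors) to the feature-wspecifier under key utt
end
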
diff --git a/kaldi_decode/src/nerv4decode.lua b/kaldi_decode/src/nerv4decode.lua
deleted file mode 100644
index b2ff344..0000000
--- a/kaldi_decode/src/nerv4decode.lua
+++ /dev/null
@@ -1,79 +0,0 @@
-package.path="/home/slhome/ymz09/.luarocks/share/lua/5.1/?.lua;/home/slhome/ymz09/.luarocks/share/lua/5.1/?/init.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?/init.lua;"..package.path;
-package.cpath="/home/slhome/ymz09/.luarocks/lib/lua/5.1/?.so;/slfs6/users/ymz09/nerv-project/nerv/install/lib/lua/5.1/?.so;"..package.cpath;
-local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
-require 'nerv'
-
-function build_trainer(ifname, feature)
- local param_repo = nerv.ParamRepo()
- param_repo:import(ifname, nil, gconf)
- local layer_repo = make_layer_repo(param_repo)
- local network = get_decode_network(layer_repo)
- local global_transf = get_global_transf(layer_repo)
- local input_order = get_input_order()
- local readers = make_readers(feature, layer_repo)
- network:init(1)
-
- local iterative_trainer = function()
- local data = nil
- for ri = 1, #readers, 1 do
- data = readers[ri].reader:get_data()
- if data ~= nil then
- break
- end
- end
-
- if data == nil then
- return "", nil
- end
-
- local input = {}
- for i, e in ipairs(input_order) do
- local id = e.id
- if data[id] == nil then
- nerv.error("input data %s not found", id)
- end
- local transformed
- if e.global_transf then
- local batch = gconf.cumat_type(data[id]:nrow(), data[id]:ncol())
- batch:copy_fromh(data[id])
- transformed = nerv.speech_utils.global_transf(batch,
- global_transf,
- gconf.frm_ext or 0, 0,
- gconf)
- else
- transformed = data[id]
- end
- table.insert(input, transformed)
- end
- local output = {nerv.CuMatrixFloat(input[1]:nrow(), network.dim_out[1])}
- network:batch_resize(input[1]:nrow())
- network:propagate(input, output)
-
- local utt = data["key"]
- if utt == nil then
- nerv.error("no key found.")
- end
-
- local mat = nerv.MMatrixFloat(output[1]:nrow(), output[1]:ncol())
- output[1]:copy_toh(mat)
-
- collectgarbage("collect")
- return utt, mat
- end
-
- return iterative_trainer
-end
-
-function init(config, feature)
- local tmp = io.write
- io.write = function(...)
- end
- dofile(config)
- trainer = build_trainer(gconf.decode_param, feature)
- io.write = tmp
-end
-
-function feed()
- local utt, mat = trainer()
- return utt, mat
-end
diff --git a/kaldi_decode/src/nnet-forward.cc b/kaldi_decode/src/nnet-forward.cc
index 007f623..8781705 100644
--- a/kaldi_decode/src/nnet-forward.cc
+++ b/kaldi_decode/src/nnet-forward.cc
@@ -21,9 +21,9 @@ extern "C"{
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"
-#include "nerv/matrix/matrix.h"
-#include "nerv/common.h"
-#include "nerv/luaT/luaT.h"
+#include "nerv/lib/matrix/matrix.h"
+#include "nerv/lib/common.h"
+#include "nerv/lib/luaT/luaT.h"
}
#include <limits>
@@ -46,7 +46,7 @@ int main(int argc, char *argv[]) {
const char *usage =
"Perform forward pass through Neural Network.\n"
"\n"
- "Usage: nnet-forward [options] <nerv-config> <feature-rspecifier> <feature-wspecifier> [nerv4decode.lua]\n"
+ "Usage: nnet-forward [options] <nerv-config> <feature-rspecifier> <feature-wspecifier> [asr_propagator.lua]\n"
"e.g.: \n"
" nnet-forward config.lua ark:features.ark ark:mlpoutput.ark\n";
@@ -78,9 +78,9 @@ int main(int argc, char *argv[]) {
std::string config = po.GetArg(1),
feature_rspecifier = po.GetArg(2),
feature_wspecifier = po.GetArg(3),
- nerv4decode = "src/nerv4decode.lua";
- if(po.NumArgs() >= 4)
- nerv4decode = po.GetArg(4);
+ propagator = "src/asr_propagator.lua";
+ if(po.NumArgs() >= 4)
+ propagator = po.GetArg(4);
//Select the GPU
#if HAVE_CUDA==1
@@ -99,8 +99,8 @@ int main(int argc, char *argv[]) {
lua_State *L = lua_open();
luaL_openlibs(L);
- if(luaL_loadfile(L, nerv4decode.c_str()))
- KALDI_ERR << "luaL_loadfile() " << nerv4decode << " failed " << lua_tostring(L, -1);
+ if(luaL_loadfile(L, propagator.c_str()))
+ KALDI_ERR << "luaL_loadfile() " << propagator << " failed " << lua_tostring(L, -1);
if(lua_pcall(L, 0, 0, 0))
KALDI_ERR << "lua_pall failed " << lua_tostring(L, -1);
diff --git a/kaldi_decode/utils/int2sym.pl b/kaldi_decode/utils/int2sym.pl
deleted file mode 100755
index d618939..0000000
--- a/kaldi_decode/utils/int2sym.pl
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env perl
-# Copyright 2010-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
-# Apache 2.0.
-
-undef $field_begin;
-undef $field_end;
-
-
-if ($ARGV[0] eq "-f") {
- shift @ARGV;
- $field_spec = shift @ARGV;
- if ($field_spec =~ m/^\d+$/) {
- $field_begin = $field_spec - 1; $field_end = $field_spec - 1;
- }
- if ($field_spec =~ m/^(\d*)[-:](\d*)/) { # accept e.g. 1:10 as a courtesty (properly, 1-10)
- if ($1 ne "") {
- $field_begin = $1 - 1; # Change to zero-based indexing.
- }
- if ($2 ne "") {
- $field_end = $2 - 1; # Change to zero-based indexing.
- }
- }
- if (!defined $field_begin && !defined $field_end) {
- die "Bad argument to -f option: $field_spec";
- }
-}
-$symtab = shift @ARGV;
-if(!defined $symtab) {
- print STDERR "Usage: sym2int.pl [options] symtab [input] > output\n" .
- "options: [-f (<field>|<field_start>-<field-end>)]\n" .
- "e.g.: -f 2, or -f 3-4\n";
- exit(1);
-}
-
-open(F, "<$symtab") || die "Error opening symbol table file $symtab";
-while(<F>) {
- @A = split(" ", $_);
- @A == 2 || die "bad line in symbol table file: $_";
- $int2sym{$A[1]} = $A[0];
-}
-
-sub int2sym {
- my $a = shift @_;
- my $pos = shift @_;
- if($a !~ m:^\d+$:) { # not all digits..
- $pos1 = $pos+1; # make it one-based.
- die "int2sym.pl: found noninteger token $a [in position $pos1]\n";
- }
- $s = $int2sym{$a};
- if(!defined ($s)) {
- die "int2sym.pl: integer $a not in symbol table $symtab.";
- }
- return $s;
-}
-
-$error = 0;
-while (<>) {
- @A = split(" ", $_);
- for ($pos = 0; $pos <= $#A; $pos++) {
- $a = $A[$pos];
- if ( (!defined $field_begin || $pos >= $field_begin)
- && (!defined $field_end || $pos <= $field_end)) {
- $a = int2sym($a, $pos);
- }
- print $a . " ";
- }
- print "\n";
-}
-
-
-
diff --git a/kaldi_decode/utils/parse_options.sh b/kaldi_decode/utils/parse_options.sh
deleted file mode 100755
index fdc8a36..0000000
--- a/kaldi_decode/utils/parse_options.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-# Copyright 2012 Johns Hopkins University (Author: Daniel Povey);
-# Arnab Ghoshal, Karel Vesely
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
-# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
-# MERCHANTABLITY OR NON-INFRINGEMENT.
-# See the Apache 2 License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Parse command-line options.
-# To be sourced by another script (as in ". parse_options.sh").
-# Option format is: --option-name arg
-# and shell variable "option_name" gets set to value "arg."
-# The exception is --help, which takes no arguments, but prints the
-# $help_message variable (if defined).
-
-
-###
-### The --config file options have lower priority to command line
-### options, so we need to import them first...
-###
-
-# Now import all the configs specified by command-line, in left-to-right order
-for ((argpos=1; argpos<$#; argpos++)); do
- if [ "${!argpos}" == "--config" ]; then
- argpos_plus1=$((argpos+1))
- config=${!argpos_plus1}
- [ ! -r $config ] && echo "$0: missing config '$config'" && exit 1
- . $config # source the config file.
- fi
-done
-
-
-###
-### No we process the command line options
-###
-while true; do
- [ -z "${1:-}" ] && break; # break if there are no arguments
- case "$1" in
- # If the enclosing script is called with --help option, print the help
- # message and exit. Scripts should put help messages in $help_message
- --help|-h) if [ -z "$help_message" ]; then echo "No help found." 1>&2;
- else printf "$help_message\n" 1>&2 ; fi;
- exit 0 ;;
- --*=*) echo "$0: options to scripts must be of the form --name value, got '$1'"
- exit 1 ;;
- # If the first command-line argument begins with "--" (e.g. --foo-bar),
- # then work out the variable name as $name, which will equal "foo_bar".
- --*) name=`echo "$1" | sed s/^--// | sed s/-/_/g`;
- # Next we test whether the variable in question is undefned-- if so it's
- # an invalid option and we die. Note: $0 evaluates to the name of the
- # enclosing script.
- # The test [ -z ${foo_bar+xxx} ] will return true if the variable foo_bar
- # is undefined. We then have to wrap this test inside "eval" because
- # foo_bar is itself inside a variable ($name).
- eval '[ -z "${'$name'+xxx}" ]' && echo "$0: invalid option $1" 1>&2 && exit 1;
-
- oldval="`eval echo \\$$name`";
- # Work out whether we seem to be expecting a Boolean argument.
- if [ "$oldval" == "true" ] || [ "$oldval" == "false" ]; then
- was_bool=true;
- else
- was_bool=false;
- fi
-
- # Set the variable to the right value-- the escaped quotes make it work if
- # the option had spaces, like --cmd "queue.pl -sync y"
- eval $name=\"$2\";
-
- # Check that Boolean-valued arguments are really Boolean.
- if $was_bool && [[ "$2" != "true" && "$2" != "false" ]]; then
- echo "$0: expected \"true\" or \"false\": $1 $2" 1>&2
- exit 1;
- fi
- shift 2;
- ;;
- *) break;
- esac
-done
-
-
-# Check for an empty argument to the --cmd option, which can easily occur as a
-# result of scripting errors.
-[ ! -z "${cmd+xxx}" ] && [ -z "$cmd" ] && echo "$0: empty argument to --cmd option" 1>&2 && exit 1;
-
-
-true; # so this script returns exit code 0.
diff --git a/kaldi_decode/utils/queue.pl b/kaldi_decode/utils/queue.pl
deleted file mode 100755
index 1e36de6..0000000
--- a/kaldi_decode/utils/queue.pl
+++ /dev/null
@@ -1,580 +0,0 @@
-#!/usr/bin/env perl
-use strict;
-use warnings;
-
-# Copyright 2012 Johns Hopkins University (Author: Daniel Povey).
-# 2014 Vimal Manohar (Johns Hopkins University)
-# Apache 2.0.
-
-use File::Basename;
-use Cwd;
-use Getopt::Long;
-
-# queue.pl has the same functionality as run.pl, except that
-# it runs the job in question on the queue (Sun GridEngine).
-# This version of queue.pl uses the task array functionality
-# of the grid engine. Note: it's different from the queue.pl
-# in the s4 and earlier scripts.
-
-# The script now supports configuring the queue system using a config file
-# (default in conf/queue.conf; but can be passed specified with --config option)
-# and a set of command line options.
-# The current script handles:
-# 1) Normal configuration arguments
-# For e.g. a command line option of "--gpu 1" could be converted into the option
-# "-q g.q -l gpu=1" to qsub. How the CLI option is handled is determined by a
-# line in the config file like
-# gpu=* -q g.q -l gpu=$0
-# $0 here in the line is replaced with the argument read from the CLI and the
-# resulting string is passed to qsub.
-# 2) Special arguments to options such as
-# gpu=0
-# If --gpu 0 is given in the command line, then no special "-q" is given.
-# 3) Default argument
-# default gpu=0
-# If --gpu option is not passed in the command line, then the script behaves as
-# if --gpu 0 was passed since 0 is specified as the default argument for that
-# option
-# 4) Arbitrary options and arguments.
-# Any command line option starting with '--' and its argument would be handled
-# as long as its defined in the config file.
-# 5) Default behavior
-# If the config file that is passed using is not readable, then the script
-# behaves as if the queue has the following config file:
-# $ cat conf/queue.conf
-# # Default configuration
-# command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
-# option mem=* -l mem_free=$0,ram_free=$0
-# option mem=0 # Do not add anything to qsub_opts
-# option num_threads=* -pe smp $0
-# option num_threads=1 # Do not add anything to qsub_opts
-# option max_jobs_run=* -tc $0
-# default gpu=0
-# option gpu=0 -q all.q
-# option gpu=* -l gpu=$0 -q g.q
-
-my $qsub_opts = "";
-my $sync = 0;
-my $num_threads = 1;
-my $gpu = 0;
-
-my $config = "conf/queue.conf";
-
-my %cli_options = ();
-
-my $jobname;
-my $jobstart;
-my $jobend;
-
-my $array_job = 0;
-
-sub print_usage() {
- print STDERR
- "Usage: queue.pl [options] [JOB=1:n] log-file command-line arguments...\n" .
- "e.g.: queue.pl foo.log echo baz\n" .
- " (which will echo \"baz\", with stdout and stderr directed to foo.log)\n" .
- "or: queue.pl -q all.q\@xyz foo.log echo bar \| sed s/bar/baz/ \n" .
- " (which is an example of using a pipe; you can provide other escaped bash constructs)\n" .
- "or: queue.pl -q all.q\@qyz JOB=1:10 foo.JOB.log echo JOB \n" .
- " (which illustrates the mechanism to submit parallel jobs; note, you can use \n" .
- " another string other than JOB)\n" .
- "Note: if you pass the \"-sync y\" option to qsub, this script will take note\n" .
- "and change its behavior. Otherwise it uses qstat to work out when the job finished\n" .
- "Options:\n" .
- " --config <config-file> (default: $config)\n" .
- " --mem <mem-requirement> (e.g. --mem 2G, --mem 500M, \n" .
- " also support K and numbers mean bytes)\n" .
- " --num-threads <num-threads> (default: $num_threads)\n" .
- " --max-jobs-run <num-jobs>\n" .
- " --gpu <0|1> (default: $gpu)\n";
- exit 1;
-}
-
-if (@ARGV < 2) {
- print_usage();
-}
-
-for (my $x = 1; $x <= 3; $x++) { # This for-loop is to
- # allow the JOB=1:n option to be interleaved with the
- # options to qsub.
- while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) {
- my $switch = shift @ARGV;
-
- if ($switch eq "-V") {
- $qsub_opts .= "-V ";
- } else {
- my $argument = shift @ARGV;
- if ($argument =~ m/^--/) {
- print STDERR "WARNING: suspicious argument '$argument' to $switch; starts with '-'\n";
- }
- if ($switch eq "-sync" && $argument =~ m/^[yY]/) {
- $sync = 1;
- $qsub_opts .= "$switch $argument ";
- } elsif ($switch eq "-pe") { # e.g. -pe smp 5
- my $argument2 = shift @ARGV;
- $qsub_opts .= "$switch $argument $argument2 ";
- $num_threads = $argument2;
- } elsif ($switch =~ m/^--/) { # Config options
- # Convert CLI option to variable name
- # by removing '--' from the switch and replacing any
- # '-' with a '_'
- $switch =~ s/^--//;
- $switch =~ s/-/_/g;
- $cli_options{$switch} = $argument;
- } else { # Other qsub options - passed as is
- $qsub_opts .= "$switch $argument ";
- }
- }
- }
- if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:20
- $array_job = 1;
- $jobname = $1;
- $jobstart = $2;
- $jobend = $3;
- shift;
- if ($jobstart > $jobend) {
- die "queue.pl: invalid job range $ARGV[0]";
- }
- if ($jobstart <= 0) {
- die "run.pl: invalid job range $ARGV[0], start must be strictly positive (this is a GridEngine limitation).";
- }
- } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1.
- $array_job = 1;
- $jobname = $1;
- $jobstart = $2;
- $jobend = $2;
- shift;
- } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) {
- print STDERR "Warning: suspicious first argument to queue.pl: $ARGV[0]\n";
- }
-}
-
-if (@ARGV < 2) {
- print_usage();
-}
-
-if (exists $cli_options{"config"}) {
- $config = $cli_options{"config"};
-}
-
-my $default_config_file = <<'EOF';
-# Default configuration
-command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
-option mem=* -l mem_free=$0,ram_free=$0
-option mem=0 # Do not add anything to qsub_opts
-option num_threads=* -pe smp $0
-option num_threads=1 # Do not add anything to qsub_opts
-option max_jobs_run=* -tc $0
-default gpu=0
-option gpu=0
-option gpu=* -l gpu=$0 -q g.q
-EOF
-
-# Here the configuration options specified by the user on the command line
-# (e.g. --mem 2G) are converted to options to the qsub system as defined in
-# the config file. (e.g. if the config file has the line
-# "option mem=* -l ram_free=$0,mem_free=$0"
-# and the user has specified '--mem 2G' on the command line, the options
-# passed to queue system would be "-l ram_free=2G,mem_free=2G
-# A more detailed description of the ways the options would be handled is at
-# the top of this file.
-
-my $opened_config_file = 1;
-
-open CONFIG, "<$config" or $opened_config_file = 0;
-
-my %cli_config_options = ();
-my %cli_default_options = ();
-
-if ($opened_config_file == 0 && exists($cli_options{"config"})) {
- print STDERR "Could not open config file $config\n";
- exit(1);
-} elsif ($opened_config_file == 0 && !exists($cli_options{"config"})) {
- # Open the default config file instead
- open (CONFIG, "echo '$default_config_file' |") or die "Unable to open pipe\n";
- $config = "Default config";
-}
-
-my $qsub_cmd = "";
-my $read_command = 0;
-
-while(<CONFIG>) {
- chomp;
- my $line = $_;
- $_ =~ s/\s*#.*//g;
- if ($_ eq "") { next; }
- if ($_ =~ /^command (.+)/) {
- $read_command = 1;
- $qsub_cmd = $1 . " ";
- } elsif ($_ =~ m/^option ([^=]+)=\* (.+)$/) {
- # Config option that needs replacement with parameter value read from CLI
- # e.g.: option mem=* -l mem_free=$0,ram_free=$0
- my $option = $1; # mem
- my $arg= $2; # -l mem_free=$0,ram_free=$0
- if ($arg !~ m:\$0:) {
- die "Unable to parse line '$line' in config file ($config)\n";
- }
- if (exists $cli_options{$option}) {
- # Replace $0 with the argument read from command line.
- # e.g. "-l mem_free=$0,ram_free=$0" -> "-l mem_free=2G,ram_free=2G"
- $arg =~ s/\$0/$cli_options{$option}/g;
- $cli_config_options{$option} = $arg;
- }
- } elsif ($_ =~ m/^option ([^=]+)=(\S+)\s?(.*)$/) {
- # Config option that does not need replacement
- # e.g. option gpu=0 -q all.q
- my $option = $1; # gpu
- my $value = $2; # 0
- my $arg = $3; # -q all.q
- if (exists $cli_options{$option}) {
- $cli_default_options{($option,$value)} = $arg;
- }
- } elsif ($_ =~ m/^default (\S+)=(\S+)/) {
- # Default options. Used for setting default values to options i.e. when
- # the user does not specify the option on the command line
- # e.g. default gpu=0
- my $option = $1; # gpu
- my $value = $2; # 0
- if (!exists $cli_options{$option}) {
- # If the user has specified this option on the command line, then we
- # don't have to do anything
- $cli_options{$option} = $value;
- }
- } else {
- print STDERR "queue.pl: unable to parse line '$line' in config file ($config)\n";
- exit(1);
- }
-}
-
-close(CONFIG);
-
-if ($read_command != 1) {
- print STDERR "queue.pl: config file ($config) does not contain the line \"command .*\"\n";
- exit(1);
-}
-
-for my $option (keys %cli_options) {
- if ($option eq "config") { next; }
- if ($option eq "max_jobs_run" && $array_job != 1) { next; }
- my $value = $cli_options{$option};
-
- if (exists $cli_default_options{($option,$value)}) {
- $qsub_opts .= "$cli_default_options{($option,$value)} ";
- } elsif (exists $cli_config_options{$option}) {
- $qsub_opts .= "$cli_config_options{$option} ";
- } else {
- if ($opened_config_file == 0) { $config = "default config file"; }
- die "queue.pl: Command line option $option not described in $config (or value '$value' not allowed)\n";
- }
-}
-
-my $cwd = getcwd();
-my $logfile = shift @ARGV;
-
-if ($array_job == 1 && $logfile !~ m/$jobname/
- && $jobend > $jobstart) {
- print STDERR "queue.pl: you are trying to run a parallel job but "
- . "you are putting the output into just one log file ($logfile)\n";
- exit(1);
-}
-
-#
-# Work out the command; quote escaping is done here.
-# Note: the rules for escaping stuff are worked out pretty
-# arbitrarily, based on what we want it to do. Some things that
-# we pass as arguments to queue.pl, such as "|", we want to be
-# interpreted by bash, so we don't escape them. Other things,
-# such as archive specifiers like 'ark:gunzip -c foo.gz|', we want
-# to be passed, in quotes, to the Kaldi program. Our heuristic
-# is that stuff with spaces in should be quoted. This doesn't
-# always work.
-#
-my $cmd = "";
-
-foreach my $x (@ARGV) {
- if ($x =~ m/^\S+$/) { $cmd .= $x . " "; } # If string contains no spaces, take
- # as-is.
- elsif ($x =~ m:\":) { $cmd .= "'$x' "; } # else if no dbl-quotes, use single
- else { $cmd .= "\"$x\" "; } # else use double.
-}
-
-#
-# Work out the location of the script file, and open it for writing.
-#
-my $dir = dirname($logfile);
-my $base = basename($logfile);
-my $qdir = "$dir/q";
-$qdir =~ s:/(log|LOG)/*q:/q:; # If qdir ends in .../log/q, make it just .../q.
-my $queue_logfile = "$qdir/$base";
-
-if (!-d $dir) { system "mkdir -p $dir 2>/dev/null"; } # another job may be doing this...
-if (!-d $dir) { die "Cannot make the directory $dir\n"; }
-# make a directory called "q",
-# where we will put the log created by qsub... normally this doesn't contain
-# anything interesting, evertyhing goes to $logfile.
-if (! -d "$qdir") {
- system "mkdir $qdir 2>/dev/null";
- sleep(5); ## This is to fix an issue we encountered in denominator lattice creation,
- ## where if e.g. the exp/tri2b_denlats/log/15/q directory had just been
- ## created and the job immediately ran, it would die with an error because nfs
- ## had not yet synced. I'm also decreasing the acdirmin and acdirmax in our
- ## NFS settings to something like 5 seconds.
-}
-
-my $queue_array_opt = "";
-if ($array_job == 1) { # It's an array job.
- $queue_array_opt = "-t $jobstart:$jobend";
- $logfile =~ s/$jobname/\$SGE_TASK_ID/g; # This variable will get
- # replaced by qsub, in each job, with the job-id.
- $cmd =~ s/$jobname/\$\{SGE_TASK_ID\}/g; # same for the command...
- $queue_logfile =~ s/\.?$jobname//; # the log file in the q/ subdirectory
- # is for the queue to put its log, and this doesn't need the task array subscript
- # so we remove it.
-}
-
-# queue_scriptfile is as $queue_logfile [e.g. dir/q/foo.log] but
-# with the suffix .sh.
-my $queue_scriptfile = $queue_logfile;
-($queue_scriptfile =~ s/\.[a-zA-Z]{1,5}$/.sh/) || ($queue_scriptfile .= ".sh");
-if ($queue_scriptfile !~ m:^/:) {
- $queue_scriptfile = $cwd . "/" . $queue_scriptfile; # just in case.
-}
-
-# We'll write to the standard input of "qsub" (the file-handle Q),
-# the job that we want it to execute.
-# Also keep our current PATH around, just in case there was something
-# in it that we need (although we also source ./path.sh)
-
-my $syncfile = "$qdir/done.$$";
-
-system("rm $queue_logfile $syncfile 2>/dev/null");
-#
-# Write to the script file, and then close it.
-#
-open(Q, ">$queue_scriptfile") || die "Failed to write to $queue_scriptfile";
-
-print Q "#!/bin/bash\n";
-print Q "cd $cwd\n";
-print Q ". ./path.sh\n";
-print Q "( echo '#' Running on \`hostname\`\n";
-print Q " echo '#' Started at \`date\`\n";
-print Q " echo -n '# '; cat <<EOF\n";
-print Q "$cmd\n"; # this is a way of echoing the command into a comment in the log file,
-print Q "EOF\n"; # without having to escape things like "|" and quote characters.
-print Q ") >$logfile\n";
-print Q "time1=\`date +\"%s\"\`\n";
-print Q " ( $cmd ) 2>>$logfile >>$logfile\n";
-print Q "ret=\$?\n";
-print Q "time2=\`date +\"%s\"\`\n";
-print Q "echo '#' Accounting: time=\$((\$time2-\$time1)) threads=$num_threads >>$logfile\n";
-print Q "echo '#' Finished at \`date\` with status \$ret >>$logfile\n";
-print Q "[ \$ret -eq 137 ] && exit 100;\n"; # If process was killed (e.g. oom) it will exit with status 137;
- # let the script return with status 100 which will put it to E state; more easily rerunnable.
-if ($array_job == 0) { # not an array job
- print Q "touch $syncfile\n"; # so we know it's done.
-} else {
- print Q "touch $syncfile.\$SGE_TASK_ID\n"; # touch a bunch of sync-files.
-}
-print Q "exit \$[\$ret ? 1 : 0]\n"; # avoid status 100 which grid-engine
-print Q "## submitted with:\n"; # treats specially.
-$qsub_cmd .= "-o $queue_logfile $qsub_opts $queue_array_opt $queue_scriptfile >>$queue_logfile 2>&1";
-print Q "# $qsub_cmd\n";
-if (!close(Q)) { # close was not successful... || die "Could not close script file $shfile";
- die "Failed to close the script file (full disk?)";
-}
-
-my $ret = system ($qsub_cmd);
-if ($ret != 0) {
- if ($sync && $ret == 256) { # this is the exit status when a job failed (bad exit status)
- if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/*/g; }
- print STDERR "queue.pl: job writing to $logfile failed\n";
- } else {
- print STDERR "queue.pl: error submitting jobs to queue (return status was $ret)\n";
- print STDERR "queue log file is $queue_logfile, command was $qsub_cmd\n";
- print STDERR `tail $queue_logfile`;
- }
- exit(1);
-}
-
-my $sge_job_id;
-if (! $sync) { # We're not submitting with -sync y, so we
- # need to wait for the jobs to finish. We wait for the
- # sync-files we "touched" in the script to exist.
- my @syncfiles = ();
- if (!defined $jobname) { # not an array job.
- push @syncfiles, $syncfile;
- } else {
- for (my $jobid = $jobstart; $jobid <= $jobend; $jobid++) {
- push @syncfiles, "$syncfile.$jobid";
- }
- }
- # We will need the sge_job_id, to check that job still exists
- { # Get the SGE job-id from the log file in q/
- open(L, "<$queue_logfile") || die "Error opening log file $queue_logfile";
- undef $sge_job_id;
- while (<L>) {
- if (m/Your job\S* (\d+)[. ].+ has been submitted/) {
- if (defined $sge_job_id) {
- die "Error: your job was submitted more than once (see $queue_logfile)";
- } else {
- $sge_job_id = $1;
- }
- }
- }
- close(L);
- if (!defined $sge_job_id) {
- die "Error: log file $queue_logfile does not specify the SGE job-id.";
- }
- }
- my $check_sge_job_ctr=1;
- #
- my $wait = 0.1;
- my $counter = 0;
- foreach my $f (@syncfiles) {
- # wait for them to finish one by one.
- while (! -f $f) {
- sleep($wait);
- $wait *= 1.2;
- if ($wait > 3.0) {
- $wait = 3.0; # never wait more than 3 seconds.
- # the following (.kick) commands are basically workarounds for NFS bugs.
- if (rand() < 0.25) { # don't do this every time...
- if (rand() > 0.5) {
- system("touch $qdir/.kick");
- } else {
- system("rm $qdir/.kick 2>/dev/null");
- }
- }
- if ($counter++ % 10 == 0) {
- # This seems to kick NFS in the teeth to cause it to refresh the
- # directory. I've seen cases where it would indefinitely fail to get
- # updated, even though the file exists on the server.
- # Only do this every 10 waits (every 30 seconds) though, or if there
- # are many jobs waiting they can overwhelm the file server.
- system("ls $qdir >/dev/null");
- }
- }
-
- # Check that the job exists in SGE. Job can be killed if duration
- # exceeds some hard limit, or in case of a machine shutdown.
- if (($check_sge_job_ctr++ % 10) == 0) { # Don't run qstat too often, avoid stress on SGE.
- if ( -f $f ) { next; }; #syncfile appeared: OK.
- $ret = system("qstat -j $sge_job_id >/dev/null 2>/dev/null");
- # system(...) : To get the actual exit value, shift $ret right by eight bits.
- if ($ret>>8 == 1) { # Job does not seem to exist
- # Don't consider immediately missing job as error, first wait some
- # time to make sure it is not just delayed creation of the syncfile.
-
- sleep(3);
- # Sometimes NFS gets confused and thinks it's transmitted the directory
- # but it hasn't, due to timestamp issues. Changing something in the
- # directory will usually fix that.
- system("touch $qdir/.kick");
- system("rm $qdir/.kick 2>/dev/null");
- if ( -f $f ) { next; } #syncfile appeared, ok
- sleep(7);
- system("touch $qdir/.kick");
- sleep(1);
- system("rm $qdir/.kick 2>/dev/null");
- if ( -f $f ) { next; } #syncfile appeared, ok
- sleep(60);
- system("touch $qdir/.kick");
- sleep(1);
- system("rm $qdir/.kick 2>/dev/null");
- if ( -f $f ) { next; } #syncfile appeared, ok
- $f =~ m/\.(\d+)$/ || die "Bad sync-file name $f";
- my $job_id = $1;
- if (defined $jobname) {
- $logfile =~ s/\$SGE_TASK_ID/$job_id/g;
- }
- my $last_line = `tail -n 1 $logfile`;
- if ($last_line =~ m/status 0$/ && (-M $logfile) < 0) {
- # if the last line of $logfile ended with "status 0" and
- # $logfile is newer than this program [(-M $logfile) gives the
- # time elapsed between file modification and the start of this
- # program], then we assume the program really finished OK,
- # and maybe something is up with the file system.
- print STDERR "**queue.pl: syncfile $f was not created but job seems\n" .
- "**to have finished OK. Probably your file-system has problems.\n" .
- "**This is just a warning.\n";
- last;
- } else {
- chop $last_line;
- print STDERR "queue.pl: Error, unfinished job no " .
- "longer exists, log is in $logfile, last line is '$last_line', " .
- "syncfile is $f, return status of qstat was $ret\n" .
- "Possible reasons: a) Exceeded time limit? -> Use more jobs!" .
- " b) Shutdown/Frozen machine? -> Run again!\n";
- exit(1);
- }
- } elsif ($ret != 0) {
- print STDERR "queue.pl: Warning: qstat command returned status $ret (qstat -j $sge_job_id,$!)\n";
- }
- }
- }
- }
- my $all_syncfiles = join(" ", @syncfiles);
- system("rm $all_syncfiles 2>/dev/null");
-}
-
-# OK, at this point we are synced; we know the job is done.
-# But we don't know about its exit status. We'll look at $logfile for this.
-# First work out an array @logfiles of file-locations we need to
-# read (just one, unless it's an array job).
-my @logfiles = ();
-if (!defined $jobname) { # not an array job.
- push @logfiles, $logfile;
-} else {
- for (my $jobid = $jobstart; $jobid <= $jobend; $jobid++) {
- my $l = $logfile;
- $l =~ s/\$SGE_TASK_ID/$jobid/g;
- push @logfiles, $l;
- }
-}
-
-my $num_failed = 0;
-my $status = 1;
-foreach my $l (@logfiles) {
- my @wait_times = (0.1, 0.2, 0.2, 0.3, 0.5, 0.5, 1.0, 2.0, 5.0, 5.0, 5.0, 10.0, 25.0);
- for (my $iter = 0; $iter <= @wait_times; $iter++) {
- my $line = `tail -10 $l 2>/dev/null`; # Note: although this line should be the last
- # line of the file, I've seen cases where it was not quite the last line because
- # of delayed output by the process that was running, or processes it had called.
- # so tail -10 gives it a little leeway.
- if ($line =~ m/with status (\d+)/) {
- $status = $1;
- last;
- } else {
- if ($iter < @wait_times) {
- sleep($wait_times[$iter]);
- } else {
- if (! -f $l) {
- print STDERR "Log-file $l does not exist.\n";
- } else {
- print STDERR "The last line of log-file $l does not seem to indicate the "
- . "return status as expected\n";
- }
- exit(1); # Something went wrong with the queue, or the
- # machine it was running on, probably.
- }
- }
- }
- # OK, now we have $status, which is the return-status of
- # the command in the job.
- if ($status != 0) { $num_failed++; }
-}
-if ($num_failed == 0) { exit(0); }
-else { # we failed.
- if (@logfiles == 1) {
- if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/$jobstart/g; }
- print STDERR "queue.pl: job failed with status $status, log is in $logfile\n";
- if ($logfile =~ m/JOB/) {
- print STDERR "queue.pl: probably you forgot to put JOB=1:\$nj in your script.\n";
- }
- } else {
- if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/*/g; }
- my $numjobs = 1 + $jobend - $jobstart;
- print STDERR "queue.pl: $num_failed / $numjobs failed, log is in $logfile\n";
- }
- exit(1);
-}
diff --git a/kaldi_decode/utils/run.pl b/kaldi_decode/utils/run.pl
deleted file mode 100755
index 6145a7a..0000000
--- a/kaldi_decode/utils/run.pl
+++ /dev/null
@@ -1,264 +0,0 @@
-#!/usr/bin/env perl
-use warnings; #sed replacement for -w perl parameter
-
-# In general, doing
-# run.pl some.log a b c is like running the command a b c in
-# the bash shell, and putting the standard error and output into some.log.
-# To run parallel jobs (backgrounded on the host machine), you can do (e.g.)
-# run.pl JOB=1:4 some.JOB.log a b c JOB is like running the command a b c JOB
-# and putting it in some.JOB.log, for each one. [Note: JOB can be any identifier].
-# If any of the jobs fails, this script will fail.
-
-# A typical example is:
-# run.pl some.log my-prog "--opt=foo bar" foo \| other-prog baz
-# and run.pl will run something like:
-# ( my-prog '--opt=foo bar' foo | other-prog baz ) >& some.log
-#
-# Basically it takes the command-line arguments, quotes them
-# as necessary to preserve spaces, and evaluates them with bash.
-# In addition it puts the command line at the top of the log, and
-# the start and end times of the command at the beginning and end.
-# The reason why this is useful is so that we can create a different
-# version of this program that uses a queueing system instead.
-
-# use Data::Dumper;
-
-@ARGV < 2 && die "usage: run.pl log-file command-line arguments...";
-
-
-$max_jobs_run = -1;
-$jobstart = 1;
-$jobend = 1;
-$ignored_opts = ""; # These will be ignored.
-
-# First parse an option like JOB=1:4, and any
-# options that would normally be given to
-# queue.pl, which we will just discard.
-
-if (@ARGV > 0) {
- while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) { # parse any options
- # that would normally go to qsub, but which will be ignored here.
- $switch = shift @ARGV;
- if ($switch eq "-V") {
- $ignored_opts .= "-V ";
- } elsif ($switch eq "--max-jobs-run" || $switch eq "-tc") {
- # we do support the option --max-jobs-run n, and its GridEngine form -tc n.
- $max_jobs_run = shift @ARGV;
- if (! ($max_jobs_run > 0)) {
- die "run.pl: invalid option --max-jobs-run $max_jobs_run";
- }
- } else {
- $option = shift @ARGV;
- if ($switch eq "-sync" && $option =~ m/^[yY]/) {
- $ignored_opts .= "-sync "; # Note: in the
- # corresponding code in queue.pl it says instead, just "$sync = 1;".
- }
- $ignored_opts .= "$switch $option ";
- if ($switch eq "-pe") { # e.g. -pe smp 5
- $option2 = shift @ARGV;
- $ignored_opts .= "$option2 ";
- }
- }
- }
- if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:10
- $jobname = $1;
- $jobstart = $2;
- $jobend = $3;
- shift;
- if ($jobstart > $jobend) {
- die "run.pl: invalid job range $ARGV[0]";
- }
- if ($jobstart <= 0) {
- die "run.pl: invalid job range $ARGV[0], start must be strictly positive (this is required for GridEngine compatibility).";
- }
- } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1.
- $jobname = $1;
- $jobstart = $2;
- $jobend = $2;
- shift;
- } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) {
- print STDERR "run.pl: Warning: suspicious first argument to run.pl: $ARGV[0]\n";
- }
-}
-
-# Users found this message confusing so we are removing it.
-# if ($ignored_opts ne "") {
-# print STDERR "run.pl: Warning: ignoring options \"$ignored_opts\"\n";
-# }
-
-if ($max_jobs_run == -1) { # If --max-jobs-run option not set,
- # then work out the number of processors if possible,
- # and set it based on that.
- $max_jobs_run = 0;
- if (open(P, "</proc/cpuinfo")) { # Linux
- while (<P>) { if (m/^processor/) { $max_jobs_run++; } }
- if ($max_jobs_run == 0) {
- print STDERR "run.pl: Warning: failed to detect any processors from /proc/cpuinfo\n";
- $max_jobs_run = 10; # reasonable default.
- }
- close(P);
- } elsif (open(P, "sysctl -a |")) { # BSD/Darwin
- while (<P>) {
- if (m/hw\.ncpu\s*[:=]\s*(\d+)/) { # hw.ncpu = 4, or hw.ncpu: 4
- $max_jobs_run = $1;
- last;
- }
- }
- close(P);
- if ($max_jobs_run == 0) {
- print STDERR "run.pl: Warning: failed to detect any processors from sysctl -a\n";
- $max_jobs_run = 10; # reasonable default.
- }
- } else {
- # allow at most 32 jobs at once, on non-UNIX systems; change this code
- # if you need to change this default.
- $max_jobs_run = 32;
- }
- # The just-computed value of $max_jobs_run is just the number of processors
- # (or our best guess); and if it happens that the number of jobs we need to
- # run is just slightly above $max_jobs_run, it will make sense to increase
- # $max_jobs_run to equal the number of jobs, so we don't have a small number
- # of leftover jobs.
- $num_jobs = $jobend - $jobstart + 1;
- if ($num_jobs > $max_jobs_run && $num_jobs < 1.4 * $max_jobs_run) {
- $max_jobs_run = $num_jobs;
- }
-}
-
-$logfile = shift @ARGV;
-
-if (defined $jobname && $logfile !~ m/$jobname/ &&
- $jobend > $jobstart) {
- print STDERR "run.pl: you are trying to run a parallel job but "
- . "you are putting the output into just one log file ($logfile)\n";
- exit(1);
-}
-
-$cmd = "";
-
-foreach $x (@ARGV) {
- if ($x =~ m/^\S+$/) { $cmd .= $x . " "; }
- elsif ($x =~ m:\":) { $cmd .= "'$x' "; }
- else { $cmd .= "\"$x\" "; }
-}
-
-#$Data::Dumper::Indent=0;
-$ret = 0;
-$numfail = 0;
-%active_pids=();
-
-use POSIX ":sys_wait_h";
-for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) {
- if (scalar(keys %active_pids) >= $max_jobs_run) {
-
- # Let's wait for a change in any child's status
- # Then we have to work out which child finished
- $r = waitpid(-1, 0);
- $code = $?;
- if ($r < 0 ) { die "run.pl: Error waiting for child process"; } # should never happen.
- if ( defined $active_pids{$r} ) {
- $jid=$active_pids{$r};
- $fail[$jid]=$code;
- if ($code !=0) { $numfail++;}
- delete $active_pids{$r};
- # print STDERR "Finished: $r/$jid " . Dumper(\%active_pids) . "\n";
- } else {
- die "run.pl: Cannot find the PID of the chold process that just finished.";
- }
-
- # In theory we could do a non-blocking waitpid over all jobs running just
- # to find out if only one or more jobs finished during the previous waitpid()
- # However, we just omit this and will reap the next one in the next pass
- # through the for(;;) cycle
- }
- $childpid = fork();
- if (!defined $childpid) { die "run.pl: Error forking in run.pl (writing to $logfile)"; }
- if ($childpid == 0) { # We're in the child... this branch
- # executes the job and returns (possibly with an error status).
- if (defined $jobname) {
- $cmd =~ s/$jobname/$jobid/g;
- $logfile =~ s/$jobname/$jobid/g;
- }
- system("mkdir -p `dirname $logfile` 2>/dev/null");
- open(F, ">$logfile") || die "run.pl: Error opening log file $logfile";
- print F "# " . $cmd . "\n";
- print F "# Started at " . `date`;
- $starttime = `date +'%s'`;
- print F "#\n";
- close(F);
-
- # Pipe into bash.. make sure we're not using any other shell.
- open(B, "|bash") || die "run.pl: Error opening shell command";
- print B "( " . $cmd . ") 2>>$logfile >> $logfile";
- close(B); # If there was an error, exit status is in $?
- $ret = $?;
-
- $lowbits = $ret & 127;
- $highbits = $ret >> 8;
- if ($lowbits != 0) { $return_str = "code $highbits; signal $lowbits" }
- else { $return_str = "code $highbits"; }
-
- $endtime = `date +'%s'`;
- open(F, ">>$logfile") || die "run.pl: Error opening log file $logfile (again)";
- $enddate = `date`;
- chop $enddate;
- print F "# Accounting: time=" . ($endtime - $starttime) . " threads=1\n";
- print F "# Ended ($return_str) at " . $enddate . ", elapsed time " . ($endtime-$starttime) . " seconds\n";
- close(F);
- exit($ret == 0 ? 0 : 1);
- } else {
- $pid[$jobid] = $childpid;
- $active_pids{$childpid} = $jobid;
- # print STDERR "Queued: " . Dumper(\%active_pids) . "\n";
- }
-}
-
-# Now that we have submitted all the jobs, let's wait until they all finish
-foreach $child (keys %active_pids) {
- $jobid=$active_pids{$child};
- $r = waitpid($pid[$jobid], 0);
- $code = $?;
- if ($r == -1) { die "run.pl: Error waiting for child process"; } # should never happen.
- if ($r != 0) { $fail[$jobid]=$code; $numfail++ if $code!=0; } # Completed successfully
-}
-
-# Some sanity checks:
-# The $fail array should not contain undefined codes
-# The number of non-zeros in that array should be equal to $numfail
-# We cannot do foreach() here, as the JOB ids do not necessarily start by zero
-$failed_jids=0;
-for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) {
- $job_return = $fail[$jobid];
- if (not defined $job_return ) {
- # print Dumper(\@fail);
-
- die "run.pl: Sanity check failed: we have indication that some jobs are running " .
- "even after we waited for all jobs to finish" ;
- }
- if ($job_return != 0 ){ $failed_jids++;}
-}
-if ($failed_jids != $numfail) {
- die "run.pl: Sanity check failed: cannot find out how many jobs failed ($failed_jids x $numfail)."
-}
-if ($numfail > 0) { $ret = 1; }
-
-if ($ret != 0) {
- $njobs = $jobend - $jobstart + 1;
- if ($njobs == 1) {
- if (defined $jobname) {
- $logfile =~ s/$jobname/$jobstart/; # only one numbered job, so replace name with
- # that job.
- }
- print STDERR "run.pl: job failed, log is in $logfile\n";
- if ($logfile =~ m/JOB/) {
- print STDERR "run.pl: probably you forgot to put JOB=1:\$nj in your script.";
- }
- }
- else {
- $logfile =~ s/$jobname/*/g;
- print STDERR "run.pl: $numfail / $njobs failed, log is in $logfile\n";
- }
-}
-
-
-exit ($ret);
diff --git a/kaldi_decode/utils/split_data.sh b/kaldi_decode/utils/split_data.sh
deleted file mode 100755
index 941890c..0000000
--- a/kaldi_decode/utils/split_data.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/bin/bash
-# Copyright 2010-2013 Microsoft Corporation
-# Johns Hopkins University (Author: Daniel Povey)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
-# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
-# MERCHANTABLITY OR NON-INFRINGEMENT.
-# See the Apache 2 License for the specific language governing permissions and
-# limitations under the License.
-
-split_per_spk=true
-if [ "$1" == "--per-utt" ]; then
- split_per_spk=false
- shift
-fi
-
-if [ $# != 2 ]; then
- echo "Usage: split_data.sh [--per-utt] <data-dir> <num-to-split>"
- echo "This script will not split the data-dir if it detects that the output is newer than the input."
- echo "By default it splits per speaker (so each speaker is in only one split dir),"
- echo "but with the --per-utt option it will ignore the speaker information while splitting."
- exit 1
-fi
-
-data=$1
-numsplit=$2
-
-if [ $numsplit -le 0 ]; then
- echo "Invalid num-split argument $numsplit";
- exit 1;
-fi
-
-n=0;
-feats=""
-wavs=""
-utt2spks=""
-texts=""
-
-nu=`cat $data/utt2spk | wc -l`
-nf=`cat $data/feats.scp 2>/dev/null | wc -l`
-nt=`cat $data/text 2>/dev/null | wc -l` # take it as zero if no such file
-if [ -f $data/feats.scp ] && [ $nu -ne $nf ]; then
- echo "** split_data.sh: warning, #lines is (utt2spk,feats.scp) is ($nu,$nf); you can "
- echo "** use utils/fix_data_dir.sh $data to fix this."
-fi
-if [ -f $data/text ] && [ $nu -ne $nt ]; then
- echo "** split_data.sh: warning, #lines is (utt2spk,text) is ($nu,$nt); you can "
- echo "** use utils/fix_data_dir.sh to fix this."
-fi
-
-s1=$data/split$numsplit/1
-if [ ! -d $s1 ]; then
- need_to_split=true
-else
- need_to_split=false
- for f in utt2spk spk2utt spk2warp feats.scp text wav.scp cmvn.scp spk2gender \
- vad.scp segments reco2file_and_channel utt2lang; do
- if [[ -f $data/$f && ( ! -f $s1/$f || $s1/$f -ot $data/$f ) ]]; then
- need_to_split=true
- fi
- done
-fi
-
-if ! $need_to_split; then
- exit 0;
-fi
-
-for n in `seq $numsplit`; do
- mkdir -p $data/split$numsplit/$n
- utt2spks="$utt2spks $data/split$numsplit/$n/utt2spk"
-done
-
-if $split_per_spk; then
- utt2spk_opt="--utt2spk=$data/utt2spk"
-else
- utt2spk_opt=
-fi
-
-# If lockfile is not installed, just don't lock it. It's not a big deal.
-which lockfile >&/dev/null && lockfile -l 60 $data/.split_lock
-
-utils/split_scp.pl $utt2spk_opt $data/utt2spk $utt2spks || exit 1
-
-for n in `seq $numsplit`; do
- dsn=$data/split$numsplit/$n
- utils/utt2spk_to_spk2utt.pl $dsn/utt2spk > $dsn/spk2utt || exit 1;
-done
-
-maybe_wav_scp=
-if [ ! -f $data/segments ]; then
- maybe_wav_scp=wav.scp # If there is no segments file, then wav file is
- # indexed per utt.
-fi
-
-# split some things that are indexed by utterance.
-for f in feats.scp text vad.scp utt2lang $maybe_wav_scp; do
- if [ -f $data/$f ]; then
- utils/filter_scps.pl JOB=1:$numsplit \
- $data/split$numsplit/JOB/utt2spk $data/$f $data/split$numsplit/JOB/$f || exit 1;
- fi
-done
-
-# split some things that are indexed by speaker
-for f in spk2gender spk2warp cmvn.scp; do
- if [ -f $data/$f ]; then
- utils/filter_scps.pl JOB=1:$numsplit \
- $data/split$numsplit/JOB/spk2utt $data/$f $data/split$numsplit/JOB/$f || exit 1;
- fi
-done
-
-for n in `seq $numsplit`; do
- dsn=$data/split$numsplit/$n
- if [ -f $data/segments ]; then
- utils/filter_scp.pl $dsn/utt2spk $data/segments > $dsn/segments
- awk '{print $2;}' $dsn/segments | sort | uniq > $data/tmp.reco # recording-ids.
- if [ -f $data/reco2file_and_channel ]; then
- utils/filter_scp.pl $data/tmp.reco $data/reco2file_and_channel > $dsn/reco2file_and_channel
- fi
- if [ -f $data/wav.scp ]; then
- utils/filter_scp.pl $data/tmp.reco $data/wav.scp >$dsn/wav.scp
- fi
- rm $data/tmp.reco
- fi # else it would have been handled above, see maybe_wav.
-done
-
-rm -f $data/.split_lock
-
-exit 0
diff --git a/kaldi_io/Makefile b/kaldi_io/Makefile
index 7b0c0bd..abfa8e6 100644
--- a/kaldi_io/Makefile
+++ b/kaldi_io/Makefile
@@ -1,6 +1,12 @@
-# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --share)
-KDIR := /slfs6/users/ymz09/kaldi/
+ifndef LUA_BINDIR
+$(error Please build the package via luarocks: `luarocks make`)
+endif
+ifndef KALDI_BASE
+$(error KALDI_BASE is not set)
+endif
+
+KDIR := $(KALDI_BASE)
SHELL := /bin/bash
BUILD_DIR := $(CURDIR)/build
INC_PATH := $(LUA_BINDIR)/../include/
@@ -20,19 +26,19 @@ OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
LUA_SUBDIR := $(addprefix $(LUA_DIR)/,$(SUBDIR))
LUA_LIBS := $(addprefix $(LUA_DIR)/,$(LUA_LIBS))
LIB_PATH := $(LUA_BINDIR)/../lib
+LUALIB_PATH := $(LUA_BINDIR)/../lib/lua/5.1/
build: $(OBJ_DIR) $(OBJ_SUBDIR) $(OBJS) $(OBJ_DIR)/src/test
install: $(LUA_DIR) $(LUA_SUBDIR) $(LUA_LIBS) $(LIBS)
include $(KDIR)/src/kaldi.mk
-KL := $(KDIR)/src/feat/kaldi-feat.a $(KDIR)/src/matrix/kaldi-matrix.a $(KDIR)/src/base/kaldi-base.a $(KDIR)/src/util/kaldi-util.a $(KDIR)/src/hmm/kaldi-hmm.a $(KDIR)/src/tree/kaldi-tree.a -lcblas -llapack_atlas
-
+KL := $(KDIR)/src/feat/kaldi-feat.a $(KDIR)/src/matrix/kaldi-matrix.a $(KDIR)/src/base/kaldi-base.a $(KDIR)/src/util/kaldi-util.a $(KDIR)/src/hmm/kaldi-hmm.a $(KDIR)/src/tree/kaldi-tree.a $(BLAS_LDFLAGS)
$(OBJ_DIR) $(LUA_DIR) $(OBJ_SUBDIR) $(LUA_SUBDIR):
-mkdir -p $@
$(LUA_DIR)/%.lua: %.lua
cp $< $@
$(LIBS): $(OBJ_DIR)/src/cwrapper_kaldi.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/init.o
- gcc -shared -fPIC -o $@ $(OBJ_DIR)/src/cwrapper_kaldi.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/init.o -lstdc++ -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -lluaT $(KL)
+ gcc -shared -fPIC -o $@ $(OBJ_DIR)/src/cwrapper_kaldi.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/init.o -lstdc++ -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -Wl,-rpath=$(LUALIB_PATH) -L$(LUALIB_PATH) -lnerv -lluaT $(KL)
$(OBJ_DIR)/src/cwrapper_kaldi.o: src/cwrapper_kaldi.cpp
g++ -o $@ -c $< -DHAVE_ATLAS $(KALDIINCLUDE) -g -fPIC $(INCLUDE) -DKALDI_DOUBLEPRECISION=0 -msse2 -DHAVE_POSIX_MEMALIGN
$(OBJ_DIR)/src/test: $(OBJ_DIR)/src/cwrapper_kaldi.o $(OBJ_DIR)/src/test.o
@@ -40,5 +46,5 @@ $(OBJ_DIR)/src/test: $(OBJ_DIR)/src/cwrapper_kaldi.o $(OBJ_DIR)/src/test.o
$(OBJ_DIR)/%.o: %.c
gcc -o $@ -c $< -g $(INCLUDE) -fPIC
clean:
- -rm $(OBJ_DIR)/src/*.o
+ -rm -r $(OBJ_DIR)
diff --git a/kaldi_io/init.c b/kaldi_io/init.c
index fe2f967..87682d2 100644
--- a/kaldi_io/init.c
+++ b/kaldi_io/init.c
@@ -1,4 +1,4 @@
-#include "../nerv/common.h"
+#include "nerv/lib/common.h"
#include <stdio.h>
extern void kaldi_io_init(lua_State *L);
diff --git a/kaldi_io/init.lua b/kaldi_io/init.lua
index 9f3ad55..751247a 100644
--- a/kaldi_io/init.lua
+++ b/kaldi_io/init.lua
@@ -7,6 +7,11 @@ function KaldiReader:__init(global_conf, reader_conf)
self.frm_ext = reader_conf.frm_ext
self.need_key = reader_conf.need_key -- for sequence training
self.gconf = global_conf
+ if self.gconf.use_cpu then
+ self.mat_type = self.gconf.mmat_type
+ else
+ self.mat_type = self.gconf.cumat_type
+ end
self.debug = global_conf.debug
if self.debug == nil then
self.debug = false
@@ -42,12 +47,15 @@ function KaldiReader:get_data()
end
local res = {}
-- read Kaldi feature
- local raw = self.gconf.cumat_type.new_from_host(self.feat_repo:cur_utter(self.debug))
+ local raw = self.feat_repo:cur_utter(self.debug)
+ if not self.gconf.use_cpu then
+ raw = self.gconf.cumat_type.new_from_host(raw)
+ end
local rearranged
if self.frm_ext and self.frm_ext > 0 then
local step = self.frm_ext * 2 + 1
-- expand the feature
- local expanded = self.gconf.cumat_type(raw:nrow(), raw:ncol() * step)
+ local expanded = self.mat_type(raw:nrow(), raw:ncol() * step)
expanded:expand_frm(raw, self.frm_ext)
-- rearrange the feature (``transpose'' operation in TNet)
if self.gconf.rearrange then
@@ -64,8 +72,12 @@ function KaldiReader:get_data()
feat_utter = self.gconf.mmat_type(rearranged:nrow() - self.gconf.frm_trim * 2, rearranged:ncol())
rearranged:copy_toh(feat_utter, self.gconf.frm_trim, rearranged:nrow() - self.gconf.frm_trim)
else
- feat_utter = self.gconf.mmat_type(rearranged:nrow(), rearranged:ncol())
- rearranged:copy_toh(feat_utter)
+ if self.gconf.use_cpu then
+ feat_utter = rearranged
+ else
+ feat_utter = self.gconf.mmat_type(rearranged:nrow(), rearranged:ncol())
+ rearranged:copy_toh(feat_utter)
+ end
end
res[self.feat_id] = feat_utter
if self.need_key then
@@ -76,8 +88,7 @@ function KaldiReader:get_data()
local lab_utter = repo:get_utter(self.feat_repo,
feat_utter:nrow(),
self.debug)
- -- need provide 'gconf.decoding = true' while decoding
- if not self.gconf.decoding and lab_utter == nil then
+ if lab_utter == nil then
fail_to_read_alignment = true
end
res[id] = lab_utter
@@ -104,7 +115,7 @@ function KaldiReader:get_data()
self.feat_repo:next()
collectgarbage("collect")
if fail_to_read_alignment then
- nerv.info("[Kaldi IO] utterance %s alignment not found, skip it.", self.feat_repo:key())
+ nerv.info("[kaldi] utterance %s alignment not found, skip it.", self.feat_repo:key())
res = self:get_data()
end
return res
diff --git a/kaldi_io/kaldi_io-scm-1.rockspec b/kaldi_io/kaldi_io-scm-1.rockspec
index 7c9f8d8..5a97cff 100644
--- a/kaldi_io/kaldi_io-scm-1.rockspec
+++ b/kaldi_io/kaldi_io-scm-1.rockspec
@@ -4,7 +4,7 @@ source = {
url = "https://github.com/Nerv-SJTU/nerv-speech.git"
}
description = {
- summary = "Kaldi I/O support (Kaldi I/O wrapper) for Nerv",
+ summary = "Kaldi I/O support (Kaldi I/O wrapper) for NERV",
detailed = [[
]],
homepage = "https://github.com/Nerv-SJTU/nerv-speech",
diff --git a/kaldi_io/src/cwrapper_kaldi.cpp b/kaldi_io/src/cwrapper_kaldi.cpp
index 542f1d0..788128b 100644
--- a/kaldi_io/src/cwrapper_kaldi.cpp
+++ b/kaldi_io/src/cwrapper_kaldi.cpp
@@ -10,10 +10,11 @@ extern "C" {
#include "cwrapper_kaldi.h"
#include "string.h"
#include "assert.h"
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
+#include "nerv/lib/matrix/mmatrix.h"
- extern Matrix *nerv_matrix_host_float_create(long nrow, long ncol, Status *status);
- extern Matrix *nerv_matrix_host_double_create(long nrow, long ncol, Status *status);
+ extern Matrix *nerv_matrix_host_float_create(long nrow, long ncol, MContext *context, Status *status);
+ extern Matrix *nerv_matrix_host_double_create(long nrow, long ncol, MContext *context, Status *status);
struct KaldiFeatureRepo {
kaldi::SequentialBaseFloatMatrixReader* feature_reader;
@@ -26,7 +27,8 @@ extern "C" {
return repo;
}
- Matrix *kaldi_feature_repo_read_utterance(KaldiFeatureRepo *repo, lua_State *L, int debug) {
+ Matrix *kaldi_feature_repo_read_utterance(KaldiFeatureRepo *repo, lua_State *L,
+ int debug, MContext *context) {
Matrix *mat; /* nerv implementation */
repo->utt = repo->feature_reader->Key();
@@ -37,9 +39,9 @@ extern "C" {
Status status;
assert(sizeof(BaseFloat) == sizeof(float));
if(sizeof(BaseFloat) == sizeof(float))
- mat = nerv_matrix_host_float_create(n, m, &status);
+ mat = nerv_matrix_host_float_create(n, m, context, &status);
else if(sizeof(BaseFloat) == sizeof(double))
- mat = nerv_matrix_host_double_create(n, m, &status);
+ mat = nerv_matrix_host_double_create(n, m, context, &status);
NERV_LUA_CHECK_STATUS(L, status);
size_t stride = mat->stride;
if (debug)
@@ -80,26 +82,33 @@ extern "C" {
KaldiLookupFeatureRepo *kaldi_lookup_feature_repo_new(const char *feature_rspecifier, const char *map_rspecifier) {
KaldiLookupFeatureRepo *repo = new KaldiLookupFeatureRepo();
- kaldi::SequentialBaseFloatMatrixReader feature_reader = kaldi::SequentialBaseFloatMatrixReader(string(feature_rspecifier));
- for (; !feature_reader.Done(); feature_reader.Next())
+ kaldi::SequentialBaseFloatMatrixReader *feature_reader = \
+ new kaldi::SequentialBaseFloatMatrixReader(string(feature_rspecifier));
+ for (; !feature_reader->Done(); feature_reader->Next())
{
- const std::string &key = feature_reader.Key();
- const kaldi::Matrix<BaseFloat> &feat = feature_reader.Value();
+ const std::string &key = feature_reader->Key();
+ const kaldi::Matrix<BaseFloat> &feat = feature_reader->Value();
if (repo->key2mat.find(key) != repo->key2mat.end())
fprintf(stderr, "[kaldi] warning: lookup feature for key %s already exists", key.c_str());
repo->key2mat[key] = feat;
}
- kaldi::SequentialTokenVectorReader map_reader = kaldi::SequentialTokenVectorReader(string(map_rspecifier));
- for (; !map_reader.Done(); map_reader.Next())
+ delete feature_reader;
+ kaldi::SequentialTokenVectorReader *map_reader = \
+ new kaldi::SequentialTokenVectorReader(string(map_rspecifier));
+ for (; !map_reader->Done(); map_reader->Next())
{
- const std::vector<std::string> target = map_reader.Value();
+ const std::vector<std::string> target = map_reader->Value();
assert(target.size() >= 1);
- repo->map[map_reader.Key()] = *target.begin();
+ repo->map[map_reader->Key()] = *target.begin();
}
+ delete map_reader;
return repo;
}
- Matrix *kaldi_lookup_feature_repo_read_utterance(KaldiLookupFeatureRepo *repo, KaldiFeatureRepo *frepo, int nframes, lua_State *L, int debug) {
+ Matrix *kaldi_lookup_feature_repo_read_utterance(KaldiLookupFeatureRepo *repo,
+ KaldiFeatureRepo *frepo,
+ int nframes, lua_State *L,
+ int debug, MContext *context) {
Matrix *mat; /* nerv implementation */
StringToString_t::iterator mit = repo->map.find(frepo->utt);
if (mit == repo->map.end())
@@ -115,9 +124,9 @@ extern "C" {
Status status;
assert(sizeof(BaseFloat) == sizeof(float));
if(sizeof(BaseFloat) == sizeof(float))
- mat = nerv_matrix_host_float_create(n, m, &status);
+ mat = nerv_matrix_host_float_create(n, m, context, &status);
else if(sizeof(BaseFloat) == sizeof(double))
- mat = nerv_matrix_host_double_create(n, m, &status);
+ mat = nerv_matrix_host_double_create(n, m, context, &status);
NERV_LUA_CHECK_STATUS(L, status);
size_t stride = mat->stride;
if (debug)
@@ -149,13 +158,14 @@ extern "C" {
Matrix *kaldi_label_repo_read_utterance(KaldiLabelRepo *repo, KaldiFeatureRepo *frepo, int nframes,
lua_State *L,
- int debug) {
+ int debug, MContext *context) {
Matrix *mat = NULL;
/* check if the alignment of the utterance exists, otherwise return NULL */
kaldi::Posterior targets;
if (repo->targets_reader->HasKey(frepo->utt))
targets = repo->targets_reader->Value(frepo->utt);
- else return mat;
+ else
+ return mat;
int n = targets.size() < nframes ? targets.size() : nframes;
int m = (int)targets[0].size();
@@ -163,9 +173,9 @@ extern "C" {
Status status;
assert(sizeof(BaseFloat) == sizeof(float));
if(sizeof(BaseFloat) == sizeof(float))
- mat = nerv_matrix_host_float_create(n, m, &status);
+ mat = nerv_matrix_host_float_create(n, m, context, &status);
else if(sizeof(BaseFloat) == sizeof(double))
- mat = nerv_matrix_host_double_create(n, m, &status);
+ mat = nerv_matrix_host_double_create(n, m, context, &status);
NERV_LUA_CHECK_STATUS(L, status);
size_t stride = mat->stride;
if (debug)
diff --git a/kaldi_io/src/cwrapper_kaldi.h b/kaldi_io/src/cwrapper_kaldi.h
index c8a7a25..db20087 100644
--- a/kaldi_io/src/cwrapper_kaldi.h
+++ b/kaldi_io/src/cwrapper_kaldi.h
@@ -1,7 +1,7 @@
#ifndef NERV_kaldi_KALDI_IO_CWRAPPER
#define NERV_kaldi_KALDI_IO_CWRAPPER
-#include "nerv/matrix/matrix.h"
-#include "nerv/common.h"
+#include "nerv/lib/matrix/mmatrix.h"
+#include "nerv/lib/common.h"
#ifdef __cplusplus
extern "C" {
#endif
@@ -9,7 +9,8 @@ extern "C" {
typedef struct KaldiFeatureRepo KaldiFeatureRepo;
KaldiFeatureRepo *kaldi_feature_repo_new(const char *);
- Matrix *kaldi_feature_repo_read_utterance(KaldiFeatureRepo *repo, lua_State *L, int debug);
+ Matrix *kaldi_feature_repo_read_utterance(KaldiFeatureRepo *repo, lua_State *L,
+ int debug, MContext *context);
void kaldi_feature_repo_next(KaldiFeatureRepo *repo);
int kaldi_feature_repo_is_end(KaldiFeatureRepo *repo);
const char *kaldi_feature_repo_key(KaldiFeatureRepo *repo);
@@ -21,14 +22,18 @@ extern "C" {
Matrix *kaldi_label_repo_read_utterance(KaldiLabelRepo *repo, KaldiFeatureRepo *, int,
lua_State *L,
- int debug);
+ int debug,
+ MContext *context);
void kaldi_label_repo_destroy(KaldiLabelRepo *repo);
typedef struct KaldiLookupFeatureRepo KaldiLookupFeatureRepo;
KaldiLookupFeatureRepo *kaldi_lookup_feature_repo_new(const char *, const char *);
- Matrix *kaldi_lookup_feature_repo_read_utterance(KaldiLookupFeatureRepo *repo, KaldiFeatureRepo *frepo, int nframes, lua_State *L, int debug);
+ Matrix *kaldi_lookup_feature_repo_read_utterance(KaldiLookupFeatureRepo *repo,
+ KaldiFeatureRepo *frepo,
+ int nframes, lua_State *L,
+ int debug, MContext *context);
void kaldi_lookup_feature_repo_destroy(KaldiLookupFeatureRepo *repo);
#ifdef __cplusplus
diff --git a/kaldi_io/src/init.c b/kaldi_io/src/init.c
index 529895b..e8b4ea6 100644
--- a/kaldi_io/src/init.c
+++ b/kaldi_io/src/init.c
@@ -1,4 +1,5 @@
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
+#include "nerv/matrix/matrix.h"
#include "cwrapper_kaldi.h"
#include <stdio.h>
@@ -21,12 +22,14 @@ static int feat_repo_destroy(lua_State *L) {
}
static int feat_repo_current_utterance(lua_State *L) {
+ MContext *context;
+ MMATRIX_GET_CONTEXT(L, 3);
KaldiFeatureRepo *repo = luaT_checkudata(L, 1, nerv_kaldi_feat_repo_tname);
int debug;
if (!lua_isboolean(L, 2))
nerv_error(L, "debug flag should be a boolean");
debug = lua_toboolean(L, 2);
- Matrix *utter = kaldi_feature_repo_read_utterance(repo, L, debug);
+ Matrix *utter = kaldi_feature_repo_read_utterance(repo, L, debug, context);
luaT_pushudata(L, utter, nerv_matrix_host_float_tname);
return 1;
}
@@ -72,6 +75,8 @@ static int lookup_feat_repo_destroy(lua_State *L) {
}
static int lookup_feat_repo_read_utterance(lua_State *L) {
+ MContext *context;
+ MMATRIX_GET_CONTEXT(L, 5);
KaldiLookupFeatureRepo *repo = luaT_checkudata(L, 1, nerv_kaldi_lookup_feat_repo_tname);
KaldiFeatureRepo *feat_repo = luaT_checkudata(L, 2, nerv_kaldi_feat_repo_tname);
int nframes, debug;
@@ -81,7 +86,8 @@ static int lookup_feat_repo_read_utterance(lua_State *L) {
if (!lua_isboolean(L, 4))
nerv_error(L, "debug flag should be a boolean");
debug = lua_toboolean(L, 4);
- Matrix *utter = kaldi_lookup_feature_repo_read_utterance(repo, feat_repo, nframes, L, debug);
+ Matrix *utter = kaldi_lookup_feature_repo_read_utterance(repo, feat_repo,
+ nframes, L, debug, context);
luaT_pushudata(L, utter, nerv_matrix_host_float_tname);
return 1;
}
@@ -100,6 +106,8 @@ static int label_repo_new(lua_State *L) {
}
static int label_repo_read_utterance(lua_State *L) {
+ MContext *context;
+ MMATRIX_GET_CONTEXT(L, 5);
KaldiLabelRepo *repo = luaT_checkudata(L, 1, nerv_kaldi_label_repo_tname);
KaldiFeatureRepo *feat_repo = luaT_checkudata(L, 2, nerv_kaldi_feat_repo_tname);
int nframes, debug;
@@ -109,7 +117,8 @@ static int label_repo_read_utterance(lua_State *L) {
if (!lua_isboolean(L, 4))
nerv_error(L, "debug flag should be a boolean");
debug = lua_toboolean(L, 4);
- Matrix *utter = kaldi_label_repo_read_utterance(repo, feat_repo, nframes, L, debug);
+ Matrix *utter = kaldi_label_repo_read_utterance(repo, feat_repo, nframes,
+ L, debug, context);
luaT_pushudata(L, utter, nerv_matrix_host_float_tname);
return 1;
}
diff --git a/kaldi_io/src/test.c b/kaldi_io/src/test.c
index e92b4c9..e3368df 100644
--- a/kaldi_io/src/test.c
+++ b/kaldi_io/src/test.c
@@ -7,8 +7,11 @@
**********************************************************************************/
#include "cwrapper_kaldi.h"
+#include "nerv/lib/matrix/mmatrix.h"
#include <stdio.h>
+MContext context;
+
char feature_rspecifier[] = {"ark:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs6/users/ymz09/swb_ivec/train_bp.scp ark:- |"};
void print_nerv_matrix(Matrix *mat) {
@@ -30,13 +33,13 @@ int main(int argc, char *argv[])
Matrix *mat;
KaldiFeatureRepo *repo = kaldi_feature_repo_new(feature_rspecifier);
- mat = kaldi_feature_repo_read_utterance(repo, NULL, 1);
+ mat = kaldi_feature_repo_read_utterance(repo, NULL, 1, &context);
printf("1st uttrance\n");
print_nerv_matrix(mat);
kaldi_feature_repo_next(repo);
- mat = kaldi_feature_repo_read_utterance(repo, NULL, 1);
+ mat = kaldi_feature_repo_read_utterance(repo, NULL, 1, &context);
printf("2nd uttrance\n");
print_nerv_matrix(mat);
diff --git a/kaldi_io/tools/convert_from_kaldi_pretrain.sh b/kaldi_io/tools/convert_from_kaldi_pretrain.sh
new file mode 100755
index 0000000..78f532f
--- /dev/null
+++ b/kaldi_io/tools/convert_from_kaldi_pretrain.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+hid_dim=1024
+hid_num=6
+pretrain_dir=exp/dnn4_pretrain-dbn
+nerv_kaldi=/speechlab/users/mfy43/nerv/speech/kaldi_io/
+
+[ -f path.sh ] && . ./path.sh
+. parse_options.sh || exit 1;
+
+data=$1
+data_cv=$2
+lang=$3
+alidir=$4
+alidir_cv=$5
+dir=$6
+
+[[ -z $data_fmllr ]] && data_fmllr=data-fmllr-tri3
+[[ -z $alidir ]] && alidir=exp/tri3_ali
+[[ -z $dir ]] && dir=exp/dnn4_nerv_prepare
+[[ -z $data ]] && data=$data_fmllr/train_tr90
+[[ -z $data_cv ]] && data_cv=$data_fmllr/train_cv10
+kaldi_to_nerv=$nerv_kaldi/tools/kaldi_to_nerv
+mkdir $dir -p
+mkdir $dir/log -p
+###### PREPARE DATASETS ######
+cp $data/feats.scp $dir/train_sorted.scp
+cp $data_cv/feats.scp $dir/cv.scp
+utils/shuffle_list.pl --srand ${seed:-777} <$dir/train_sorted.scp >$dir/train.scp
+
+feats_tr="ark:copy-feats scp:$dir/train.scp ark:- |"
+
+###### INITIALIZE OUTPUT LAYER ######
+[ -z $num_tgt ] && \
+ num_tgt=$(hmm-info --print-args=false $alidir/final.mdl | grep pdfs | awk '{ print $NF }')
+nnet_proto=$dir/nnet_output.proto
+echo "# genrating network prototype $nnet_proto"
+utils/nnet/make_nnet_proto.py \
+ $hid_dim $num_tgt 0 $hid_dim >$nnet_proto || exit 1
+nnet_init=$dir/nnet_output.init
+nnet-initialize --binary=false $nnet_proto $nnet_init
+
+###### MODEL PARAMETER CONVERSION ######
+$kaldi_to_nerv $nnet_init $dir/nnet_output.nerv $hid_num
+$kaldi_to_nerv <(nnet-copy --binary=false $pretrain_dir/${hid_num}.dbn -) $dir/nnet_init.nerv
+$kaldi_to_nerv <(nnet-copy --binary=false $pretrain_dir/final.feature_transform -) $dir/nnet_trans.nerv
+###### PREPARE FOR DECODING #####
+echo "Using PDF targets from dirs '$alidir' '$alidir_cv'"
+# training targets in posterior format,
+labels_tr="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- | ali-to-post ark:- ark:- |"
+labels_cv="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir_cv/ali.*.gz |\" ark:- | ali-to-post ark:- ark:- |"
+# training targets for analyze-counts,
+labels_tr_pdf="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- |"
+labels_tr_phn="ark:ali-to-phones --per-frame=true $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- |"
+
+# get pdf-counts, used later for decoding/aligning,
+analyze-counts --verbose=1 --binary=false "$labels_tr_pdf" $dir/ali_train_pdf.counts 2>$dir/log/analyze_counts_pdf.log || exit 1
+# copy the old transition model, will be needed by decoder,
+copy-transition-model --binary=false $alidir/final.mdl $dir/final.mdl || exit 1
+# copy the tree
+cp $alidir/tree $dir/tree || exit 1
+
+# make phone counts for analysis,
+[ -e $lang/phones.txt ] && analyze-counts --verbose=1 --symbol-table=$lang/phones.txt "$labels_tr_phn" /dev/null 2>$dir/log/analyze_counts_phones.log || exit 1
diff --git a/kaldi_io/tools/kaldi_to_nerv b/kaldi_io/tools/kaldi_to_nerv
new file mode 100755
index 0000000..78469f8
--- /dev/null
+++ b/kaldi_io/tools/kaldi_to_nerv
Binary files differ
diff --git a/kaldi_io/tools/kaldi_to_nerv.cpp b/kaldi_io/tools/kaldi_to_nerv.cpp
index 1edb0f2..f16de44 100644
--- a/kaldi_io/tools/kaldi_to_nerv.cpp
+++ b/kaldi_io/tools/kaldi_to_nerv.cpp
@@ -3,31 +3,53 @@
#include <string>
#include <cstring>
#include <cassert>
+#include <cstdlib>
char token[1024];
char output[1024];
-double mat[4096][4096];
+
+double **new_matrix(int nrow, int ncol) {
+ double **mat = new double *[nrow];
+ int i;
+ for (i = 0; i < nrow; i++)
+ mat[i] = new double[ncol];
+ return mat;
+}
+
+void free_matrix(double **mat, int nrow, int ncol) {
+ int i;
+ for (i = 0; i < nrow; i++)
+ delete [] mat[i];
+ delete [] mat;
+}
+
int main(int argc, char **argv) {
+ FILE *fin;
std::ofstream fout;
- fout.open(argv[1]);
- int cnt = 0;
+ assert(argc >= 3);
+ fin = fopen(argv[1], "r");
+ fout.open(argv[2]);
+ assert(fin != NULL);
+ int cnt = argc > 3 ? atoi(argv[3]) : 0;
bool shift;
- while (scanf("%s", token) != EOF)
+ while (fscanf(fin, "%s", token) != EOF)
{
int nrow, ncol;
int i, j;
+ double **mat;
if (strcmp(token, "<AffineTransform>") == 0)
{
double lrate, blrate, mnorm;
- scanf("%d %d", &ncol, &nrow);
- scanf("%s %lf %s %lf %s %lf",
+ fscanf(fin, "%d %d", &ncol, &nrow);
+ fscanf(fin, "%s %lf %s %lf %s %lf",
token, &lrate, token, &blrate, token, &mnorm);
- scanf("%s", token);
+ fscanf(fin, "%s", token);
assert(*token == '[');
printf("%d %d\n", nrow, ncol);
+ mat = new_matrix(nrow, ncol);
for (j = 0; j < ncol; j++)
for (i = 0; i < nrow; i++)
- scanf("%lf", mat[i] + j);
+ fscanf(fin, "%lf", mat[i] + j);
long base = fout.tellp();
sprintf(output, "%16d", 0);
fout << output;
@@ -47,13 +69,13 @@ int main(int argc, char **argv) {
sprintf(output, "[%13lu]\n", length);
fout << output;
fout.seekp(0, std::ios_base::end);
- scanf("%s", token);
+ fscanf(fin, "%s", token);
assert(*token == ']');
- if (scanf("%s", token) == 1 && *token == '[')
+ if (fscanf(fin, "%s", token) == 1 && *token == '[')
{
base = fout.tellp();
for (j = 0; j < ncol; j++)
- scanf("%lf", mat[0] + j);
+ fscanf(fin, "%lf", mat[0] + j);
sprintf(output, "%16d", 0);
fout << output;
sprintf(output, "{type=\"nerv.BiasParam\",id=\"affine%d_bp\"}\n",
@@ -71,19 +93,21 @@ int main(int argc, char **argv) {
fout.seekp(0, std::ios_base::end);
cnt++;
}
+ free_matrix(mat, nrow, ncol);
}
else if ((shift = (strcmp(token, "<AddShift>") == 0)) ||
strcmp(token, "<Rescale>") == 0)
{
double lrate, blrate, mnorm;
- scanf("%d %d", &ncol, &ncol);
- scanf("%s %lf",
+ fscanf(fin, "%d %d", &ncol, &ncol);
+ mat = new_matrix(1, ncol);
+ fscanf(fin, "%s %lf",
token, &lrate);
- scanf("%s", token);
+ fscanf(fin, "%s", token);
assert(*token == '[');
printf("%d\n", ncol);
for (j = 0; j < ncol; j++)
- scanf("%lf", mat[0] + j);
+ fscanf(fin, "%lf", mat[0] + j);
long base = fout.tellp();
sprintf(output, "%16d", 0);
fout << output;
@@ -101,8 +125,9 @@ int main(int argc, char **argv) {
sprintf(output, "[%13lu]\n", length);
fout << output;
fout.seekp(0, std::ios_base::end);
- scanf("%s", token);
+ fscanf(fin, "%s", token);
assert(*token == ']');
+ free_matrix(mat, 1, ncol);
}
}
return 0;
diff --git a/kaldi_io/tools/nerv_to_kaldi.lua b/kaldi_io/tools/nerv_to_kaldi.lua
index 804f09b..fba6a6c 100644
--- a/kaldi_io/tools/nerv_to_kaldi.lua
+++ b/kaldi_io/tools/nerv_to_kaldi.lua
@@ -1,8 +1,8 @@
--- usage: nerv config_file nerv_param_input tnet_output
+-- usage: nerv nerv_to_kaldi.lua config_file nerv_param_input kaldi_param_output
dofile(arg[1])
param_repo = nerv.ParamRepo()
-param_repo:import({arg[2], gconf.initialized_param[2]}, nil, gconf)
+param_repo:import({arg[2]}, nil, gconf)
layer_repo = make_layer_repo(param_repo)
f = assert(io.open(arg[3], "w"))
diff --git a/kaldi_seq/Makefile b/kaldi_seq/Makefile
index e76eea8..c712319 100644
--- a/kaldi_seq/Makefile
+++ b/kaldi_seq/Makefile
@@ -1,6 +1,12 @@
-# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --share)
-KDIR := /slfs6/users/ymz09/kaldi/
+ifndef LUA_BINDIR
+$(error Please build the package via luarocks: `luarocks make`)
+endif
+ifndef KALDI_BASE
+$(error KALDI_BASE is not set)
+endif
+
+KDIR := $(KALDI_BASE)
SHELL := /bin/bash
BUILD_DIR := $(CURDIR)/build
INC_PATH := $(LUA_BINDIR)/../include/
diff --git a/kaldi_seq/init.c b/kaldi_seq/init.c
index ed89473..a3af948 100644
--- a/kaldi_seq/init.c
+++ b/kaldi_seq/init.c
@@ -1,4 +1,4 @@
-#include "../nerv/common.h"
+#include "nerv/lib/common.h"
#include <stdio.h>
extern void kaldi_seq_init(lua_State *L);
diff --git a/kaldi_seq/src/init.c b/kaldi_seq/src/init.c
index 9b38056..c2002cf 100644
--- a/kaldi_seq/src/init.c
+++ b/kaldi_seq/src/init.c
@@ -1,4 +1,4 @@
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
#include "kaldi_mpe.h"
#include "kaldi_mmi.h"
#include <stdio.h>
diff --git a/kaldi_seq/src/kaldi_mmi.cpp b/kaldi_seq/src/kaldi_mmi.cpp
index ea9b4f1..6f8dad9 100644
--- a/kaldi_seq/src/kaldi_mmi.cpp
+++ b/kaldi_seq/src/kaldi_mmi.cpp
@@ -36,9 +36,10 @@ extern "C" {
#include "kaldi_mmi.h"
#include "string.h"
#include "assert.h"
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
+#include "nerv/lib/matrix/mmatrix.h"
- extern NervMatrix *nerv_matrix_host_float_create(long nrow, long ncol, Status *status);
+ extern NervMatrix *nerv_matrix_host_float_create(long nrow, long ncol, MContext *context, Status *status);
extern void nerv_matrix_host_float_copy_fromd(NervMatrix *mat, const NervMatrix *cumat, int, int, int, Status *);
using namespace kaldi;
using namespace kaldi::nnet1;
diff --git a/kaldi_seq/src/kaldi_mmi.h b/kaldi_seq/src/kaldi_mmi.h
index ce6787c..6175479 100644
--- a/kaldi_seq/src/kaldi_mmi.h
+++ b/kaldi_seq/src/kaldi_mmi.h
@@ -1,7 +1,7 @@
#ifndef NERV_kaldi_KALDI_MMI
#define NERV_kaldi_KALDI_MMI
-#include "nerv/matrix/matrix.h"
-#include "nerv/common.h"
+#include "nerv/lib/matrix/matrix.h"
+#include "nerv/lib/common.h"
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/kaldi_seq/src/kaldi_mpe.cpp b/kaldi_seq/src/kaldi_mpe.cpp
index 60384e2..5d4149c 100644
--- a/kaldi_seq/src/kaldi_mpe.cpp
+++ b/kaldi_seq/src/kaldi_mpe.cpp
@@ -68,9 +68,10 @@ extern "C" {
#include "kaldi_mpe.h"
#include "string.h"
#include "assert.h"
-#include "nerv/common.h"
+#include "nerv/lib/common.h"
+#include "nerv/lib/matrix/mmatrix.h"
- extern NervMatrix *nerv_matrix_host_float_create(long nrow, long ncol, Status *status);
+ extern NervMatrix *nerv_matrix_host_float_create(long nrow, long ncol, MContext *context, Status *status);
extern void nerv_matrix_host_float_copy_fromd(NervMatrix *mat, const NervMatrix *cumat, int, int, int, Status *);
using namespace kaldi;
using namespace kaldi::nnet1;
diff --git a/kaldi_seq/src/kaldi_mpe.h b/kaldi_seq/src/kaldi_mpe.h
index fd09574..218cff5 100644
--- a/kaldi_seq/src/kaldi_mpe.h
+++ b/kaldi_seq/src/kaldi_mpe.h
@@ -1,7 +1,7 @@
#ifndef NERV_kaldi_KALDI_MPE
#define NERV_kaldi_KALDI_MPE
-#include "nerv/matrix/matrix.h"
-#include "nerv/common.h"
+#include "nerv/lib/matrix/matrix.h"
+#include "nerv/lib/common.h"
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/speech_utils/init.lua b/speech_utils/init.lua
index f89f4fd..9e8adba 100644
--- a/speech_utils/init.lua
+++ b/speech_utils/init.lua
@@ -9,8 +9,13 @@ function nerv.speech_utils.global_transf(feat_utter, global_transf,
global_transf:init(input[1]:nrow())
global_transf:propagate(input, output)
-- trim frames
- expanded = gconf.cumat_type(output[1]:nrow() - frm_trim * 2, output[1]:ncol())
- expanded:copy_fromd(output[1], frm_trim, feat_utter:nrow() - frm_trim)
+ if gconf.use_cpu then
+ mat_type = gconf.mmat_type
+ else
+ mat_type = gconf.cumat_type
+ end
+ expanded = mat_type(output[1]:nrow() - frm_trim * 2, output[1]:ncol())
+ expanded:copy_from(output[1], frm_trim, feat_utter:nrow() - frm_trim)
collectgarbage("collect")
return expanded
end
diff --git a/tutorial/howto_pretrain_from_kaldi.rst b/tutorial/howto_pretrain_from_kaldi.rst
new file mode 100644
index 0000000..ff6ef3d
--- /dev/null
+++ b/tutorial/howto_pretrain_from_kaldi.rst
@@ -0,0 +1,117 @@
+How to Use a Pretrained nnet Model from Kaldi
+=============================================
+
+:author: Ted Yin (mfy43) <[email protected]>
+:abstract: Instructions on how to pretrain a basic DNN on the TIMIT dataset
+ using Kaldi, convert the pretrained model to the NERV format, and let NERV
+ do the finetuning. Finally, it shows two possible ways to decode the
+ finetuned model within the Kaldi framework.
+
+- Locate the ``egs/timit`` recipe inside the Kaldi trunk directory.
+
+- Configure ``cmd.sh`` and ``path.sh`` according to your machine settings.
+
+- Open ``run.sh`` and locate the line saying ``exit 0 # From this point
+ you can run Karel's DNN: local/nnet/run_dnn.sh``. Uncomment this line. In
+ this tutorial we only want to train a basic tri-phone DNN, so we simply
+ skip MMI training, system combination and other fancy things like these.
+
+- Run ``./run.sh`` to start the training stages. After that, we will have a
+ trained tri-phone GMM-HMM and the aligned labels. Let's move on to
+ pretraining a DNN.
+
+- Open ``local/nnet/run_dnn.sh``; there are again several stages. The first
+ stage (pretraining the DNN) is the only one we actually need, since in
+ this tutorial we want to demonstrate how to take the pretrained model from
+ stage 1, replace stage 2 with NERV (per-frame cross-entropy finetuning),
+ and decode using the finetuned network. However, we keep stage 2 and add a
+ line ``exit 0`` after it, so that we can later compare the NERV result
+ against the standard one (the decoding result of the finetuned model
+ produced by the original stage 2).
+
+- Run ``local/nnet/run_dnn.sh`` (first two stages).
+- You'll find directories like ``dnn4_pretrain-dbn`` and
+ ``dnn4_pretrain-dbn_dnn`` inside ``exp/``. They correspond to stage 1 and
+ stage 2 respectively. To use NERV for stage 2 instead, we need the
+ pretrained network and the global transformation from stage 1:
+
+ - Check that the file ``exp/dnn4_pretrain-dbn/6.dbn`` (the pretrained
+ network) exists.
+ - Check that the file ``exp/dnn4_pretrain-dbn/tr_splice5_cmvn-g.nnet``
+ (the global transformation) exists.
+ - Run the script ``kaldi_io/tools/convert_from_kaldi_pretrain.sh`` to
+ generate the parameters for the output layer and the script files for
+ the training and cross-validation sets, as sketched below.
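+
+ A sketch of the invocation, run from your ``s5`` directory. The script
+ path and the ``exp/tri3_ali_cv`` cross-validation alignment directory
+ are assumptions; adjust them to your setup. The six positional
+ arguments are data, data_cv, lang, alidir, alidir_cv and dir:
+
+ ::
+
+ bash /path/to/nerv/speech/kaldi_io/tools/convert_from_kaldi_pretrain.sh \
+ data-fmllr-tri3/train_tr90 data-fmllr-tri3/train_cv10 data/lang \
+ exp/tri3_ali exp/tri3_ali_cv exp/dnn4_nerv_prepare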
+
+ - The previous conversion commands automatically assign identifiers to the
+ parameters read from the Kaldi network file. The identifiers look like
+ ``affine0_ltp`` and ``bias0``, and these names should correspond to the
+ identifiers used in the declaration of the network. Luckily, this
+ tutorial comes with a ready-made network declaration at
+ ``nerv/examples/timit_baseline2.lua``.
+
+- Copy the file ``nerv/examples/timit_baseline2.lua`` to
+ ``timit_mybaseline.lua``, and change the lines containing ``/speechlab``
+ to match your own setup.
+
+- Start the NERV training by ``install/bin/nerv nerv/examples/asr_trainer.lua timit_mybaseline.lua``.
+
+ - ``install/bin/nerv`` is the program which sets up the NERV environment,
+
+ - followed by an argument ``nerv/examples/asr_trainer.lua`` which is the script
+ you actually want to run (the general DNN training scheduler),
+
+ - followed by an argument ``timit_mybaseline.lua`` to the scheduler,
+ specifying the network you want to train and some relevant settings,
+ such as where to find the initialized parameters and the learning rate,
+ as sketched below.
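+
+ A minimal sketch of the kind of settings involved. The paths are
+ hypothetical (the chunk files are those generated by the conversion
+ step), the learning rate is illustrative, and the real config contains
+ more fields:
+
+ ::
+
+ -- point the trainer at the chunks from the conversion step
+ gconf.initialized_param = {
+ "/your/path/exp/dnn4_nerv_prepare/nnet_init.nerv", -- pretrained DBN
+ "/your/path/exp/dnn4_nerv_prepare/nnet_output.nerv", -- output layer
+ "/your/path/exp/dnn4_nerv_prepare/nnet_trans.nerv" -- global transform
+ }
+ gconf.lrate = 0.8 -- finetuning learning rate (illustrative value)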
+
+- Finally, after about 13 iterations, the finetuning ends. There are two
+ ways to decode your model:
+
+ - Plan A:
+
+ - Open your ``timit_mybaseline.lua`` again and modify ``decode_param`` so
+ that it points to your final chunk file (the file with the extension
+ ``.nerv``) and the global transformation chunk file used in training.
+ This lets the decoder know the set of parameters to use for decoding,
+ as sketched below.
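+
+ A minimal sketch with hypothetical file names (the actual name of your
+ final chunk file depends on the training run):
+
+ ::
+
+ -- the finetuned network chunk, then the global transformation chunk
+ decode_param = {"/your/path/your_finetuned_params.nerv",
+ "/your/path/exp/dnn4_nerv_prepare/nnet_trans.nerv"}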
+
+ - Copy the script ``nerv/speech/kaldi_io/README.timit`` to your Kaldi
+ working directory (``timit/s5``) and modify the paths listed in the
+ script.
+
+ - Run the modified ``README.timit`` in the ``s5`` directory (the one
+ containing ``path.sh``).
+
+ - After decoding, run ``bash RESULT exp/dnn4_nerv`` to see the results.
+
+ - Plan B: In this plan, we manually convert the trained model back to Kaldi
+ nnet format, and use Kaldi to decode.
+
+ - Create a copy of ``nerv/speech/kaldi_io/tools/nerv_to_kaldi.lua``.
+
+ - Modify the list named ``lnames`` to list, in order, the names of the
+ layers you want to put into the output Kaldi parameter file. (You don't
+ actually need to change it for this tutorial.) You may ask why the
+ NERV-to-Kaldi conversion is so cumbersome. This is because the Kaldi
+ nnet format is a special case of the more general NERV format: it only
+ allows stacked DNNs, so the Kaldi-to-NERV conversion is lossless but the
+ other direction is not. Your future NERV network may have multiple
+ branches, which is why you need to specify how to select and "stack"
+ your layers in the Kaldi parameter output.
+
+ - Do the conversion by:
+
+ ::
+
+ cat your_trained_params.nerv your_global_trans.nerv > all.nerv
+ install/bin/nerv nerv_to_kaldi.lua timit_mybaseline.lua all.nerv your_kaldi_output.nnet
+
+ - Finally, locate the stage 2 directory ``exp/dnn4_pretrain-dbn_dnn`` and
+ temporarily point the symbolic link for the final network file at the
+ converted one:
+
+ ::
+
+ cd exp/dnn4_pretrain-dbn_dnn
+ mv final.nnet final.nnet.orig
+ ln -sv your_kaldi_output.nnet final.nnet
+
+ Then proceed with a normal Kaldi decoding.
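+
+ A sketch of such a decoding run, roughly following the decoding call in
+ ``local/nnet/run_dnn.sh``; the exact options and paths in your recipe
+ may differ:
+
+ ::
+
+ cd ../.. # back to the s5 directory
+ steps/nnet/decode.sh --nj 20 --cmd "$decode_cmd" --acwt 0.2 \
+ exp/tri3/graph data-fmllr-tri3/test exp/dnn4_pretrain-dbn_dnn/decode_test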