Diffstat (limited to 'kaldi_decode')
-rwxr-xr-xkaldi_decode/README13
-rwxr-xr-xkaldi_decode/cmd.sh36
-rw-r--r--kaldi_decode/conf/decode_dnn.config2
-rwxr-xr-xkaldi_decode/decode.sh124
-rwxr-xr-xkaldi_decode/local/score.sh67
-rwxr-xr-xkaldi_decode/path.sh5
-rw-r--r--kaldi_decode/src/Makefile12
-rw-r--r--kaldi_decode/src/nerv4decode.lua79
-rw-r--r--kaldi_decode/src/nnet-forward.cc215
-rwxr-xr-xkaldi_decode/utils/int2sym.pl71
-rwxr-xr-xkaldi_decode/utils/parse_options.sh97
-rwxr-xr-xkaldi_decode/utils/queue.pl580
-rwxr-xr-xkaldi_decode/utils/run.pl264
-rwxr-xr-xkaldi_decode/utils/split_data.sh135
14 files changed, 1700 insertions, 0 deletions
diff --git a/kaldi_decode/README b/kaldi_decode/README
new file mode 100755
index 0000000..8d0a95b
--- /dev/null
+++ b/kaldi_decode/README
@@ -0,0 +1,13 @@
+source path.sh
+source cmd.sh
+
+acwt=0.1
+dir=/slfs5/users/ymz09/chime/baseline/ASR/exp/nerv_seq/
+graph=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/graph_tgpr_5k
+data=/slfs5/users/ymz09/chime/baseline/ASR/data-fbank/et05_real_enhanced
+config=/slfs6/users/ymz09/nerv-project/nerv/nerv/examples/mpe_chime3.lua
+
+decode.sh --nj 4 --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \
+ $graph $data $config \
+ $dir/decode_tgpr_5k_et05_real_enhanced_nerv
+
diff --git a/kaldi_decode/cmd.sh b/kaldi_decode/cmd.sh
new file mode 100755
index 0000000..e2e54e8
--- /dev/null
+++ b/kaldi_decode/cmd.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# "queue.pl" uses qsub. The options to it are
+# options to qsub. If you have GridEngine installed,
+# change this to a queue you have access to.
+# Otherwise, use "run.pl", which will run jobs locally
+# (make sure your --num-jobs options are no more than
+# the number of CPUs on your machine).
+
+#a) JHU cluster options
+#export train_cmd="queue.pl -l arch=*64"
+#export decode_cmd="queue.pl -l arch=*64,mem_free=2G,ram_free=2G"
+#export mkgraph_cmd="queue.pl -l arch=*64,ram_free=4G,mem_free=4G"
+
+#export cuda_cmd="..."
+
+
+#b) BUT cluster options
+#export train_cmd="queue.pl -q all.q@@blade -l ram_free=1200M,mem_free=1200M"
+#export decode_cmd="queue.pl -q all.q@@blade -l ram_free=1700M,mem_free=1700M"
+#export decodebig_cmd="queue.pl -q all.q@@blade -l ram_free=4G,mem_free=4G"
+
+#export cuda_cmd="queue.pl -q long.q@@pco203 -l gpu=1"
+#export cuda_cmd="queue.pl -q long.q@pcspeech-gpu"
+#export mkgraph_cmd="queue.pl -q all.q@@servers -l ram_free=4G,mem_free=4G"
+
+#c) run it locally...
+export train_cmd=run.pl
+export decode_cmd=run.pl
+export cuda_cmd=run.pl
+export mkgraph_cmd=run.pl
+
+#export train_cmd='queue.pl'
+#export decode_cmd='queue.pl'
+#export cuda_cmd='queue.pl -l gpu=1 -l hostname="markov|date|hamming"'
+#export mkgraph_cmd='queue.pl'
+
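
The *_cmd variables above are consumed by the scripts in this directory, which invoke them with an optional JOB range, a log file, and the command to run. A minimal sketch of the calling convention (log path hypothetical):

    # runs 4 jobs locally with run.pl (or via qsub if decode_cmd=queue.pl),
    # writing exp/log/demo.1.log ... exp/log/demo.4.log
    $decode_cmd JOB=1:4 exp/log/demo.JOB.log echo "this is job JOB"
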
diff --git a/kaldi_decode/conf/decode_dnn.config b/kaldi_decode/conf/decode_dnn.config
new file mode 100644
index 0000000..89dd992
--- /dev/null
+++ b/kaldi_decode/conf/decode_dnn.config
@@ -0,0 +1,2 @@
+beam=18.0 # beam for decoding. Was 13.0 in the scripts.
+lattice_beam=10.0 # this has most effect on size of the lattices.
diff --git a/kaldi_decode/decode.sh b/kaldi_decode/decode.sh
new file mode 100755
index 0000000..aa7e089
--- /dev/null
+++ b/kaldi_decode/decode.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+# Copyright 2012-2013 Karel Vesely, Daniel Povey
+# Apache 2.0
+
+# Begin configuration section.
+nnet= # non-default location of DNN (optional)
+feature_transform= # non-default location of feature_transform (optional)
+model= # non-default location of transition model (optional)
+class_frame_counts= # non-default location of PDF counts (optional)
+srcdir= # non-default location of DNN-dir (decouples model dir from decode dir)
+
+stage=0 # stage=1 skips lattice generation
+nj=4
+cmd=run.pl
+
+acwt=0.10 # note: only really affects pruning (scoring is on lattices).
+beam=13.0
+lattice_beam=8.0
+min_active=200
+max_active=7000 # limit of active tokens
+max_mem=50000000 # approx. limit to memory consumption during minimization in bytes
+nnet_forward_opts="--prior-scale=1.0"
+
+skip_scoring=false
+scoring_opts="--min-lmwt 4 --max-lmwt 15"
+
+num_threads=1 # if >1, will use latgen-faster-parallel
+parallel_opts= # Ignored now.
+use_gpu="no" # yes|no|optionaly
+# End configuration section.
+
+echo "$0 $@" # Print the command line for logging
+
+[ -f ./path.sh ] && . ./path.sh; # source the path.
+. parse_options.sh || exit 1;
+
+if [ $# != 4 ]; then
+ echo "Usage: $0 [options] <graph-dir> <data-dir> <nerv-config> <decode-dir>"
+ echo "... where <decode-dir> is assumed to be a sub-directory of the directory"
+ echo " where the DNN and transition model is."
+ echo "e.g.: $0 exp/dnn1/graph_tgpr data/test config.lua exp/dnn1/decode_tgpr"
+ echo ""
+ echo "This script works on plain or modified features (CMN,delta+delta-delta),"
+ echo "which are then sent through feature-transform. It works out what type"
+ echo "of features you used from content of srcdir."
+ echo ""
+ echo "main options (for others, see top of script file)"
+ echo " --config <config-file> # config containing options"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+ echo ""
+ echo " --srcdir <dir> # non-default dir with DNN/models, can be different"
+ echo " # from parent dir of <decode-dir>' (opt.)"
+ echo ""
+ echo " --acwt <float> # select acoustic scale for decoding"
+ echo " --scoring-opts <opts> # options forwarded to local/score.sh"
+ echo " --num-threads <N> # N>1: run multi-threaded decoder"
+ exit 1;
+fi
+
+
+graphdir=$1
+data=$2
+config=$3
+dir=$4
+[ -z "$srcdir" ] && srcdir=`dirname $dir`; # Default model directory one level up from decoding directory.
+sdata=$data/split$nj;
+
+mkdir -p $dir/log
+
+[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
+echo $nj > $dir/num_jobs
+
+# Select default locations of model files (if not already set externally)
+[ -z "$model" ] && model=$srcdir/final.mdl
+#
+[ -z "$class_frame_counts" -a -f $srcdir/prior_counts ] && class_frame_counts=$srcdir/prior_counts # priority,
+[ -z "$class_frame_counts" ] && class_frame_counts=$srcdir/ali_train_pdf.counts
+
+# Check that files exist
+for f in $sdata/1/feats.scp $model $class_frame_counts $graphdir/HCLG.fst; do
+ [ ! -f $f ] && echo "$0: missing file $f" && exit 1;
+done
+
+# Possibly use multi-threaded decoder
+thread_string=
+[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads"
+
+
+# PREPARE FEATURE EXTRACTION PIPELINE
+# import config,
+cmvn_opts=
+delta_opts=
+D=$srcdir
+[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility,
+[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts)
+[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility,
+[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts)
+#
+# Create the feature stream,
+feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |"
+# apply-cmvn (optional),
+[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1
+[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |"
+# add-deltas (optional),
+[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |"
+#
+# Run the decoding in the queue,
+if [ $stage -le 0 ]; then
+ $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \
+ ./src/nnet-forward $nnet_forward_opts --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $config "$feats" ark:- \| \
+ latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \
+ --lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \
+ $model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
+fi
+# Run the scoring
+if ! $skip_scoring ; then
+ [ ! -x local/score.sh ] && \
+ echo "Not scoring because local/score.sh does not exist or not executable." && exit 1;
+ local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || exit 1;
+fi
+
+exit 0;
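
Note how decode.sh never hands feature files to the decoder directly: the whole Kaldi pipeline is assembled into the $feats string and passed to nnet-forward as a single rspecifier. With CMVN and deltas enabled in srcdir, the string for job 1 would expand to something like this (paths and option values illustrative):

    feats='ark,s,cs:copy-feats scp:data/test/split4/1/feats.scp ark:- | apply-cmvn --norm-means=true --norm-vars=false --utt2spk=ark:data/test/split4/1/utt2spk scp:data/test/split4/1/cmvn.scp ark:- ark:- | add-deltas --delta-order=2 ark:- ark:- |'
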
diff --git a/kaldi_decode/local/score.sh b/kaldi_decode/local/score.sh
new file mode 100755
index 0000000..b18f350
--- /dev/null
+++ b/kaldi_decode/local/score.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
+# Apache 2.0
+
+[ -f ./path.sh ] && . ./path.sh
+
+# begin configuration section.
+cmd=run.pl
+stage=0
+decode_mbr=true
+reverse=false
+word_ins_penalty=0.0
+min_lmwt=5
+max_lmwt=20
+#end configuration section.
+
+[ -f ./path.sh ] && . ./path.sh
+. parse_options.sh || exit 1;
+
+if [ $# -ne 3 ]; then
+ echo "Usage: local/score.sh [--cmd (run.pl|queue.pl...)] <data-dir> <lang-dir|graph-dir> <decode-dir>"
+ echo " Options:"
+ echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
+ echo " --stage (0|1|2) # start scoring script from part-way through."
+ echo " --decode_mbr (true/false) # maximum bayes risk decoding (confusion network)."
+ echo " --min_lmwt <int> # minumum LM-weight for lattice rescoring "
+ echo " --max_lmwt <int> # maximum LM-weight for lattice rescoring "
+ echo " --reverse (true/false) # score with time reversed features "
+ exit 1;
+fi
+
+data=$1
+lang_or_graph=$2
+dir=$3
+
+symtab=$lang_or_graph/words.txt
+
+for f in $symtab $dir/lat.1.gz $data/text; do
+ [ ! -f $f ] && echo "score.sh: no such file $f" && exit 1;
+done
+
+mkdir -p $dir/scoring/log
+
+cat $data/text | sed 's:<NOISE>::g' | sed 's:<SPOKEN_NOISE>::g' > $dir/scoring/test_filt.txt
+
+$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/best_path.LMWT.log \
+ lattice-scale --inv-acoustic-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \
+ lattice-add-penalty --word-ins-penalty=$word_ins_penalty ark:- ark:- \| \
+ lattice-best-path --word-symbol-table=$symtab \
+ ark:- ark,t:$dir/scoring/LMWT.tra || exit 1;
+
+if $reverse; then
+ for lmwt in `seq $min_lmwt $max_lmwt`; do
+ mv $dir/scoring/$lmwt.tra $dir/scoring/$lmwt.tra.orig
+ awk '{ printf("%s ",$1); for(i=NF; i>1; i--){ printf("%s ",$i); } printf("\n"); }' \
+ <$dir/scoring/$lmwt.tra.orig >$dir/scoring/$lmwt.tra
+ done
+fi
+
+# Note: the double level of quoting for the sed command
+$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/score.LMWT.log \
+ cat $dir/scoring/LMWT.tra \| \
+ utils/int2sym.pl -f 2- $symtab \| sed 's:\<UNK\>::g' \| \
+ compute-wer --text --mode=present \
+ ark:$dir/scoring/test_filt.txt ark,p:- ">&" $dir/wer_LMWT || exit 1;
+
+exit 0;
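
score.sh leaves one wer_LMWT file per LM weight in the decode directory, each containing the %WER line printed by compute-wer. A quick way to pick the best-scoring weight afterwards (decode directory hypothetical):

    grep -H '%WER' exp/dnn1/decode_tgpr/wer_* | sort -n -k2 | head -n 1
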
diff --git a/kaldi_decode/path.sh b/kaldi_decode/path.sh
new file mode 100755
index 0000000..5aebc72
--- /dev/null
+++ b/kaldi_decode/path.sh
@@ -0,0 +1,5 @@
+export KALDI_ROOT=/slfs6/users/ymz09/kaldi/
+export PATH=$PWD/utils/:$KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin/:$KALDI_ROOT/src/kwsbin:$PWD:$PATH
+export LC_ALL=C
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/slhome/ymz09/mylibs/:.
+
diff --git a/kaldi_decode/src/Makefile b/kaldi_decode/src/Makefile
new file mode 100644
index 0000000..118420c
--- /dev/null
+++ b/kaldi_decode/src/Makefile
@@ -0,0 +1,12 @@
+# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --share)
+KDIR := /slfs6/users/ymz09/kaldi/
+NERVDIR := /slfs6/users/ymz09/nerv-project/nerv/
+CUDADIR := /usr/local/cuda/
+
+nnet-forward:
+ g++ -msse -msse2 -Wall -I $(KDIR)/src/ -pthread -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN -Wno-sign-compare -Wno-unused-local-typedefs -Winit-self -DHAVE_EXECINFO_H=1 -rdynamic -DHAVE_CXXABI_H -DHAVE_ATLAS -I $(KDIR)/tools/ATLAS/include -I $(KDIR)/tools/openfst/include -Wno-sign-compare -g -fPIC -DHAVE_CUDA -I $(CUDADIR)/include -DKALDI_NO_EXPF -I $(NERVDIR)/install//include/luajit-2.0/ -I $(NERVDIR)/install/include/ -DLUA_USE_APICHECK -c -o nnet-forward.o nnet-forward.cc
+ g++ -rdynamic -Wl,-rpath=$(KDIR)/tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath,$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)/src/lib -L. -L$(KDIR)/src/nnet/ -L$(KDIR)/src/cudamatrix/ -L$(KDIR)/src/lat/ -L$(KDIR)/src/hmm/ -L$(KDIR)/src/tree/ -L$(KDIR)/src/matrix/ -L$(KDIR)/src/util/ -L$(KDIR)/src/base/ nnet-forward.o $(KDIR)/src/nnet//libkaldi-nnet.so $(KDIR)/src/cudamatrix//libkaldi-cudamatrix.so $(KDIR)/src/lat//libkaldi-lat.so $(KDIR)/src/hmm//libkaldi-hmm.so $(KDIR)/src/tree//libkaldi-tree.so $(KDIR)/src/matrix//libkaldi-matrix.so $(KDIR)/src/util//libkaldi-util.so $(KDIR)/src/base//libkaldi-base.so -L$(KDIR)/tools/openfst/lib -lfst /usr/lib/liblapack.so /usr/lib/libcblas.so /usr/lib/libatlas.so /usr/lib/libf77blas.so -lm -lpthread -ldl -lcublas -lcudart -lkaldi-nnet -lkaldi-cudamatrix -lkaldi-lat -lkaldi-hmm -lkaldi-tree -lkaldi-matrix -lkaldi-util -lkaldi-base -lstdc++ -L$(NERVDIR)/install/lib -lnervcore -lluaT -rdynamic -Wl,-rpath=$(KDIR)//tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath,$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)//src/lib -lfst -lm -lpthread -ldl -lcublas -lcudart -L $(NERVDIR)/luajit-2.0/src/ -lluajit -o nnet-forward
+
+clean:
+ -rm nnet-forward.o nnet-forward
+
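
Because make lets command-line variable assignments override the := definitions above, the three paths can also be set per invocation instead of editing the Makefile:

    make KDIR=/path/to/kaldi NERVDIR=/path/to/nerv CUDADIR=/usr/local/cuda nnet-forward
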
diff --git a/kaldi_decode/src/nerv4decode.lua b/kaldi_decode/src/nerv4decode.lua
new file mode 100644
index 0000000..b2ff344
--- /dev/null
+++ b/kaldi_decode/src/nerv4decode.lua
@@ -0,0 +1,79 @@
+package.path="/home/slhome/ymz09/.luarocks/share/lua/5.1/?.lua;/home/slhome/ymz09/.luarocks/share/lua/5.1/?/init.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?/init.lua;"..package.path;
+package.cpath="/home/slhome/ymz09/.luarocks/lib/lua/5.1/?.so;/slfs6/users/ymz09/nerv-project/nerv/install/lib/lua/5.1/?.so;"..package.cpath;
+local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+require 'nerv'
+
+function build_trainer(ifname, feature)
+ local param_repo = nerv.ParamRepo()
+ param_repo:import(ifname, nil, gconf)
+ local layer_repo = make_layer_repo(param_repo)
+ local network = get_decode_network(layer_repo)
+ local global_transf = get_global_transf(layer_repo)
+ local input_order = get_input_order()
+ local readers = make_readers(feature, layer_repo)
+ network:init(1)
+
+ local iterative_trainer = function()
+ local data = nil
+ for ri = 1, #readers, 1 do
+ data = readers[ri].reader:get_data()
+ if data ~= nil then
+ break
+ end
+ end
+
+ if data == nil then
+ return "", nil
+ end
+
+ local input = {}
+ for i, e in ipairs(input_order) do
+ local id = e.id
+ if data[id] == nil then
+ nerv.error("input data %s not found", id)
+ end
+ local transformed
+ if e.global_transf then
+ local batch = gconf.cumat_type(data[id]:nrow(), data[id]:ncol())
+ batch:copy_fromh(data[id])
+ transformed = nerv.speech_utils.global_transf(batch,
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf)
+ else
+ transformed = data[id]
+ end
+ table.insert(input, transformed)
+ end
+ local output = {nerv.CuMatrixFloat(input[1]:nrow(), network.dim_out[1])}
+ network:batch_resize(input[1]:nrow())
+ network:propagate(input, output)
+
+ local utt = data["key"]
+ if utt == nil then
+ nerv.error("no key found.")
+ end
+
+ local mat = nerv.MMatrixFloat(output[1]:nrow(), output[1]:ncol())
+ output[1]:copy_toh(mat)
+
+ collectgarbage("collect")
+ return utt, mat
+ end
+
+ return iterative_trainer
+end
+
+function init(config, feature)
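+ -- silence io.write while the config is dofile'd below (stray prints would corrupt the ark stream on stdout); restored afterwards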
+ local tmp = io.write
+ io.write = function(...)
+ end
+ dofile(config)
+ trainer = build_trainer(gconf.decode_param, feature)
+ io.write = tmp
+end
+
+function feed()
+ local utt, mat = trainer()
+ return utt, mat
+end
diff --git a/kaldi_decode/src/nnet-forward.cc b/kaldi_decode/src/nnet-forward.cc
new file mode 100644
index 0000000..007f623
--- /dev/null
+++ b/kaldi_decode/src/nnet-forward.cc
@@ -0,0 +1,215 @@
+// nnetbin/nnet-forward.cc
+
+// Copyright 2011-2013 Brno University of Technology (Author: Karel Vesely)
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABILITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+extern "C"{
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "nerv/matrix/matrix.h"
+#include "nerv/common.h"
+#include "nerv/luaT/luaT.h"
+}
+
+#include <limits>
+
+#include "nnet/nnet-nnet.h"
+#include "nnet/nnet-loss.h"
+#include "nnet/nnet-pdf-prior.h"
+#include "base/kaldi-common.h"
+#include "util/common-utils.h"
+#include "base/timer.h"
+
+typedef kaldi::BaseFloat BaseFloat;
+typedef struct Matrix NervMatrix;
+
+
+int main(int argc, char *argv[]) {
+ using namespace kaldi;
+ using namespace kaldi::nnet1;
+ try {
+ const char *usage =
+ "Perform forward pass through Neural Network.\n"
+ "\n"
+ "Usage: nnet-forward [options] <nerv-config> <feature-rspecifier> <feature-wspecifier> [nerv4decode.lua]\n"
+ "e.g.: \n"
+ " nnet-forward config.lua ark:features.ark ark:mlpoutput.ark\n";
+
+ ParseOptions po(usage);
+
+ PdfPriorOptions prior_opts;
+ prior_opts.Register(&po);
+
+ bool apply_log = false;
+ po.Register("apply-log", &apply_log, "Transform MLP output to logscale");
+
+ std::string use_gpu="no";
+ po.Register("use-gpu", &use_gpu, "yes|no|optional, only has effect if compiled with CUDA");
+
+ using namespace kaldi;
+ using namespace kaldi::nnet1;
+ typedef kaldi::int32 int32;
+
+ int32 time_shift = 0;
+ po.Register("time-shift", &time_shift, "LSTM : repeat last input frame N-times, discrad N initial output frames.");
+
+ po.Read(argc, argv);
+
+ if (po.NumArgs() < 3) {
+ po.PrintUsage();
+ exit(1);
+ }
+
+ std::string config = po.GetArg(1),
+ feature_rspecifier = po.GetArg(2),
+ feature_wspecifier = po.GetArg(3),
+ nerv4decode = "src/nerv4decode.lua";
+ if(po.NumArgs() >= 4)
+ nerv4decode = po.GetArg(4);
+
+ //Select the GPU
+#if HAVE_CUDA==1
+ CuDevice::Instantiate().SelectGpuId(use_gpu);
+#endif
+
+ // we will subtract log-priors later,
+ PdfPrior pdf_prior(prior_opts);
+
+ kaldi::int64 tot_t = 0;
+
+ BaseFloatMatrixWriter feature_writer(feature_wspecifier);
+
+ CuMatrix<BaseFloat> nnet_out;
+ kaldi::Matrix<BaseFloat> nnet_out_host;
+
+ lua_State *L = lua_open();
+ luaL_openlibs(L);
+ if(luaL_loadfile(L, nerv4decode.c_str()))
+ KALDI_ERR << "luaL_loadfile() " << nerv4decode << " failed " << lua_tostring(L, -1);
+
+ if(lua_pcall(L, 0, 0, 0))
+ KALDI_ERR << "lua_pall failed " << lua_tostring(L, -1);
+
+ lua_settop(L, 0);
+ lua_getglobal(L, "init");
+ lua_pushstring(L, config.c_str());
+ lua_pushstring(L, feature_rspecifier.c_str());
+ if(lua_pcall(L, 2, 0, 0))
+ KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+ Timer time;
+ double time_now = 0;
+ int32 num_done = 0;
+ // iterate over all feature files
+ for(;;){
+ lua_settop(L, 0);
+ lua_getglobal(L, "feed");
+ if(lua_pcall(L, 0, 2, 0))
+ KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+ std::string utt = std::string(lua_tostring(L, -2));
+ if(utt == "")
+ break;
+ NervMatrix *mat = *(NervMatrix **)lua_touserdata(L, -1);
+
+ nnet_out_host.Resize(mat->nrow, mat->ncol, kUndefined);
+
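+ // NERV matrix rows are padded to 'stride' bytes, so copy into the Kaldi matrix row by row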
+ size_t stride = mat->stride;
+ for(int i = 0; i < mat->nrow; i++){
+ const BaseFloat *nerv_row = (BaseFloat *)((char *)mat->data.f + i * stride);
+ BaseFloat *row = nnet_out_host.RowData(i);
+ memmove(row, nerv_row, sizeof(BaseFloat) * mat->ncol);
+ }
+
+ KALDI_VLOG(2) << "Processing utterance " << num_done+1
+ << ", " << utt
+ << ", " << nnet_out_host.NumRows() << "frm";
+
+ nnet_out.Resize(nnet_out_host.NumRows(), nnet_out_host.NumCols(), kUndefined);
+ nnet_out.CopyFromMat(nnet_out_host);
+
+ if (!KALDI_ISFINITE(nnet_out.Sum())) { // check there's no nan/inf,
+ KALDI_ERR << "NaN or inf found in nn-output for " << utt;
+ }
+
+ // convert posteriors to log-posteriors,
+ if (apply_log) {
+ if (!(nnet_out.Min() >= 0.0 && nnet_out.Max() <= 1.0)) {
+ KALDI_WARN << utt << " "
+ << "Applying 'log' to data which don't seem to be probabilities "
+ << "(is there a softmax somwhere?)";
+ }
+ nnet_out.Add(1e-20); // avoid log(0),
+ nnet_out.ApplyLog();
+ }
+
+ // subtract log-priors from log-posteriors or pre-softmax,
+ if (prior_opts.class_frame_counts != "") {
+ if (nnet_out.Min() >= 0.0 && nnet_out.Max() <= 1.0) {
+ KALDI_WARN << utt << " "
+ << "Subtracting log-prior on 'probability-like' data in range [0..1] "
+ << "(Did you forget --no-softmax=true or --apply-log=true ?)";
+ }
+ pdf_prior.SubtractOnLogpost(&nnet_out);
+ }
+
+ // download from GPU,
+ nnet_out_host.Resize(nnet_out.NumRows(), nnet_out.NumCols());
+ nnet_out.CopyToMat(&nnet_out_host);
+
+ // time-shift, remove N first frames of LSTM output,
+ if (time_shift > 0) {
+ kaldi::Matrix<BaseFloat> tmp(nnet_out_host);
+ nnet_out_host = tmp.RowRange(time_shift, tmp.NumRows() - time_shift);
+ }
+
+ // write,
+ if (!KALDI_ISFINITE(nnet_out_host.Sum())) { // check there's no nan/inf,
+ KALDI_ERR << "NaN or inf found in final output nn-output for " << utt;
+ }
+ feature_writer.Write(utt, nnet_out_host);
+
+ // progress log
+ if (num_done % 100 == 0) {
+ time_now = time.Elapsed();
+ KALDI_VLOG(1) << "After " << num_done << " utterances: time elapsed = "
+ << time_now/60 << " min; processed " << tot_t/time_now
+ << " frames per second.";
+ }
+ num_done++;
+ tot_t += nnet_out_host.NumRows();
+ }
+
+ // final message
+ KALDI_LOG << "Done " << num_done << " files"
+ << " in " << time.Elapsed()/60 << "min,"
+ << " (fps " << tot_t/time.Elapsed() << ")";
+
+#if HAVE_CUDA==1
+ if (kaldi::g_kaldi_verbose_level >= 1) {
+ CuDevice::Instantiate().PrintProfile();
+ }
+#endif
+ lua_close(L);
+ if (num_done == 0) return -1;
+ return 0;
+ } catch(const std::exception &e) {
+ KALDI_ERR << e.what();
+ return -1;
+ }
+}
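
As the usage string indicates, the binary can also be run outside decode.sh, and the optional fourth argument overrides the default src/nerv4decode.lua glue script. A hypothetical standalone invocation (paths illustrative):

    ./src/nnet-forward --class-frame-counts=exp/dnn1/ali_train_pdf.counts --use-gpu=no \
        config.lua ark:feats.ark ark:mlpoutput.ark src/nerv4decode.lua
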
diff --git a/kaldi_decode/utils/int2sym.pl b/kaldi_decode/utils/int2sym.pl
new file mode 100755
index 0000000..d618939
--- /dev/null
+++ b/kaldi_decode/utils/int2sym.pl
@@ -0,0 +1,71 @@
+#!/usr/bin/env perl
+# Copyright 2010-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
+# Apache 2.0.
+
+undef $field_begin;
+undef $field_end;
+
+
+if ($ARGV[0] eq "-f") {
+ shift @ARGV;
+ $field_spec = shift @ARGV;
+ if ($field_spec =~ m/^\d+$/) {
+ $field_begin = $field_spec - 1; $field_end = $field_spec - 1;
+ }
+ if ($field_spec =~ m/^(\d*)[-:](\d*)/) { # accept e.g. 1:10 as a courtesy (properly, 1-10)
+ if ($1 ne "") {
+ $field_begin = $1 - 1; # Change to zero-based indexing.
+ }
+ if ($2 ne "") {
+ $field_end = $2 - 1; # Change to zero-based indexing.
+ }
+ }
+ if (!defined $field_begin && !defined $field_end) {
+ die "Bad argument to -f option: $field_spec";
+ }
+}
+$symtab = shift @ARGV;
+if(!defined $symtab) {
+ print STDERR "Usage: sym2int.pl [options] symtab [input] > output\n" .
+ "options: [-f (<field>|<field_start>-<field-end>)]\n" .
+ "e.g.: -f 2, or -f 3-4\n";
+ exit(1);
+}
+
+open(F, "<$symtab") || die "Error opening symbol table file $symtab";
+while(<F>) {
+ @A = split(" ", $_);
+ @A == 2 || die "bad line in symbol table file: $_";
+ $int2sym{$A[1]} = $A[0];
+}
+
+sub int2sym {
+ my $a = shift @_;
+ my $pos = shift @_;
+ if($a !~ m:^\d+$:) { # not all digits..
+ $pos1 = $pos+1; # make it one-based.
+ die "int2sym.pl: found noninteger token $a [in position $pos1]\n";
+ }
+ $s = $int2sym{$a};
+ if(!defined ($s)) {
+ die "int2sym.pl: integer $a not in symbol table $symtab.";
+ }
+ return $s;
+}
+
+$error = 0;
+while (<>) {
+ @A = split(" ", $_);
+ for ($pos = 0; $pos <= $#A; $pos++) {
+ $a = $A[$pos];
+ if ( (!defined $field_begin || $pos >= $field_begin)
+ && (!defined $field_end || $pos <= $field_end)) {
+ $a = int2sym($a, $pos);
+ }
+ print $a . " ";
+ }
+ print "\n";
+}
+
+
+
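
This is the same mapping step that score.sh applies to each LMWT.tra; it can equally be run by hand to inspect a decoded transcript (paths hypothetical):

    utils/int2sym.pl -f 2- exp/dnn1/graph_tgpr/words.txt < exp/dnn1/decode_tgpr/scoring/10.tra
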
diff --git a/kaldi_decode/utils/parse_options.sh b/kaldi_decode/utils/parse_options.sh
new file mode 100755
index 0000000..fdc8a36
--- /dev/null
+++ b/kaldi_decode/utils/parse_options.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+# Copyright 2012 Johns Hopkins University (Author: Daniel Povey);
+# Arnab Ghoshal, Karel Vesely
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+# MERCHANTABILITY OR NON-INFRINGEMENT.
+# See the Apache 2 License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Parse command-line options.
+# To be sourced by another script (as in ". parse_options.sh").
+# Option format is: --option-name arg
+# and shell variable "option_name" gets set to value "arg."
+# The exception is --help, which takes no arguments, but prints the
+# $help_message variable (if defined).
+
+
+###
+### The --config file options have lower priority to command line
+### options, so we need to import them first...
+###
+
+# Now import all the configs specified by command-line, in left-to-right order
+for ((argpos=1; argpos<$#; argpos++)); do
+ if [ "${!argpos}" == "--config" ]; then
+ argpos_plus1=$((argpos+1))
+ config=${!argpos_plus1}
+ [ ! -r $config ] && echo "$0: missing config '$config'" && exit 1
+ . $config # source the config file.
+ fi
+done
+
+
+###
+### Now we process the command-line options
+###
+while true; do
+ [ -z "${1:-}" ] && break; # break if there are no arguments
+ case "$1" in
+ # If the enclosing script is called with --help option, print the help
+ # message and exit. Scripts should put help messages in $help_message
+ --help|-h) if [ -z "$help_message" ]; then echo "No help found." 1>&2;
+ else printf "$help_message\n" 1>&2 ; fi;
+ exit 0 ;;
+ --*=*) echo "$0: options to scripts must be of the form --name value, got '$1'"
+ exit 1 ;;
+ # If the first command-line argument begins with "--" (e.g. --foo-bar),
+ # then work out the variable name as $name, which will equal "foo_bar".
+ --*) name=`echo "$1" | sed s/^--// | sed s/-/_/g`;
+ # Next we test whether the variable in question is undefined; if so it's
+ # an invalid option and we die. Note: $0 evaluates to the name of the
+ # enclosing script.
+ # The test [ -z ${foo_bar+xxx} ] will return true if the variable foo_bar
+ # is undefined. We then have to wrap this test inside "eval" because
+ # foo_bar is itself inside a variable ($name).
+ eval '[ -z "${'$name'+xxx}" ]' && echo "$0: invalid option $1" 1>&2 && exit 1;
+
+ oldval="`eval echo \\$$name`";
+ # Work out whether we seem to be expecting a Boolean argument.
+ if [ "$oldval" == "true" ] || [ "$oldval" == "false" ]; then
+ was_bool=true;
+ else
+ was_bool=false;
+ fi
+
+ # Set the variable to the right value-- the escaped quotes make it work if
+ # the option had spaces, like --cmd "queue.pl -sync y"
+ eval $name=\"$2\";
+
+ # Check that Boolean-valued arguments are really Boolean.
+ if $was_bool && [[ "$2" != "true" && "$2" != "false" ]]; then
+ echo "$0: expected \"true\" or \"false\": $1 $2" 1>&2
+ exit 1;
+ fi
+ shift 2;
+ ;;
+ *) break;
+ esac
+done
+
+
+# Check for an empty argument to the --cmd option, which can easily occur as a
+# result of scripting errors.
+[ ! -z "${cmd+xxx}" ] && [ -z "$cmd" ] && echo "$0: empty argument to --cmd option" 1>&2 && exit 1;
+
+
+true; # so this script returns exit code 0.
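
A calling script must pre-define every option variable with its default before sourcing parse_options.sh; --foo-bar <value> then overwrites $foo_bar, and an option with no matching variable is rejected as invalid. A minimal sketch of a caller:

    #!/bin/bash
    # defaults, overridable as e.g.: ./myscript.sh --nj 8 --cmd "queue.pl -q all.q"
    nj=4
    cmd=run.pl
    . parse_options.sh || exit 1
    echo "running with nj=$nj cmd=$cmd"
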
diff --git a/kaldi_decode/utils/queue.pl b/kaldi_decode/utils/queue.pl
new file mode 100755
index 0000000..1e36de6
--- /dev/null
+++ b/kaldi_decode/utils/queue.pl
@@ -0,0 +1,580 @@
+#!/usr/bin/env perl
+use strict;
+use warnings;
+
+# Copyright 2012 Johns Hopkins University (Author: Daniel Povey).
+# 2014 Vimal Manohar (Johns Hopkins University)
+# Apache 2.0.
+
+use File::Basename;
+use Cwd;
+use Getopt::Long;
+
+# queue.pl has the same functionality as run.pl, except that
+# it runs the job in question on the queue (Sun GridEngine).
+# This version of queue.pl uses the task array functionality
+# of the grid engine. Note: it's different from the queue.pl
+# in the s4 and earlier scripts.
+
+# The script now supports configuring the queue system using a config file
+# (default: conf/queue.conf, but can be specified with the --config option)
+# and a set of command line options.
+# The current script handles:
+# 1) Normal configuration arguments
+# For example, a command-line option of "--gpu 1" could be converted into the option
+# "-q g.q -l gpu=1" to qsub. How the CLI option is handled is determined by a
+# line in the config file like
+# gpu=* -q g.q -l gpu=$0
+# $0 here in the line is replaced with the argument read from the CLI and the
+# resulting string is passed to qsub.
+# 2) Special arguments to options such as
+# gpu=0
+# If --gpu 0 is given in the command line, then no special "-q" is given.
+# 3) Default argument
+# default gpu=0
+# If --gpu option is not passed in the command line, then the script behaves as
+# if --gpu 0 was passed since 0 is specified as the default argument for that
+# option
+# 4) Arbitrary options and arguments.
+# Any command-line option starting with '--' and its argument will be handled
+# as long as it's defined in the config file.
+# 5) Default behavior
+# If the config file that is passed is not readable, then the script
+# behaves as if the queue has the following config file:
+# $ cat conf/queue.conf
+# # Default configuration
+# command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
+# option mem=* -l mem_free=$0,ram_free=$0
+# option mem=0 # Do not add anything to qsub_opts
+# option num_threads=* -pe smp $0
+# option num_threads=1 # Do not add anything to qsub_opts
+# option max_jobs_run=* -tc $0
+# default gpu=0
+# option gpu=0 -q all.q
+# option gpu=* -l gpu=$0 -q g.q
+
+my $qsub_opts = "";
+my $sync = 0;
+my $num_threads = 1;
+my $gpu = 0;
+
+my $config = "conf/queue.conf";
+
+my %cli_options = ();
+
+my $jobname;
+my $jobstart;
+my $jobend;
+
+my $array_job = 0;
+
+sub print_usage() {
+ print STDERR
+ "Usage: queue.pl [options] [JOB=1:n] log-file command-line arguments...\n" .
+ "e.g.: queue.pl foo.log echo baz\n" .
+ " (which will echo \"baz\", with stdout and stderr directed to foo.log)\n" .
+ "or: queue.pl -q all.q\@xyz foo.log echo bar \| sed s/bar/baz/ \n" .
+ " (which is an example of using a pipe; you can provide other escaped bash constructs)\n" .
+ "or: queue.pl -q all.q\@qyz JOB=1:10 foo.JOB.log echo JOB \n" .
+ " (which illustrates the mechanism to submit parallel jobs; note, you can use \n" .
+ " another string other than JOB)\n" .
+ "Note: if you pass the \"-sync y\" option to qsub, this script will take note\n" .
+ "and change its behavior. Otherwise it uses qstat to work out when the job finished\n" .
+ "Options:\n" .
+ " --config <config-file> (default: $config)\n" .
+ " --mem <mem-requirement> (e.g. --mem 2G, --mem 500M, \n" .
+ " also support K and numbers mean bytes)\n" .
+ " --num-threads <num-threads> (default: $num_threads)\n" .
+ " --max-jobs-run <num-jobs>\n" .
+ " --gpu <0|1> (default: $gpu)\n";
+ exit 1;
+}
+
+if (@ARGV < 2) {
+ print_usage();
+}
+
+for (my $x = 1; $x <= 3; $x++) { # This for-loop is to
+ # allow the JOB=1:n option to be interleaved with the
+ # options to qsub.
+ while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) {
+ my $switch = shift @ARGV;
+
+ if ($switch eq "-V") {
+ $qsub_opts .= "-V ";
+ } else {
+ my $argument = shift @ARGV;
+ if ($argument =~ m/^--/) {
+ print STDERR "WARNING: suspicious argument '$argument' to $switch; starts with '-'\n";
+ }
+ if ($switch eq "-sync" && $argument =~ m/^[yY]/) {
+ $sync = 1;
+ $qsub_opts .= "$switch $argument ";
+ } elsif ($switch eq "-pe") { # e.g. -pe smp 5
+ my $argument2 = shift @ARGV;
+ $qsub_opts .= "$switch $argument $argument2 ";
+ $num_threads = $argument2;
+ } elsif ($switch =~ m/^--/) { # Config options
+ # Convert CLI option to variable name
+ # by removing '--' from the switch and replacing any
+ # '-' with a '_'
+ $switch =~ s/^--//;
+ $switch =~ s/-/_/g;
+ $cli_options{$switch} = $argument;
+ } else { # Other qsub options - passed as is
+ $qsub_opts .= "$switch $argument ";
+ }
+ }
+ }
+ if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:20
+ $array_job = 1;
+ $jobname = $1;
+ $jobstart = $2;
+ $jobend = $3;
+ my $range = shift; # keep the consumed JOB=a:b string for error messages
+ if ($jobstart > $jobend) {
+ die "queue.pl: invalid job range $range";
+ }
+ if ($jobstart <= 0) {
+ die "queue.pl: invalid job range $range, start must be strictly positive (this is a GridEngine limitation).";
+ }
+ } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1.
+ $array_job = 1;
+ $jobname = $1;
+ $jobstart = $2;
+ $jobend = $2;
+ shift;
+ } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) {
+ print STDERR "Warning: suspicious first argument to queue.pl: $ARGV[0]\n";
+ }
+}
+
+if (@ARGV < 2) {
+ print_usage();
+}
+
+if (exists $cli_options{"config"}) {
+ $config = $cli_options{"config"};
+}
+
+my $default_config_file = <<'EOF';
+# Default configuration
+command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
+option mem=* -l mem_free=$0,ram_free=$0
+option mem=0 # Do not add anything to qsub_opts
+option num_threads=* -pe smp $0
+option num_threads=1 # Do not add anything to qsub_opts
+option max_jobs_run=* -tc $0
+default gpu=0
+option gpu=0
+option gpu=* -l gpu=$0 -q g.q
+EOF
+
+# Here the configuration options specified by the user on the command line
+# (e.g. --mem 2G) are converted to options to the qsub system as defined in
+# the config file. (e.g. if the config file has the line
+# "option mem=* -l ram_free=$0,mem_free=$0"
+# and the user has specified '--mem 2G' on the command line, the options
+# passed to the queue system would be "-l ram_free=2G,mem_free=2G".
+# A more detailed description of the ways the options would be handled is at
+# the top of this file.
+
+my $opened_config_file = 1;
+
+open CONFIG, "<$config" or $opened_config_file = 0;
+
+my %cli_config_options = ();
+my %cli_default_options = ();
+
+if ($opened_config_file == 0 && exists($cli_options{"config"})) {
+ print STDERR "Could not open config file $config\n";
+ exit(1);
+} elsif ($opened_config_file == 0 && !exists($cli_options{"config"})) {
+ # Open the default config file instead
+ open (CONFIG, "echo '$default_config_file' |") or die "Unable to open pipe\n";
+ $config = "Default config";
+}
+
+my $qsub_cmd = "";
+my $read_command = 0;
+
+while(<CONFIG>) {
+ chomp;
+ my $line = $_;
+ $_ =~ s/\s*#.*//g;
+ if ($_ eq "") { next; }
+ if ($_ =~ /^command (.+)/) {
+ $read_command = 1;
+ $qsub_cmd = $1 . " ";
+ } elsif ($_ =~ m/^option ([^=]+)=\* (.+)$/) {
+ # Config option that needs replacement with parameter value read from CLI
+ # e.g.: option mem=* -l mem_free=$0,ram_free=$0
+ my $option = $1; # mem
+ my $arg= $2; # -l mem_free=$0,ram_free=$0
+ if ($arg !~ m:\$0:) {
+ die "Unable to parse line '$line' in config file ($config)\n";
+ }
+ if (exists $cli_options{$option}) {
+ # Replace $0 with the argument read from command line.
+ # e.g. "-l mem_free=$0,ram_free=$0" -> "-l mem_free=2G,ram_free=2G"
+ $arg =~ s/\$0/$cli_options{$option}/g;
+ $cli_config_options{$option} = $arg;
+ }
+ } elsif ($_ =~ m/^option ([^=]+)=(\S+)\s?(.*)$/) {
+ # Config option that does not need replacement
+ # e.g. option gpu=0 -q all.q
+ my $option = $1; # gpu
+ my $value = $2; # 0
+ my $arg = $3; # -q all.q
+ if (exists $cli_options{$option}) {
+ $cli_default_options{($option,$value)} = $arg;
+ }
+ } elsif ($_ =~ m/^default (\S+)=(\S+)/) {
+ # Default options. Used for setting default values to options i.e. when
+ # the user does not specify the option on the command line
+ # e.g. default gpu=0
+ my $option = $1; # gpu
+ my $value = $2; # 0
+ if (!exists $cli_options{$option}) {
+ # If the user has specified this option on the command line, then we
+ # don't have to do anything
+ $cli_options{$option} = $value;
+ }
+ } else {
+ print STDERR "queue.pl: unable to parse line '$line' in config file ($config)\n";
+ exit(1);
+ }
+}
+
+close(CONFIG);
+
+if ($read_command != 1) {
+ print STDERR "queue.pl: config file ($config) does not contain the line \"command .*\"\n";
+ exit(1);
+}
+
+for my $option (keys %cli_options) {
+ if ($option eq "config") { next; }
+ if ($option eq "max_jobs_run" && $array_job != 1) { next; }
+ my $value = $cli_options{$option};
+
+ if (exists $cli_default_options{($option,$value)}) {
+ $qsub_opts .= "$cli_default_options{($option,$value)} ";
+ } elsif (exists $cli_config_options{$option}) {
+ $qsub_opts .= "$cli_config_options{$option} ";
+ } else {
+ if ($opened_config_file == 0) { $config = "default config file"; }
+ die "queue.pl: Command line option $option not described in $config (or value '$value' not allowed)\n";
+ }
+}
+
+my $cwd = getcwd();
+my $logfile = shift @ARGV;
+
+if ($array_job == 1 && $logfile !~ m/$jobname/
+ && $jobend > $jobstart) {
+ print STDERR "queue.pl: you are trying to run a parallel job but "
+ . "you are putting the output into just one log file ($logfile)\n";
+ exit(1);
+}
+
+#
+# Work out the command; quote escaping is done here.
+# Note: the rules for escaping stuff are worked out pretty
+# arbitrarily, based on what we want it to do. Some things that
+# we pass as arguments to queue.pl, such as "|", we want to be
+# interpreted by bash, so we don't escape them. Other things,
+# such as archive specifiers like 'ark:gunzip -c foo.gz|', we want
+# to be passed, in quotes, to the Kaldi program. Our heuristic
+# is that stuff with spaces in should be quoted. This doesn't
+# always work.
+#
+my $cmd = "";
+
+foreach my $x (@ARGV) {
+ if ($x =~ m/^\S+$/) { $cmd .= $x . " "; } # If string contains no spaces, take
+ # as-is.
+ elsif ($x =~ m:\":) { $cmd .= "'$x' "; } # else if no dbl-quotes, use single
+ else { $cmd .= "\"$x\" "; } # else use double.
+}
+
+#
+# Work out the location of the script file, and open it for writing.
+#
+my $dir = dirname($logfile);
+my $base = basename($logfile);
+my $qdir = "$dir/q";
+$qdir =~ s:/(log|LOG)/*q:/q:; # If qdir ends in .../log/q, make it just .../q.
+my $queue_logfile = "$qdir/$base";
+
+if (!-d $dir) { system "mkdir -p $dir 2>/dev/null"; } # another job may be doing this...
+if (!-d $dir) { die "Cannot make the directory $dir\n"; }
+# make a directory called "q",
+# where we will put the log created by qsub... normally this doesn't contain
+# anything interesting, everything goes to $logfile.
+if (! -d "$qdir") {
+ system "mkdir $qdir 2>/dev/null";
+ sleep(5); ## This is to fix an issue we encountered in denominator lattice creation,
+ ## where if e.g. the exp/tri2b_denlats/log/15/q directory had just been
+ ## created and the job immediately ran, it would die with an error because nfs
+ ## had not yet synced. I'm also decreasing the acdirmin and acdirmax in our
+ ## NFS settings to something like 5 seconds.
+}
+
+my $queue_array_opt = "";
+if ($array_job == 1) { # It's an array job.
+ $queue_array_opt = "-t $jobstart:$jobend";
+ $logfile =~ s/$jobname/\$SGE_TASK_ID/g; # This variable will get
+ # replaced by qsub, in each job, with the job-id.
+ $cmd =~ s/$jobname/\$\{SGE_TASK_ID\}/g; # same for the command...
+ $queue_logfile =~ s/\.?$jobname//; # the log file in the q/ subdirectory
+ # is for the queue to put its log, and this doesn't need the task array subscript
+ # so we remove it.
+}
+
+# queue_scriptfile is as $queue_logfile [e.g. dir/q/foo.log] but
+# with the suffix .sh.
+my $queue_scriptfile = $queue_logfile;
+($queue_scriptfile =~ s/\.[a-zA-Z]{1,5}$/.sh/) || ($queue_scriptfile .= ".sh");
+if ($queue_scriptfile !~ m:^/:) {
+ $queue_scriptfile = $cwd . "/" . $queue_scriptfile; # just in case.
+}
+
+# We'll write to the standard input of "qsub" (the file-handle Q),
+# the job that we want it to execute.
+# Also keep our current PATH around, just in case there was something
+# in it that we need (although we also source ./path.sh)
+
+my $syncfile = "$qdir/done.$$";
+
+system("rm $queue_logfile $syncfile 2>/dev/null");
+#
+# Write to the script file, and then close it.
+#
+open(Q, ">$queue_scriptfile") || die "Failed to write to $queue_scriptfile";
+
+print Q "#!/bin/bash\n";
+print Q "cd $cwd\n";
+print Q ". ./path.sh\n";
+print Q "( echo '#' Running on \`hostname\`\n";
+print Q " echo '#' Started at \`date\`\n";
+print Q " echo -n '# '; cat <<EOF\n";
+print Q "$cmd\n"; # this is a way of echoing the command into a comment in the log file,
+print Q "EOF\n"; # without having to escape things like "|" and quote characters.
+print Q ") >$logfile\n";
+print Q "time1=\`date +\"%s\"\`\n";
+print Q " ( $cmd ) 2>>$logfile >>$logfile\n";
+print Q "ret=\$?\n";
+print Q "time2=\`date +\"%s\"\`\n";
+print Q "echo '#' Accounting: time=\$((\$time2-\$time1)) threads=$num_threads >>$logfile\n";
+print Q "echo '#' Finished at \`date\` with status \$ret >>$logfile\n";
+print Q "[ \$ret -eq 137 ] && exit 100;\n"; # If process was killed (e.g. oom) it will exit with status 137;
+ # let the script return with status 100 which will put it to E state; more easily rerunnable.
+if ($array_job == 0) { # not an array job
+ print Q "touch $syncfile\n"; # so we know it's done.
+} else {
+ print Q "touch $syncfile.\$SGE_TASK_ID\n"; # touch a bunch of sync-files.
+}
+print Q "exit \$[\$ret ? 1 : 0]\n"; # avoid status 100 which grid-engine
+print Q "## submitted with:\n"; # treats specially.
+$qsub_cmd .= "-o $queue_logfile $qsub_opts $queue_array_opt $queue_scriptfile >>$queue_logfile 2>&1";
+print Q "# $qsub_cmd\n";
+if (!close(Q)) { # close was not successful
+ die "Failed to close the script file (full disk?)";
+}
+
+my $ret = system ($qsub_cmd);
+if ($ret != 0) {
+ if ($sync && $ret == 256) { # this is the exit status when a job failed (bad exit status)
+ if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/*/g; }
+ print STDERR "queue.pl: job writing to $logfile failed\n";
+ } else {
+ print STDERR "queue.pl: error submitting jobs to queue (return status was $ret)\n";
+ print STDERR "queue log file is $queue_logfile, command was $qsub_cmd\n";
+ print STDERR `tail $queue_logfile`;
+ }
+ exit(1);
+}
+
+my $sge_job_id;
+if (! $sync) { # We're not submitting with -sync y, so we
+ # need to wait for the jobs to finish. We wait for the
+ # sync-files we "touched" in the script to exist.
+ my @syncfiles = ();
+ if (!defined $jobname) { # not an array job.
+ push @syncfiles, $syncfile;
+ } else {
+ for (my $jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ push @syncfiles, "$syncfile.$jobid";
+ }
+ }
+ # We will need the sge_job_id, to check that job still exists
+ { # Get the SGE job-id from the log file in q/
+ open(L, "<$queue_logfile") || die "Error opening log file $queue_logfile";
+ undef $sge_job_id;
+ while (<L>) {
+ if (m/Your job\S* (\d+)[. ].+ has been submitted/) {
+ if (defined $sge_job_id) {
+ die "Error: your job was submitted more than once (see $queue_logfile)";
+ } else {
+ $sge_job_id = $1;
+ }
+ }
+ }
+ close(L);
+ if (!defined $sge_job_id) {
+ die "Error: log file $queue_logfile does not specify the SGE job-id.";
+ }
+ }
+ my $check_sge_job_ctr=1;
+ #
+ my $wait = 0.1;
+ my $counter = 0;
+ foreach my $f (@syncfiles) {
+ # wait for them to finish one by one.
+ while (! -f $f) {
+ sleep($wait);
+ $wait *= 1.2;
+ if ($wait > 3.0) {
+ $wait = 3.0; # never wait more than 3 seconds.
+ # the following (.kick) commands are basically workarounds for NFS bugs.
+ if (rand() < 0.25) { # don't do this every time...
+ if (rand() > 0.5) {
+ system("touch $qdir/.kick");
+ } else {
+ system("rm $qdir/.kick 2>/dev/null");
+ }
+ }
+ if ($counter++ % 10 == 0) {
+ # This seems to kick NFS in the teeth to cause it to refresh the
+ # directory. I've seen cases where it would indefinitely fail to get
+ # updated, even though the file exists on the server.
+ # Only do this every 10 waits (every 30 seconds) though, or if there
+ # are many jobs waiting they can overwhelm the file server.
+ system("ls $qdir >/dev/null");
+ }
+ }
+
+ # Check that the job exists in SGE. Job can be killed if duration
+ # exceeds some hard limit, or in case of a machine shutdown.
+ if (($check_sge_job_ctr++ % 10) == 0) { # Don't run qstat too often, avoid stress on SGE.
+ if ( -f $f ) { next; }; #syncfile appeared: OK.
+ $ret = system("qstat -j $sge_job_id >/dev/null 2>/dev/null");
+ # system(...) : To get the actual exit value, shift $ret right by eight bits.
+ if ($ret>>8 == 1) { # Job does not seem to exist
+ # Don't consider immediately missing job as error, first wait some
+ # time to make sure it is not just delayed creation of the syncfile.
+
+ sleep(3);
+ # Sometimes NFS gets confused and thinks it's transmitted the directory
+ # but it hasn't, due to timestamp issues. Changing something in the
+ # directory will usually fix that.
+ system("touch $qdir/.kick");
+ system("rm $qdir/.kick 2>/dev/null");
+ if ( -f $f ) { next; } #syncfile appeared, ok
+ sleep(7);
+ system("touch $qdir/.kick");
+ sleep(1);
+ system("rm $qdir/.kick 2>/dev/null");
+ if ( -f $f ) { next; } #syncfile appeared, ok
+ sleep(60);
+ system("touch $qdir/.kick");
+ sleep(1);
+ system("rm $qdir/.kick 2>/dev/null");
+ if ( -f $f ) { next; } #syncfile appeared, ok
+ $f =~ m/\.(\d+)$/ || die "Bad sync-file name $f";
+ my $job_id = $1;
+ if (defined $jobname) {
+ $logfile =~ s/\$SGE_TASK_ID/$job_id/g;
+ }
+ my $last_line = `tail -n 1 $logfile`;
+ if ($last_line =~ m/status 0$/ && (-M $logfile) < 0) {
+ # if the last line of $logfile ended with "status 0" and
+ # $logfile is newer than this program [(-M $logfile) gives the
+ # time elapsed between file modification and the start of this
+ # program], then we assume the program really finished OK,
+ # and maybe something is up with the file system.
+ print STDERR "**queue.pl: syncfile $f was not created but job seems\n" .
+ "**to have finished OK. Probably your file-system has problems.\n" .
+ "**This is just a warning.\n";
+ last;
+ } else {
+ chop $last_line;
+ print STDERR "queue.pl: Error, unfinished job no " .
+ "longer exists, log is in $logfile, last line is '$last_line', " .
+ "syncfile is $f, return status of qstat was $ret\n" .
+ "Possible reasons: a) Exceeded time limit? -> Use more jobs!" .
+ " b) Shutdown/Frozen machine? -> Run again!\n";
+ exit(1);
+ }
+ } elsif ($ret != 0) {
+ print STDERR "queue.pl: Warning: qstat command returned status $ret (qstat -j $sge_job_id,$!)\n";
+ }
+ }
+ }
+ }
+ my $all_syncfiles = join(" ", @syncfiles);
+ system("rm $all_syncfiles 2>/dev/null");
+}
+
+# OK, at this point we are synced; we know the job is done.
+# But we don't know about its exit status. We'll look at $logfile for this.
+# First work out an array @logfiles of file-locations we need to
+# read (just one, unless it's an array job).
+my @logfiles = ();
+if (!defined $jobname) { # not an array job.
+ push @logfiles, $logfile;
+} else {
+ for (my $jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ my $l = $logfile;
+ $l =~ s/\$SGE_TASK_ID/$jobid/g;
+ push @logfiles, $l;
+ }
+}
+
+my $num_failed = 0;
+my $status = 1;
+foreach my $l (@logfiles) {
+ my @wait_times = (0.1, 0.2, 0.2, 0.3, 0.5, 0.5, 1.0, 2.0, 5.0, 5.0, 5.0, 10.0, 25.0);
+ for (my $iter = 0; $iter <= @wait_times; $iter++) {
+ my $line = `tail -10 $l 2>/dev/null`; # Note: although this line should be the last
+ # line of the file, I've seen cases where it was not quite the last line because
+ # of delayed output by the process that was running, or processes it had called.
+ # so tail -10 gives it a little leeway.
+ if ($line =~ m/with status (\d+)/) {
+ $status = $1;
+ last;
+ } else {
+ if ($iter < @wait_times) {
+ sleep($wait_times[$iter]);
+ } else {
+ if (! -f $l) {
+ print STDERR "Log-file $l does not exist.\n";
+ } else {
+ print STDERR "The last line of log-file $l does not seem to indicate the "
+ . "return status as expected\n";
+ }
+ exit(1); # Something went wrong with the queue, or the
+ # machine it was running on, probably.
+ }
+ }
+ }
+ # OK, now we have $status, which is the return-status of
+ # the command in the job.
+ if ($status != 0) { $num_failed++; }
+}
+if ($num_failed == 0) { exit(0); }
+else { # we failed.
+ if (@logfiles == 1) {
+ if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/$jobstart/g; }
+ print STDERR "queue.pl: job failed with status $status, log is in $logfile\n";
+ if ($logfile =~ m/JOB/) {
+ print STDERR "queue.pl: probably you forgot to put JOB=1:\$nj in your script.\n";
+ }
+ } else {
+ if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/*/g; }
+ my $numjobs = 1 + $jobend - $jobstart;
+ print STDERR "queue.pl: $num_failed / $numjobs failed, log is in $logfile\n";
+ }
+ exit(1);
+}
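
Tying the config machinery together: with the default config shown above, a submission such as the following (my-program hypothetical)

    queue.pl --mem 2G --num-threads 4 JOB=1:10 exp/log/foo.JOB.log my-program --job=JOB

expands --mem 2G through the "option mem=*" line into "-l mem_free=2G,ram_free=2G", expands --num-threads 4 into "-pe smp 4", leaves gpu at its default of 0 (which adds nothing), and submits a 10-way task array (-t 1:10) in which JOB is replaced by $SGE_TASK_ID in both the command and the log path.
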
diff --git a/kaldi_decode/utils/run.pl b/kaldi_decode/utils/run.pl
new file mode 100755
index 0000000..6145a7a
--- /dev/null
+++ b/kaldi_decode/utils/run.pl
@@ -0,0 +1,264 @@
+#!/usr/bin/env perl
+use warnings; # sed replacement for the -w perl parameter
+
+# In general, doing
+# run.pl some.log a b c is like running the command a b c in
+# the bash shell, and putting the standard error and output into some.log.
+# To run parallel jobs (backgrounded on the host machine), you can do (e.g.)
+# run.pl JOB=1:4 some.JOB.log a b c JOB is like running the command a b c JOB
+# and putting it in some.JOB.log, for each one. [Note: JOB can be any identifier].
+# If any of the jobs fails, this script will fail.
+
+# A typical example is:
+# run.pl some.log my-prog "--opt=foo bar" foo \| other-prog baz
+# and run.pl will run something like:
+# ( my-prog '--opt=foo bar' foo | other-prog baz ) >& some.log
+#
+# Basically it takes the command-line arguments, quotes them
+# as necessary to preserve spaces, and evaluates them with bash.
+# In addition it puts the command line at the top of the log, and
+# the start and end times of the command at the beginning and end.
+# The reason why this is useful is so that we can create a different
+# version of this program that uses a queueing system instead.
+
+# use Data::Dumper;
+
+@ARGV < 2 && die "usage: run.pl log-file command-line arguments...";
+
+
+$max_jobs_run = -1;
+$jobstart = 1;
+$jobend = 1;
+$ignored_opts = ""; # These will be ignored.
+
+# First parse an option like JOB=1:4, and any
+# options that would normally be given to
+# queue.pl, which we will just discard.
+
+if (@ARGV > 0) {
+ while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) { # parse any options
+ # that would normally go to qsub, but which will be ignored here.
+ $switch = shift @ARGV;
+ if ($switch eq "-V") {
+ $ignored_opts .= "-V ";
+ } elsif ($switch eq "--max-jobs-run" || $switch eq "-tc") {
+ # we do support the option --max-jobs-run n, and its GridEngine form -tc n.
+ $max_jobs_run = shift @ARGV;
+ if (! ($max_jobs_run > 0)) {
+ die "run.pl: invalid option --max-jobs-run $max_jobs_run";
+ }
+ } else {
+ $option = shift @ARGV;
+ if ($switch eq "-sync" && $option =~ m/^[yY]/) {
+ $ignored_opts .= "-sync "; # Note: in the
+ # corresponding code in queue.pl it says instead, just "$sync = 1;".
+ }
+ $ignored_opts .= "$switch $option ";
+ if ($switch eq "-pe") { # e.g. -pe smp 5
+ $option2 = shift @ARGV;
+ $ignored_opts .= "$option2 ";
+ }
+ }
+ }
+ if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:10
+ $jobname = $1;
+ $jobstart = $2;
+ $jobend = $3;
+ $range = shift; # keep the consumed JOB=a:b string for error messages
+ if ($jobstart > $jobend) {
+ die "run.pl: invalid job range $range";
+ }
+ if ($jobstart <= 0) {
+ die "run.pl: invalid job range $range, start must be strictly positive (this is required for GridEngine compatibility).";
+ }
+ } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1.
+ $jobname = $1;
+ $jobstart = $2;
+ $jobend = $2;
+ shift;
+ } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) {
+ print STDERR "run.pl: Warning: suspicious first argument to run.pl: $ARGV[0]\n";
+ }
+}
+
+# Users found this message confusing so we are removing it.
+# if ($ignored_opts ne "") {
+# print STDERR "run.pl: Warning: ignoring options \"$ignored_opts\"\n";
+# }
+
+if ($max_jobs_run == -1) { # If --max-jobs-run option not set,
+ # then work out the number of processors if possible,
+ # and set it based on that.
+ $max_jobs_run = 0;
+ if (open(P, "</proc/cpuinfo")) { # Linux
+ while (<P>) { if (m/^processor/) { $max_jobs_run++; } }
+ if ($max_jobs_run == 0) {
+ print STDERR "run.pl: Warning: failed to detect any processors from /proc/cpuinfo\n";
+ $max_jobs_run = 10; # reasonable default.
+ }
+ close(P);
+ } elsif (open(P, "sysctl -a |")) { # BSD/Darwin
+ while (<P>) {
+ if (m/hw\.ncpu\s*[:=]\s*(\d+)/) { # hw.ncpu = 4, or hw.ncpu: 4
+ $max_jobs_run = $1;
+ last;
+ }
+ }
+ close(P);
+ if ($max_jobs_run == 0) {
+ print STDERR "run.pl: Warning: failed to detect any processors from sysctl -a\n";
+ $max_jobs_run = 10; # reasonable default.
+ }
+ } else {
+ # allow at most 32 jobs at once, on non-UNIX systems; change this code
+ # if you need to change this default.
+ $max_jobs_run = 32;
+ }
+ # The just-computed value of $max_jobs_run is just the number of processors
+ # (or our best guess); and if it happens that the number of jobs we need to
+ # run is just slightly above $max_jobs_run, it will make sense to increase
+ # $max_jobs_run to equal the number of jobs, so we don't have a small number
+ # of leftover jobs.
+ $num_jobs = $jobend - $jobstart + 1;
+ if ($num_jobs > $max_jobs_run && $num_jobs < 1.4 * $max_jobs_run) {
+ $max_jobs_run = $num_jobs;
+ }
+}
+
+$logfile = shift @ARGV;
+
+if (defined $jobname && $logfile !~ m/$jobname/ &&
+ $jobend > $jobstart) {
+ print STDERR "run.pl: you are trying to run a parallel job but "
+ . "you are putting the output into just one log file ($logfile)\n";
+ exit(1);
+}
+
+$cmd = "";
+
+foreach $x (@ARGV) {
+ if ($x =~ m/^\S+$/) { $cmd .= $x . " "; }
+ elsif ($x =~ m:\":) { $cmd .= "'$x' "; }
+ else { $cmd .= "\"$x\" "; }
+}
+
+#$Data::Dumper::Indent=0;
+$ret = 0;
+$numfail = 0;
+%active_pids=();
+
+use POSIX ":sys_wait_h";
+for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ if (scalar(keys %active_pids) >= $max_jobs_run) {
+
+ # Let's wait for a change in any child's status,
+ # then work out which child finished
+ $r = waitpid(-1, 0);
+ $code = $?;
+ if ($r < 0 ) { die "run.pl: Error waiting for child process"; } # should never happen.
+ if ( defined $active_pids{$r} ) {
+ $jid=$active_pids{$r};
+ $fail[$jid]=$code;
+ if ($code !=0) { $numfail++;}
+ delete $active_pids{$r};
+ # print STDERR "Finished: $r/$jid " . Dumper(\%active_pids) . "\n";
+ } else {
+ die "run.pl: Cannot find the PID of the chold process that just finished.";
+ }
+
+ # In theory we could do a non-blocking waitpid over all jobs running just
+ # to find out if only one or more jobs finished during the previous waitpid()
+ # However, we just omit this and will reap the next one in the next pass
+ # through the for(;;) cycle
+ }
+ $childpid = fork();
+ if (!defined $childpid) { die "run.pl: Error forking in run.pl (writing to $logfile)"; }
+ if ($childpid == 0) { # We're in the child... this branch
+ # executes the job and returns (possibly with an error status).
+ if (defined $jobname) {
+ $cmd =~ s/$jobname/$jobid/g;
+ $logfile =~ s/$jobname/$jobid/g;
+ }
+ system("mkdir -p `dirname $logfile` 2>/dev/null");
+ open(F, ">$logfile") || die "run.pl: Error opening log file $logfile";
+ print F "# " . $cmd . "\n";
+ print F "# Started at " . `date`;
+ $starttime = `date +'%s'`;
+ print F "#\n";
+ close(F);
+
+ # Pipe into bash.. make sure we're not using any other shell.
+ open(B, "|bash") || die "run.pl: Error opening shell command";
+ print B "( " . $cmd . ") 2>>$logfile >> $logfile";
+ close(B); # If there was an error, exit status is in $?
+ $ret = $?;
+
+ $lowbits = $ret & 127;
+ $highbits = $ret >> 8;
+ if ($lowbits != 0) { $return_str = "code $highbits; signal $lowbits" }
+ else { $return_str = "code $highbits"; }
+
+ $endtime = `date +'%s'`;
+ open(F, ">>$logfile") || die "run.pl: Error opening log file $logfile (again)";
+ $enddate = `date`;
+ chop $enddate;
+ print F "# Accounting: time=" . ($endtime - $starttime) . " threads=1\n";
+ print F "# Ended ($return_str) at " . $enddate . ", elapsed time " . ($endtime-$starttime) . " seconds\n";
+ close(F);
+ exit($ret == 0 ? 0 : 1);
+ } else {
+ $pid[$jobid] = $childpid;
+ $active_pids{$childpid} = $jobid;
+ # print STDERR "Queued: " . Dumper(\%active_pids) . "\n";
+ }
+}
+
+# Now we have submitted all the jobs, lets wait until all the jobs finish
+foreach $child (keys %active_pids) {
+ $jobid=$active_pids{$child};
+ $r = waitpid($pid[$jobid], 0);
+ $code = $?;
+ if ($r == -1) { die "run.pl: Error waiting for child process"; } # should never happen.
+ if ($r != 0) { $fail[$jobid]=$code; $numfail++ if $code!=0; } # record this job's exit status
+}
+
+# Some sanity checks:
+# The $fail array should not contain undefined codes
+# The number of non-zeros in that array should be equal to $numfail
+# We cannot do foreach() here, as the JOB ids do not necessarily start at zero
+$failed_jids=0;
+for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ $job_return = $fail[$jobid];
+ if (not defined $job_return ) {
+ # print Dumper(\@fail);
+
+ die "run.pl: Sanity check failed: we have indication that some jobs are running " .
+ "even after we waited for all jobs to finish" ;
+ }
+ if ($job_return != 0 ){ $failed_jids++;}
+}
+if ($failed_jids != $numfail) {
+ die "run.pl: Sanity check failed: cannot find out how many jobs failed ($failed_jids x $numfail)."
+}
+if ($numfail > 0) { $ret = 1; }
+
+if ($ret != 0) {
+ $njobs = $jobend - $jobstart + 1;
+ if ($njobs == 1) {
+ if (defined $jobname) {
+ $logfile =~ s/$jobname/$jobstart/; # only one numbered job, so replace name with
+ # that job.
+ }
+ print STDERR "run.pl: job failed, log is in $logfile\n";
+ if ($logfile =~ m/JOB/) {
+ print STDERR "run.pl: probably you forgot to put JOB=1:\$nj in your script.";
+ }
+ }
+ else {
+ $logfile =~ s/$jobname/*/g;
+ print STDERR "run.pl: $numfail / $njobs failed, log is in $logfile\n";
+ }
+}
+
+
+exit ($ret);
diff --git a/kaldi_decode/utils/split_data.sh b/kaldi_decode/utils/split_data.sh
new file mode 100755
index 0000000..941890c
--- /dev/null
+++ b/kaldi_decode/utils/split_data.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# Copyright 2010-2013 Microsoft Corporation
+# Johns Hopkins University (Author: Daniel Povey)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+# MERCHANTABILITY OR NON-INFRINGEMENT.
+# See the Apache 2 License for the specific language governing permissions and
+# limitations under the License.
+
+split_per_spk=true
+if [ "$1" == "--per-utt" ]; then
+ split_per_spk=false
+ shift
+fi
+
+if [ $# != 2 ]; then
+ echo "Usage: split_data.sh [--per-utt] <data-dir> <num-to-split>"
+ echo "This script will not split the data-dir if it detects that the output is newer than the input."
+ echo "By default it splits per speaker (so each speaker is in only one split dir),"
+ echo "but with the --per-utt option it will ignore the speaker information while splitting."
+ exit 1
+fi
+
+data=$1
+numsplit=$2
+
+if [ $numsplit -le 0 ]; then
+ echo "Invalid num-split argument $numsplit";
+ exit 1;
+fi
+
+n=0;
+feats=""
+wavs=""
+utt2spks=""
+texts=""
+
+nu=`cat $data/utt2spk | wc -l`
+nf=`cat $data/feats.scp 2>/dev/null | wc -l`
+nt=`cat $data/text 2>/dev/null | wc -l` # take it as zero if no such file
+if [ -f $data/feats.scp ] && [ $nu -ne $nf ]; then
+ echo "** split_data.sh: warning, #lines is (utt2spk,feats.scp) is ($nu,$nf); you can "
+ echo "** use utils/fix_data_dir.sh $data to fix this."
+fi
+if [ -f $data/text ] && [ $nu -ne $nt ]; then
+ echo "** split_data.sh: warning, #lines is (utt2spk,text) is ($nu,$nt); you can "
+ echo "** use utils/fix_data_dir.sh to fix this."
+fi
+
+s1=$data/split$numsplit/1
+if [ ! -d $s1 ]; then
+ need_to_split=true
+else
+ need_to_split=false
+ for f in utt2spk spk2utt spk2warp feats.scp text wav.scp cmvn.scp spk2gender \
+ vad.scp segments reco2file_and_channel utt2lang; do
+ if [[ -f $data/$f && ( ! -f $s1/$f || $s1/$f -ot $data/$f ) ]]; then
+ need_to_split=true
+ fi
+ done
+fi
+
+if ! $need_to_split; then
+ exit 0;
+fi
+
+for n in `seq $numsplit`; do
+ mkdir -p $data/split$numsplit/$n
+ utt2spks="$utt2spks $data/split$numsplit/$n/utt2spk"
+done
+
+if $split_per_spk; then
+ utt2spk_opt="--utt2spk=$data/utt2spk"
+else
+ utt2spk_opt=
+fi
+
+# If lockfile is not installed, just don't lock it. It's not a big deal.
+which lockfile >&/dev/null && lockfile -l 60 $data/.split_lock
+
+utils/split_scp.pl $utt2spk_opt $data/utt2spk $utt2spks || exit 1
+
+for n in `seq $numsplit`; do
+ dsn=$data/split$numsplit/$n
+ utils/utt2spk_to_spk2utt.pl $dsn/utt2spk > $dsn/spk2utt || exit 1;
+done
+
+maybe_wav_scp=
+if [ ! -f $data/segments ]; then
+ maybe_wav_scp=wav.scp # If there is no segments file, then wav file is
+ # indexed per utt.
+fi
+
+# split some things that are indexed by utterance.
+for f in feats.scp text vad.scp utt2lang $maybe_wav_scp; do
+ if [ -f $data/$f ]; then
+ utils/filter_scps.pl JOB=1:$numsplit \
+ $data/split$numsplit/JOB/utt2spk $data/$f $data/split$numsplit/JOB/$f || exit 1;
+ fi
+done
+
+# split some things that are indexed by speaker
+for f in spk2gender spk2warp cmvn.scp; do
+ if [ -f $data/$f ]; then
+ utils/filter_scps.pl JOB=1:$numsplit \
+ $data/split$numsplit/JOB/spk2utt $data/$f $data/split$numsplit/JOB/$f || exit 1;
+ fi
+done
+
+for n in `seq $numsplit`; do
+ dsn=$data/split$numsplit/$n
+ if [ -f $data/segments ]; then
+ utils/filter_scp.pl $dsn/utt2spk $data/segments > $dsn/segments
+ awk '{print $2;}' $dsn/segments | sort | uniq > $data/tmp.reco # recording-ids.
+ if [ -f $data/reco2file_and_channel ]; then
+ utils/filter_scp.pl $data/tmp.reco $data/reco2file_and_channel > $dsn/reco2file_and_channel
+ fi
+ if [ -f $data/wav.scp ]; then
+ utils/filter_scp.pl $data/tmp.reco $data/wav.scp >$dsn/wav.scp
+ fi
+ rm $data/tmp.reco
+ fi # else it would have been handled above, see maybe_wav_scp.
+done
+
+rm -f $data/.split_lock
+
+exit 0
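
For example, the split requested by decode.sh amounts to the following; each split directory receives per-speaker slices of utt2spk, spk2utt, feats.scp, cmvn.scp and so on, and the script is a no-op while the existing splits are newer than their inputs:

    utils/split_data.sh data/test 4
    ls data/test/split4/1    # utt2spk spk2utt feats.scp cmvn.scp ...
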