Diffstat (limited to 'kaldi_decode')
-rwxr-xr-x | kaldi_decode/README                 |  13
-rwxr-xr-x | kaldi_decode/cmd.sh                 |  36
-rw-r--r-- | kaldi_decode/conf/decode_dnn.config |   2
-rwxr-xr-x | kaldi_decode/decode.sh              | 124
-rwxr-xr-x | kaldi_decode/local/score.sh         |  67
-rwxr-xr-x | kaldi_decode/path.sh                |   5
-rw-r--r-- | kaldi_decode/src/Makefile           |  12
-rw-r--r-- | kaldi_decode/src/nerv4decode.lua    |  79
-rw-r--r-- | kaldi_decode/src/nnet-forward.cc    | 215
-rwxr-xr-x | kaldi_decode/utils/int2sym.pl       |  71
-rwxr-xr-x | kaldi_decode/utils/parse_options.sh |  97
-rwxr-xr-x | kaldi_decode/utils/queue.pl         | 580
-rwxr-xr-x | kaldi_decode/utils/run.pl           | 264
-rwxr-xr-x | kaldi_decode/utils/split_data.sh    | 135
14 files changed, 1700 insertions, 0 deletions
diff --git a/kaldi_decode/README b/kaldi_decode/README
new file mode 100755
index 0000000..8d0a95b
--- /dev/null
+++ b/kaldi_decode/README
@@ -0,0 +1,13 @@
+source path.sh
+source cmd.sh
+
+acwt=0.1
+dir=/slfs5/users/ymz09/chime/baseline/ASR/exp/nerv_seq/
+graph=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/graph_tgpr_5k
+data=/slfs5/users/ymz09/chime/baseline/ASR/data-fbank/et05_real_enhanced
+config=/slfs6/users/ymz09/nerv-project/nerv/nerv/examples/mpe_chime3.lua
+
+decode.sh --nj 4 --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \
+    $graph $data $config \
+    $dir/decode_tgpr_5k_et05_real_enhanced_nerv
+
diff --git a/kaldi_decode/cmd.sh b/kaldi_decode/cmd.sh
new file mode 100755
index 0000000..e2e54e8
--- /dev/null
+++ b/kaldi_decode/cmd.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# "queue.pl" uses qsub.  The options to it are
+# options to qsub.  If you have GridEngine installed,
+# change this to a queue you have access to.
+# Otherwise, use "run.pl", which will run jobs locally
+# (make sure your --num-jobs options are no more than
+# the number of cpus on your machine.)
+
+#a) JHU cluster options
+#export train_cmd="queue.pl -l arch=*64"
+#export decode_cmd="queue.pl -l arch=*64,mem_free=2G,ram_free=2G"
+#export mkgraph_cmd="queue.pl -l arch=*64,ram_free=4G,mem_free=4G"
+
+#export cuda_cmd="..."
+
+
+#b) BUT cluster options
+#export train_cmd="queue.pl -q all.q@@blade -l ram_free=1200M,mem_free=1200M"
+#export decode_cmd="queue.pl -q all.q@@blade -l ram_free=1700M,mem_free=1700M"
+#export decodebig_cmd="queue.pl -q all.q@@blade -l ram_free=4G,mem_free=4G"
+
+#export cuda_cmd="queue.pl -q long.q@@pco203 -l gpu=1"
+#export cuda_cmd="queue.pl -q long.q@pcspeech-gpu"
+#export mkgraph_cmd="queue.pl -q all.q@@servers -l ram_free=4G,mem_free=4G"
+
+#c) run it locally...
+export train_cmd=run.pl
+export decode_cmd=run.pl
+export cuda_cmd=run.pl
+export mkgraph_cmd=run.pl
+
+#export train_cmd='queue.pl'
+#export decode_cmd='queue.pl'
+#export cuda_cmd='queue.pl -l gpu=1 -l hostname="markov|date|hamming"'
+#export mkgraph_cmd='queue.pl'
+
diff --git a/kaldi_decode/conf/decode_dnn.config b/kaldi_decode/conf/decode_dnn.config
new file mode 100644
index 0000000..89dd992
--- /dev/null
+++ b/kaldi_decode/conf/decode_dnn.config
@@ -0,0 +1,2 @@
+beam=18.0 # beam for decoding.  Was 13.0 in the scripts.
+lattice_beam=10.0 # this has most effect on size of the lattices.
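For reference, a minimal sketch of how the exported $*_cmd variables are consumed by the scripts below; run.pl and queue.pl share the same interface (the log path and echo payload here are hypothetical):

export decode_cmd=run.pl
# <cmd> JOB=1:N <log-with-JOB> <command ...>  -- JOB is substituted in both places
$decode_cmd JOB=1:2 exp/demo/log/echo.JOB.log echo "this is job JOB"
# run.pl executes the two jobs locally, writing exp/demo/log/echo.1.log and echo.2.log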
diff --git a/kaldi_decode/decode.sh b/kaldi_decode/decode.sh
new file mode 100755
index 0000000..aa7e089
--- /dev/null
+++ b/kaldi_decode/decode.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+# Copyright 2012-2013 Karel Vesely, Daniel Povey
+# Apache 2.0
+
+# Begin configuration section.
+nnet=               # non-default location of DNN (optional)
+feature_transform=  # non-default location of feature_transform (optional)
+model=              # non-default location of transition model (optional)
+class_frame_counts= # non-default location of PDF counts (optional)
+srcdir=             # non-default location of DNN-dir (decouples model dir from decode dir)
+
+stage=0 # stage=1 skips lattice generation
+nj=4
+cmd=run.pl
+
+acwt=0.10 # note: only really affects pruning (scoring is on lattices).
+beam=13.0
+lattice_beam=8.0
+min_active=200
+max_active=7000 # limit of active tokens
+max_mem=50000000 # approx. limit to memory consumption during minimization in bytes
+nnet_forward_opts="--prior-scale=1.0"
+
+skip_scoring=false
+scoring_opts="--min-lmwt 4 --max-lmwt 15"
+
+num_threads=1 # if >1, will use latgen-faster-parallel
+parallel_opts= # Ignored now.
+use_gpu="no" # yes|no|optional
+# End configuration section.
+
+echo "$0 $@"  # Print the command line for logging
+
+[ -f ./path.sh ] && . ./path.sh; # source the path.
+. parse_options.sh || exit 1;
+
+if [ $# != 4 ]; then
+   echo "Usage: $0 [options] <graph-dir> <data-dir> <nerv-config> <decode-dir>"
+   echo "... where <decode-dir> is assumed to be a sub-directory of the directory"
+   echo " where the DNN and transition model is."
+   echo "e.g.: $0 exp/dnn1/graph_tgpr data/test config.lua exp/dnn1/decode_tgpr"
+   echo ""
+   echo "This script works on plain or modified features (CMN,delta+delta-delta),"
+   echo "which are then sent through feature-transform. It works out what type"
+   echo "of features you used from content of srcdir."
+   echo ""
+   echo "main options (for others, see top of script file)"
+   echo "  --config <config-file>                           # config containing options"
+   echo "  --nj <nj>                                        # number of parallel jobs"
+   echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+   echo ""
+   echo "  --srcdir <dir>                                   # non-default dir with DNN/models, can be different"
+   echo "                                                   # from parent dir of <decode-dir> (opt.)"
+   echo ""
+   echo "  --acwt <float>                                   # select acoustic scale for decoding"
+   echo "  --scoring-opts <opts>                            # options forwarded to local/score.sh"
+   echo "  --num-threads <N>                                # N>1: run multi-threaded decoder"
+   exit 1;
+fi
+
+
+graphdir=$1
+data=$2
+config=$3
+dir=$4
+[ -z $srcdir ] && srcdir=`dirname $dir`; # Default model directory one level up from decoding directory.
+sdata=$data/split$nj;
+
+mkdir -p $dir/log
+
+[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
+echo $nj > $dir/num_jobs
+
+# Select default locations to model files (if not already set externally)
+[ -z "$model" ] && model=$srcdir/final.mdl
+#
+[ -z "$class_frame_counts" -a -f $srcdir/prior_counts ] && class_frame_counts=$srcdir/prior_counts # priority,
+[ -z "$class_frame_counts" ] && class_frame_counts=$srcdir/ali_train_pdf.counts
+
+# Check that files exist
+for f in $sdata/1/feats.scp $model $class_frame_counts $graphdir/HCLG.fst; do
+  [ ! -f $f ] && echo "$0: missing file $f" && exit 1;
+done
+
+# Possibly use multi-threaded decoder
+thread_string=
+[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads"
+
+
+# PREPARE FEATURE EXTRACTION PIPELINE
+# import config,
+cmvn_opts=
+delta_opts=
+D=$srcdir
+[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility,
+[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts)
+[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility,
+[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts)
+#
+# Create the feature stream,
+feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |"
+# apply-cmvn (optional),
+[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1
+[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |"
+# add-deltas (optional),
+[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |"
+#
+# Run the decoding in the queue,
+if [ $stage -le 0 ]; then
+  $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \
+    ./src/nnet-forward $nnet_forward_opts --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $config "$feats" ark:- \| \
+    latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \
+    --lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \
+    $model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
fi
+# Run the scoring
+if ! $skip_scoring ; then
+  [ ! -x local/score.sh ] && \
+    echo "Not scoring because local/score.sh does not exist or not executable." && exit 1;
+  local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || exit 1;
+fi
+
+exit 0;
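To make the feature-pipeline assembly above concrete: with --nj 4 and both a cmvn_opts and a delta_opts file present in $srcdir, the $feats string for JOB=1 would expand to roughly the following single pipeline (directory names and option values hypothetical):

ark,s,cs:copy-feats scp:data/test/split4/1/feats.scp ark:- | apply-cmvn --norm-means=true --utt2spk=ark:data/test/split4/1/utt2spk scp:data/test/split4/1/cmvn.scp ark:- ark:- | add-deltas --delta-order=2 ark:- ark:- |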
-z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# +# Run the decoding in the queue, +if [ $stage -le 0 ]; then + $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \ + ./src/nnet-forward $nnet_forward_opts --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $config "$feats" ark:- \| \ + latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \ + --lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi +# Run the scoring +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || exit 1; +fi + +exit 0; diff --git a/kaldi_decode/local/score.sh b/kaldi_decode/local/score.sh new file mode 100755 index 0000000..b18f350 --- /dev/null +++ b/kaldi_decode/local/score.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +[ -f ./path.sh ] && . ./path.sh + +# begin configuration section. +cmd=run.pl +stage=0 +decode_mbr=true +reverse=false +word_ins_penalty=0.0 +min_lmwt=5 +max_lmwt=20 +#end configuration section. + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: local/score.sh [--cmd (run.pl|queue.pl...)] <data-dir> <lang-dir|graph-dir> <decode-dir>" + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + echo " --stage (0|1|2) # start scoring script from part-way through." + echo " --decode_mbr (true/false) # maximum bayes risk decoding (confusion network)." + echo " --min_lmwt <int> # minumum LM-weight for lattice rescoring " + echo " --max_lmwt <int> # maximum LM-weight for lattice rescoring " + echo " --reverse (true/false) # score with time reversed features " + exit 1; +fi + +data=$1 +lang_or_graph=$2 +dir=$3 + +symtab=$lang_or_graph/words.txt + +for f in $symtab $dir/lat.1.gz $data/text; do + [ ! 
diff --git a/kaldi_decode/path.sh b/kaldi_decode/path.sh
new file mode 100755
index 0000000..5aebc72
--- /dev/null
+++ b/kaldi_decode/path.sh
@@ -0,0 +1,5 @@
+export KALDI_ROOT=/slfs6/users/ymz09/kaldi/
+export PATH=$PWD/utils/:$KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin/:$KALDI_ROOT/src/kwsbin:$PWD:$PATH
+export LC_ALL=C
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/slhome/ymz09/mylibs/:.
+
diff --git a/kaldi_decode/src/Makefile b/kaldi_decode/src/Makefile
new file mode 100644
index 0000000..118420c
--- /dev/null
+++ b/kaldi_decode/src/Makefile
@@ -0,0 +1,12 @@
+# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --share)
+KDIR := /slfs6/users/ymz09/kaldi/
+NERVDIR := /slfs6/users/ymz09/nerv-project/nerv/
+CUDADIR := /usr/local/cuda/
+
+nnet-forward:
+	g++ -msse -msse2 -Wall -I $(KDIR)/src/ -pthread -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN -Wno-sign-compare -Wno-unused-local-typedefs -Winit-self -DHAVE_EXECINFO_H=1 -rdynamic -DHAVE_CXXABI_H -DHAVE_ATLAS -I $(KDIR)/tools/ATLAS/include -I $(KDIR)/tools/openfst/include -Wno-sign-compare -g -fPIC -DHAVE_CUDA -I $(CUDADIR)/include -DKALDI_NO_EXPF -I $(NERVDIR)/install//include/luajit-2.0/ -I $(NERVDIR)/install/include/ -DLUA_USE_APICHECK -c -o nnet-forward.o nnet-forward.cc
+	g++ -rdynamic -Wl,-rpath=$(KDIR)/tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath,$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)/src/lib -L. -L$(KDIR)/src/nnet/ -L$(KDIR)/src/cudamatrix/ -L$(KDIR)/src/lat/ -L$(KDIR)/src/hmm/ -L$(KDIR)/src/tree/ -L$(KDIR)/src/matrix/ -L$(KDIR)/src/util/ -L$(KDIR)/src/base/ nnet-forward.o $(KDIR)/src/nnet//libkaldi-nnet.so $(KDIR)/src/cudamatrix//libkaldi-cudamatrix.so $(KDIR)/src/lat//libkaldi-lat.so $(KDIR)/src/hmm//libkaldi-hmm.so $(KDIR)/src/tree//libkaldi-tree.so $(KDIR)/src/matrix//libkaldi-matrix.so $(KDIR)/src/util//libkaldi-util.so $(KDIR)/src/base//libkaldi-base.so -L$(KDIR)/tools/openfst/lib -lfst /usr/lib/liblapack.so /usr/lib/libcblas.so /usr/lib/libatlas.so /usr/lib/libf77blas.so -lm -lpthread -ldl -lcublas -lcudart -lkaldi-nnet -lkaldi-cudamatrix -lkaldi-lat -lkaldi-hmm -lkaldi-tree -lkaldi-matrix -lkaldi-util -lkaldi-base -lstdc++ -L$(NERVDIR)/install/lib -lnervcore -lluaT -rdynamic -Wl,-rpath=$(KDIR)//tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath,$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)//src/lib -lfst -lm -lpthread -ldl -lcublas -lcudart -L $(NERVDIR)/luajit-2.0/src/ -lluajit -o nnet-forward
+
+clean:
+	-rm nnet-forward.o nnet-forward
+
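The := paths above are site-specific, but since command-line variable assignments override them, the tool can be built against another checkout without editing the Makefile (paths hypothetical):

make nnet-forward KDIR=$HOME/kaldi NERVDIR=$HOME/nerv CUDADIR=/usr/local/cuda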
diff --git a/kaldi_decode/src/nerv4decode.lua b/kaldi_decode/src/nerv4decode.lua
new file mode 100644
index 0000000..b2ff344
--- /dev/null
+++ b/kaldi_decode/src/nerv4decode.lua
@@ -0,0 +1,79 @@
+package.path="/home/slhome/ymz09/.luarocks/share/lua/5.1/?.lua;/home/slhome/ymz09/.luarocks/share/lua/5.1/?/init.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?/init.lua;"..package.path;
+package.cpath="/home/slhome/ymz09/.luarocks/lib/lua/5.1/?.so;/slfs6/users/ymz09/nerv-project/nerv/install/lib/lua/5.1/?.so;"..package.cpath;
+local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+require 'nerv'
+
+function build_trainer(ifname, feature)
+    local param_repo = nerv.ParamRepo()
+    param_repo:import(ifname, nil, gconf)
+    local layer_repo = make_layer_repo(param_repo)
+    local network = get_decode_network(layer_repo)
+    local global_transf = get_global_transf(layer_repo)
+    local input_order = get_input_order()
+    local readers = make_readers(feature, layer_repo)
+    network:init(1)
+
+    local iterative_trainer = function()
+        local data = nil
+        for ri = 1, #readers, 1 do
+            data = readers[ri].reader:get_data()
+            if data ~= nil then
+                break
+            end
+        end
+
+        if data == nil then
+            return "", nil
+        end
+
+        local input = {}
+        for i, e in ipairs(input_order) do
+            local id = e.id
+            if data[id] == nil then
+                nerv.error("input data %s not found", id)
+            end
+            local transformed
+            if e.global_transf then
+                local batch = gconf.cumat_type(data[id]:nrow(), data[id]:ncol())
+                batch:copy_fromh(data[id])
+                transformed = nerv.speech_utils.global_transf(batch,
+                                    global_transf,
+                                    gconf.frm_ext or 0, 0,
+                                    gconf)
+            else
+                transformed = data[id]
+            end
+            table.insert(input, transformed)
+        end
+        local output = {nerv.CuMatrixFloat(input[1]:nrow(), network.dim_out[1])}
+        network:batch_resize(input[1]:nrow())
+        network:propagate(input, output)
+
+        local utt = data["key"]
+        if utt == nil then
+            nerv.error("no key found.")
+        end
+
+        local mat = nerv.MMatrixFloat(output[1]:nrow(), output[1]:ncol())
+        output[1]:copy_toh(mat)
+
+        collectgarbage("collect")
+        return utt, mat
+    end
+
+    return iterative_trainer
+end
+
+function init(config, feature)
+    local tmp = io.write
+    io.write = function(...)
+    end
+    dofile(config)
+    trainer = build_trainer(gconf.decode_param, feature)
+    io.write = tmp
+end
+
+function feed()
+    local utt, mat = trainer()
+    return utt, mat
+end
diff --git a/kaldi_decode/src/nnet-forward.cc b/kaldi_decode/src/nnet-forward.cc
new file mode 100644
index 0000000..007f623
--- /dev/null
+++ b/kaldi_decode/src/nnet-forward.cc
@@ -0,0 +1,215 @@
+// nnetbin/nnet-forward.cc
+
+// Copyright 2011-2013  Brno University of Technology (Author: Karel Vesely)
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABLITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+extern "C"{
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "nerv/matrix/matrix.h"
+#include "nerv/common.h"
+#include "nerv/luaT/luaT.h"
+}
+
+#include <limits>
+
+#include "nnet/nnet-nnet.h"
+#include "nnet/nnet-loss.h"
+#include "nnet/nnet-pdf-prior.h"
+#include "base/kaldi-common.h"
+#include "util/common-utils.h"
+#include "base/timer.h"
+
+typedef kaldi::BaseFloat BaseFloat;
+typedef struct Matrix NervMatrix;
+
+
+int main(int argc, char *argv[]) {
+  using namespace kaldi;
+  using namespace kaldi::nnet1;
+  try {
+    const char *usage =
+      "Perform forward pass through Neural Network.\n"
+      "\n"
+      "Usage: nnet-forward [options] <nerv-config> <feature-rspecifier> <feature-wspecifier> [nerv4decode.lua]\n"
+      "e.g.: \n"
+      " nnet-forward config.lua ark:features.ark ark:mlpoutput.ark\n";
+
+    ParseOptions po(usage);
+
+    PdfPriorOptions prior_opts;
+    prior_opts.Register(&po);
+
+    bool apply_log = false;
+    po.Register("apply-log", &apply_log, "Transform MLP output to logscale");
+
+    std::string use_gpu="no";
+    po.Register("use-gpu", &use_gpu, "yes|no|optional, only has effect if compiled with CUDA");
+
+    using namespace kaldi;
+    using namespace kaldi::nnet1;
+    typedef kaldi::int32 int32;
+
+    int32 time_shift = 0;
+    po.Register("time-shift", &time_shift, "LSTM : repeat last input frame N-times, discard N initial output frames.");
+
+    po.Read(argc, argv);
+
+    if (po.NumArgs() < 3) {
+      po.PrintUsage();
+      exit(1);
+    }
+
+    std::string config = po.GetArg(1),
+        feature_rspecifier = po.GetArg(2),
+        feature_wspecifier = po.GetArg(3),
+        nerv4decode = "src/nerv4decode.lua";
+    if(po.NumArgs() >= 4)
+      nerv4decode = po.GetArg(4);
+
+    //Select the GPU
+#if HAVE_CUDA==1
+    CuDevice::Instantiate().SelectGpuId(use_gpu);
+#endif
+
+    // we will subtract log-priors later,
+    PdfPrior pdf_prior(prior_opts);
+
+    kaldi::int64 tot_t = 0;
+
+    BaseFloatMatrixWriter feature_writer(feature_wspecifier);
+
+    CuMatrix<BaseFloat> nnet_out;
+    kaldi::Matrix<BaseFloat> nnet_out_host;
+
+    lua_State *L = lua_open();
+    luaL_openlibs(L);
+    if(luaL_loadfile(L, nerv4decode.c_str()))
+      KALDI_ERR << "luaL_loadfile() " << nerv4decode << " failed " << lua_tostring(L, -1);
+
+    if(lua_pcall(L, 0, 0, 0))
+      KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+    lua_settop(L, 0);
+    lua_getglobal(L, "init");
+    lua_pushstring(L, config.c_str());
+    lua_pushstring(L, feature_rspecifier.c_str());
+    if(lua_pcall(L, 2, 0, 0))
+      KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+    Timer time;
+    double time_now = 0;
+    int32 num_done = 0;
+    // iterate over all feature files
+    for(;;){
+      lua_settop(L, 0);
+      lua_getglobal(L, "feed");
+      if(lua_pcall(L, 0, 2, 0))
+        KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+      std::string utt = std::string(lua_tostring(L, -2));
+      if(utt == "")
+        break;
+      NervMatrix *mat = *(NervMatrix **)lua_touserdata(L, -1);
+
+      nnet_out_host.Resize(mat->nrow, mat->ncol, kUndefined);
+
+      size_t stride = mat->stride;
+      for(int i = 0; i < mat->nrow; i++){
+        const BaseFloat *nerv_row = (BaseFloat *)((char *)mat->data.f + i * stride);
+        BaseFloat *row = nnet_out_host.RowData(i);
+        memmove(row, nerv_row, sizeof(BaseFloat) * mat->ncol);
+      }
+
+      KALDI_VLOG(2) << "Processing utterance " << num_done+1
+                    << ", " << utt
+                    << ", " << nnet_out_host.NumRows() << "frm";
+
+      nnet_out.Resize(nnet_out_host.NumRows(), nnet_out_host.NumCols(), kUndefined);
+      nnet_out.CopyFromMat(nnet_out_host);
+
+      if (!KALDI_ISFINITE(nnet_out.Sum())) { // check there's no nan/inf,
+        KALDI_ERR << "NaN or inf found in nn-output for " << utt;
+      }
+
+      // convert posteriors to log-posteriors,
+      if (apply_log) {
+        if (!(nnet_out.Min() >= 0.0 && nnet_out.Max() <= 1.0)) {
+          KALDI_WARN << utt << " "
+                     << "Applying 'log' to data which don't seem to be probabilities "
+                     << "(is there a softmax somewhere?)";
+        }
+        nnet_out.Add(1e-20); // avoid log(0),
+        nnet_out.ApplyLog();
+      }
+
+      // subtract log-priors from log-posteriors or pre-softmax,
+      if (prior_opts.class_frame_counts != "") {
+        if (nnet_out.Min() >= 0.0 && nnet_out.Max() <= 1.0) {
+          KALDI_WARN << utt << " "
+                     << "Subtracting log-prior on 'probability-like' data in range [0..1] "
+                     << "(Did you forget --no-softmax=true or --apply-log=true ?)";
+        }
+        pdf_prior.SubtractOnLogpost(&nnet_out);
+      }
+
+      // download from GPU,
+      nnet_out_host.Resize(nnet_out.NumRows(), nnet_out.NumCols());
+      nnet_out.CopyToMat(&nnet_out_host);
+
+      // time-shift, remove N first frames of LSTM output,
+      if (time_shift > 0) {
+        kaldi::Matrix<BaseFloat> tmp(nnet_out_host);
+        nnet_out_host = tmp.RowRange(time_shift, tmp.NumRows() - time_shift);
+      }
+
+      // write,
+      if (!KALDI_ISFINITE(nnet_out_host.Sum())) { // check there's no nan/inf,
+        KALDI_ERR << "NaN or inf found in final output nn-output for " << utt;
+      }
+      feature_writer.Write(utt, nnet_out_host);
+
+      // progress log
+      if (num_done % 100 == 0) {
+        time_now = time.Elapsed();
+        KALDI_VLOG(1) << "After " << num_done << " utterances: time elapsed = "
+                      << time_now/60 << " min; processed " << tot_t/time_now
+                      << " frames per second.";
+      }
+      num_done++;
+      tot_t += nnet_out_host.NumRows();
+    }
+
+    // final message
+    KALDI_LOG << "Done " << num_done << " files"
+              << " in " << time.Elapsed()/60 << "min,"
+              << " (fps " << tot_t/time.Elapsed() << ")";
+
+#if HAVE_CUDA==1
+    if (kaldi::g_kaldi_verbose_level >= 1) {
+      CuDevice::Instantiate().PrintProfile();
+    }
+#endif
+    lua_close(L);
+    if (num_done == 0) return -1;
+    return 0;
+  } catch(const std::exception &e) {
+    KALDI_ERR << e.what();
+    return -1;
+  }
+}
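Matching the usage string above, a standalone invocation might look like the following (file names hypothetical); the optional fourth argument overrides the default src/nerv4decode.lua script path:

./src/nnet-forward --class-frame-counts=exp/dnn/ali_train_pdf.counts --use-gpu=no \
  config.lua "ark:copy-feats scp:data/test/feats.scp ark:- |" ark:mlpoutput.ark src/nerv4decode.lua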
diff --git a/kaldi_decode/utils/int2sym.pl b/kaldi_decode/utils/int2sym.pl
new file mode 100755
index 0000000..d618939
--- /dev/null
+++ b/kaldi_decode/utils/int2sym.pl
@@ -0,0 +1,71 @@
+#!/usr/bin/env perl
+# Copyright 2010-2012  Microsoft Corporation  Johns Hopkins University (Author: Daniel Povey)
+# Apache 2.0.
+
+undef $field_begin;
+undef $field_end;
+
+
+if ($ARGV[0] eq "-f") {
+  shift @ARGV;
+  $field_spec = shift @ARGV;
+  if ($field_spec =~ m/^\d+$/) {
+    $field_begin = $field_spec - 1; $field_end = $field_spec - 1;
+  }
+  if ($field_spec =~ m/^(\d*)[-:](\d*)/) { # accept e.g. 1:10 as a courtesy (properly, 1-10)
+    if ($1 ne "") {
+      $field_begin = $1 - 1;    # Change to zero-based indexing.
+    }
+    if ($2 ne "") {
+      $field_end = $2 - 1;      # Change to zero-based indexing.
+    }
+  }
+  if (!defined $field_begin && !defined $field_end) {
+    die "Bad argument to -f option: $field_spec";
+  }
+}
+$symtab = shift @ARGV;
+if(!defined $symtab) {
+  print STDERR "Usage: int2sym.pl [options] symtab [input] > output\n" .
+               "options: [-f (<field>|<field_start>-<field-end>)]\n" .
+               "e.g.: -f 2, or -f 3-4\n";
+  exit(1);
+}
+
+open(F, "<$symtab") || die "Error opening symbol table file $symtab";
+while(<F>) {
+  @A = split(" ", $_);
+  @A == 2 || die "bad line in symbol table file: $_";
+  $int2sym{$A[1]} = $A[0];
+}
+
+sub int2sym {
+  my $a = shift @_;
+  my $pos = shift @_;
+  if($a !~ m:^\d+$:) { # not all digits..
+    $pos1 = $pos+1; # make it one-based.
+    die "int2sym.pl: found noninteger token $a [in position $pos1]\n";
+  }
+  $s = $int2sym{$a};
+  if(!defined ($s)) {
+    die "int2sym.pl: integer $a not in symbol table $symtab.";
+  }
+  return $s;
+}
+
+$error = 0;
+while (<>) {
+  @A = split(" ", $_);
+  for ($pos = 0; $pos <= $#A; $pos++) {
+    $a = $A[$pos];
+    if ( (!defined $field_begin || $pos >= $field_begin)
+         && (!defined $field_end || $pos <= $field_end)) {
+      $a = int2sym($a, $pos);
+    }
+    print $a . " ";
+  }
+  print "\n";
+}
+
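A quick sketch of the -f 2- behaviour used by score.sh above: field 1 (the utterance key) passes through untouched and the remaining integer fields are looked up in the symbol table (the words.txt mapping here is hypothetical):

echo "utt1 42 87 13" | utils/int2sym.pl -f 2- words.txt
# -> utt1 the quick fox    (whatever symbols 42, 87 and 13 map to)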
diff --git a/kaldi_decode/utils/parse_options.sh b/kaldi_decode/utils/parse_options.sh
new file mode 100755
index 0000000..fdc8a36
--- /dev/null
+++ b/kaldi_decode/utils/parse_options.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+# Copyright 2012  Johns Hopkins University (Author: Daniel Povey);
+#                 Arnab Ghoshal, Karel Vesely
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+# MERCHANTABLITY OR NON-INFRINGEMENT.
+# See the Apache 2 License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Parse command-line options.
+# To be sourced by another script (as in ". parse_options.sh").
+# Option format is: --option-name arg
+# and shell variable "option_name" gets set to value "arg."
+# The exception is --help, which takes no arguments, but prints the
+# $help_message variable (if defined).
+
+
+###
+### The --config file options have lower priority to command line
+### options, so we need to import them first...
+###
+
+# Now import all the configs specified by command-line, in left-to-right order
+for ((argpos=1; argpos<$#; argpos++)); do
+  if [ "${!argpos}" == "--config" ]; then
+    argpos_plus1=$((argpos+1))
+    config=${!argpos_plus1}
+    [ ! -r $config ] && echo "$0: missing config '$config'" && exit 1
+    . $config  # source the config file.
+  fi
+done
+
+
+###
+### Now we process the command line options
+###
+while true; do
+  [ -z "${1:-}" ] && break;  # break if there are no arguments
+  case "$1" in
+    # If the enclosing script is called with --help option, print the help
+    # message and exit.  Scripts should put help messages in $help_message
+    --help|-h) if [ -z "$help_message" ]; then echo "No help found." 1>&2;
+               else printf "$help_message\n" 1>&2; fi;
+               exit 0 ;;
+    --*=*) echo "$0: options to scripts must be of the form --name value, got '$1'"
+           exit 1 ;;
+    # If the first command-line argument begins with "--" (e.g. --foo-bar),
+    # then work out the variable name as $name, which will equal "foo_bar".
+    --*) name=`echo "$1" | sed s/^--// | sed s/-/_/g`;
+      # Next we test whether the variable in question is undefined -- if so it's
+      # an invalid option and we die.  Note: $0 evaluates to the name of the
+      # enclosing script.
+      # The test [ -z ${foo_bar+xxx} ] will return true if the variable foo_bar
+      # is undefined.  We then have to wrap this test inside "eval" because
+      # foo_bar is itself inside a variable ($name).
+      eval '[ -z "${'$name'+xxx}" ]' && echo "$0: invalid option $1" 1>&2 && exit 1;
+
+      oldval="`eval echo \\$$name`";
+      # Work out whether we seem to be expecting a Boolean argument.
+      if [ "$oldval" == "true" ] || [ "$oldval" == "false" ]; then
+        was_bool=true;
+      else
+        was_bool=false;
+      fi
+
+      # Set the variable to the right value-- the escaped quotes make it work if
+      # the option had spaces, like --cmd "queue.pl -sync y"
+      eval $name=\"$2\";
+
+      # Check that Boolean-valued arguments are really Boolean.
+      if $was_bool && [[ "$2" != "true" && "$2" != "false" ]]; then
+        echo "$0: expected \"true\" or \"false\": $1 $2" 1>&2
+        exit 1;
+      fi
+      shift 2;
+      ;;
+    *) break;
+  esac
done
+
+
+# Check for an empty argument to the --cmd option, which can easily occur as a
+# result of scripting errors.
+[ ! -z "${cmd+xxx}" ] && [ -z "$cmd" ] && echo "$0: empty argument to --cmd option" 1>&2 && exit 1;
+
+
+true; # so this script returns exit code 0.
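A minimal sketch of the contract described above: the enclosing script declares its defaults first, then sources this file; afterwards any --name value pairs from its command line have been folded into those variables (script name hypothetical):

#!/bin/bash
stage=0
cmd=run.pl
. utils/parse_options.sh
echo "stage=$stage cmd=$cmd"
# ./myscript.sh --stage 2 --cmd "queue.pl -sync y"   =>   stage=2 cmd=queue.pl -sync y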
diff --git a/kaldi_decode/utils/queue.pl b/kaldi_decode/utils/queue.pl
new file mode 100755
index 0000000..1e36de6
--- /dev/null
+++ b/kaldi_decode/utils/queue.pl
@@ -0,0 +1,580 @@
+#!/usr/bin/env perl
+use strict;
+use warnings;
+
+# Copyright 2012  Johns Hopkins University (Author: Daniel Povey).
+#           2014  Vimal Manohar (Johns Hopkins University)
+# Apache 2.0.
+
+use File::Basename;
+use Cwd;
+use Getopt::Long;
+
+# queue.pl has the same functionality as run.pl, except that
+# it runs the job in question on the queue (Sun GridEngine).
+# This version of queue.pl uses the task array functionality
+# of the grid engine.  Note: it's different from the queue.pl
+# in the s4 and earlier scripts.
+
+# The script now supports configuring the queue system using a config file
+# (default in conf/queue.conf; but can be specified with the --config option)
+# and a set of command line options.
+# The current script handles:
+# 1) Normal configuration arguments
+# For e.g. a command line option of "--gpu 1" could be converted into the option
+# "-q g.q -l gpu=1" to qsub. How the CLI option is handled is determined by a
+# line in the config file like
+# gpu=* -q g.q -l gpu=$0
+# $0 here in the line is replaced with the argument read from the CLI and the
+# resulting string is passed to qsub.
+# 2) Special arguments to options such as
+# gpu=0
+# If --gpu 0 is given in the command line, then no special "-q" is given.
+# 3) Default argument
+# default gpu=0
+# If --gpu option is not passed in the command line, then the script behaves as
+# if --gpu 0 was passed since 0 is specified as the default argument for that
+# option
+# 4) Arbitrary options and arguments.
+# Any command line option starting with '--' and its argument would be handled
+# as long as it's defined in the config file.
+# 5) Default behavior
+# If the config file that is passed is not readable, then the script
+# behaves as if the queue has the following config file:
+# $ cat conf/queue.conf
+# # Default configuration
+# command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
+# option mem=* -l mem_free=$0,ram_free=$0
+# option mem=0          # Do not add anything to qsub_opts
+# option num_threads=* -pe smp $0
+# option num_threads=1  # Do not add anything to qsub_opts
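Under the default configuration quoted above, an invocation such as the following (log path and command hypothetical) would translate --mem and --num-threads into the corresponding qsub resource requests:

utils/queue.pl --mem 2G --num-threads 4 JOB=1:10 exp/log/train.JOB.log my_training_command
# --mem 2G        -> -l mem_free=2G,ram_free=2G
# --num-threads 4 -> -pe smp 4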