summaryrefslogtreecommitdiff
path: root/kaldi_decode/src
diff options
context:
space:
mode:
Diffstat (limited to 'kaldi_decode/src')
-rw-r--r--  kaldi_decode/src/Makefile          12
-rw-r--r--  kaldi_decode/src/nerv4decode.lua   79
-rw-r--r--  kaldi_decode/src/nnet-forward.cc  215
3 files changed, 306 insertions, 0 deletions
diff --git a/kaldi_decode/src/Makefile b/kaldi_decode/src/Makefile
new file mode 100644
index 0000000..118420c
--- /dev/null
+++ b/kaldi_decode/src/Makefile
@@ -0,0 +1,12 @@
+# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --share)
+# NOTE: KDIR/NERVDIR/CUDADIR are site-specific install prefixes; adjust
+# them for your machine before building.
+KDIR := /slfs6/users/ymz09/kaldi/
+NERVDIR := /slfs6/users/ymz09/nerv-project/nerv/
+CUDADIR := /usr/local/cuda/
+
+nnet-forward:
+	g++ -msse -msse2 -Wall -I $(KDIR)/src/ -pthread -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN -Wno-sign-compare -Wno-unused-local-typedefs -Winit-self -DHAVE_EXECINFO_H=1 -rdynamic -DHAVE_CXXABI_H -DHAVE_ATLAS -I $(KDIR)/tools/ATLAS/include -I $(KDIR)/tools/openfst/include -Wno-sign-compare -g -fPIC -DHAVE_CUDA -I $(CUDADIR)/include -DKALDI_NO_EXPF -I $(NERVDIR)/install//include/luajit-2.0/ -I $(NERVDIR)/install/include/ -DLUA_USE_APICHECK -c -o nnet-forward.o nnet-forward.cc
+	g++ -rdynamic -Wl,-rpath=$(KDIR)/tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath,$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)/src/lib -L. -L$(KDIR)/src/nnet/ -L$(KDIR)/src/cudamatrix/ -L$(KDIR)/src/lat/ -L$(KDIR)/src/hmm/ -L$(KDIR)/src/tree/ -L$(KDIR)/src/matrix/ -L$(KDIR)/src/util/ -L$(KDIR)/src/base/ nnet-forward.o $(KDIR)/src/nnet//libkaldi-nnet.so $(KDIR)/src/cudamatrix//libkaldi-cudamatrix.so $(KDIR)/src/lat//libkaldi-lat.so $(KDIR)/src/hmm//libkaldi-hmm.so $(KDIR)/src/tree//libkaldi-tree.so $(KDIR)/src/matrix//libkaldi-matrix.so $(KDIR)/src/util//libkaldi-util.so $(KDIR)/src/base//libkaldi-base.so -L$(KDIR)/tools/openfst/lib -lfst /usr/lib/liblapack.so /usr/lib/libcblas.so /usr/lib/libatlas.so /usr/lib/libf77blas.so -lm -lpthread -ldl -lcublas -lcudart -lkaldi-nnet -lkaldi-cudamatrix -lkaldi-lat -lkaldi-hmm -lkaldi-tree -lkaldi-matrix -lkaldi-util -lkaldi-base -lstdc++ -L$(NERVDIR)/install/lib -lnervcore -lluaT -rdynamic -Wl,-rpath=$(KDIR)//tools/openfst/lib -L$(CUDADIR)/lib64 -Wl,-rpath,$(CUDADIR)/lib64 -Wl,-rpath=$(KDIR)//src/lib -lfst -lm -lpthread -ldl -lcublas -lcudart -L $(NERVDIR)/luajit-2.0/src/ -lluajit -o nnet-forward
+
+clean:
+	-rm nnet-forward.o nnet-forward
+
diff --git a/kaldi_decode/src/nerv4decode.lua b/kaldi_decode/src/nerv4decode.lua
new file mode 100644
index 0000000..b2ff344
--- /dev/null
+++ b/kaldi_decode/src/nerv4decode.lua
@@ -0,0 +1,79 @@
+package.path="/home/slhome/ymz09/.luarocks/share/lua/5.1/?.lua;/home/slhome/ymz09/.luarocks/share/lua/5.1/?/init.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?.lua;/slfs6/users/ymz09/nerv-project/nerv/install/share/lua/5.1/?/init.lua;"..package.path;
+package.cpath="/home/slhome/ymz09/.luarocks/lib/lua/5.1/?.so;/slfs6/users/ymz09/nerv-project/nerv/install/lib/lua/5.1/?.so;"..package.cpath;
+local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+require 'nerv'
+
+-- Build a closure that decodes one utterance per call.
+-- ifname : parameter file(s) imported into the nerv.ParamRepo
+-- feature: feature specification handed to make_readers()
+-- Returns a function yielding (utterance_key, host_output_matrix), or
+-- ("", nil) once all readers are exhausted.
+-- NOTE(review): gconf, make_layer_repo, get_decode_network,
+-- get_global_transf, get_input_order and make_readers are assumed to be
+-- globals defined by the config file dofile()'d in init() -- confirm
+-- against the nerv config in use.
+function build_trainer(ifname, feature)
+    local param_repo = nerv.ParamRepo()
+    param_repo:import(ifname, nil, gconf)
+    local layer_repo = make_layer_repo(param_repo)
+    local network = get_decode_network(layer_repo)
+    local global_transf = get_global_transf(layer_repo)
+    local input_order = get_input_order()
+    local readers = make_readers(feature, layer_repo)
+    -- batch size 1: decoding processes one utterance at a time
+    network:init(1)
+
+    local iterative_trainer = function()
+        -- pull the next utterance from the first reader that still has data
+        local data = nil
+        for ri = 1, #readers, 1 do
+            data = readers[ri].reader:get_data()
+            if data ~= nil then
+                break
+            end
+        end
+
+        -- all readers exhausted: "" is the end-of-stream sentinel checked
+        -- by the C++ driver (nnet-forward.cc)
+        if data == nil then
+            return "", nil
+        end
+
+        -- assemble the network input in the declared input order
+        local input = {}
+        for i, e in ipairs(input_order) do
+            local id = e.id
+            if data[id] == nil then
+                nerv.error("input data %s not found", id)
+            end
+            local transformed
+            if e.global_transf then
+                -- copy the host matrix to the GPU and apply the global
+                -- feature transform there
+                local batch = gconf.cumat_type(data[id]:nrow(), data[id]:ncol())
+                batch:copy_fromh(data[id])
+                transformed = nerv.speech_utils.global_transf(batch,
+                                global_transf,
+                                gconf.frm_ext or 0, 0,
+                                gconf)
+            else
+                transformed = data[id]
+            end
+            table.insert(input, transformed)
+        end
+        local output = {nerv.CuMatrixFloat(input[1]:nrow(), network.dim_out[1])}
+        network:batch_resize(input[1]:nrow())
+        network:propagate(input, output)
+
+        local utt = data["key"]
+        if utt == nil then
+            nerv.error("no key found.")
+        end
+
+        -- copy the network output back to a host matrix for the caller
+        local mat = nerv.MMatrixFloat(output[1]:nrow(), output[1]:ncol())
+        output[1]:copy_toh(mat)
+
+        -- reclaim GPU temporaries eagerly between utterances
+        collectgarbage("collect")
+        return utt, mat
+    end
+
+    return iterative_trainer
+end
+
+-- Entry point invoked from nnet-forward.cc: loads the nerv config file
+-- and installs the global `trainer` closure used by feed().
+-- config : path to the nerv config file (expected to define gconf and
+--          the helper functions used by build_trainer)
+-- feature: feature rspecifier forwarded to the readers
+function init(config, feature)
+    -- temporarily silence io.write while the config runs, so its banner
+    -- output cannot pollute the calling Kaldi binary's stdout/pipes
+    local tmp = io.write
+    io.write = function(...)
+    end
+    dofile(config)
+    -- `trainer` is deliberately global: feed() (called from C) relies on it
+    trainer = build_trainer(gconf.decode_param, feature)
+    io.write = tmp
+end
+
+-- Called repeatedly from nnet-forward.cc. Returns the next
+-- (utterance_key, host output matrix) pair, or ("", nil) when the
+-- feature stream is exhausted. init() must have been called first.
+function feed()
+    local utt, mat = trainer()
+    return utt, mat
+end
diff --git a/kaldi_decode/src/nnet-forward.cc b/kaldi_decode/src/nnet-forward.cc
new file mode 100644
index 0000000..007f623
--- /dev/null
+++ b/kaldi_decode/src/nnet-forward.cc
@@ -0,0 +1,215 @@
+// nnetbin/nnet-forward.cc
+
+// Copyright 2011-2013 Brno University of Technology (Author: Karel Vesely)
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABLITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+extern "C"{
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "nerv/matrix/matrix.h"
+#include "nerv/common.h"
+#include "nerv/luaT/luaT.h"
+}
+
+#include <limits>
+
+#include "nnet/nnet-nnet.h"
+#include "nnet/nnet-loss.h"
+#include "nnet/nnet-pdf-prior.h"
+#include "base/kaldi-common.h"
+#include "util/common-utils.h"
+#include "base/timer.h"
+
+typedef kaldi::BaseFloat BaseFloat;
+typedef struct Matrix NervMatrix;
+
+
+int main(int argc, char *argv[]) {
+  using namespace kaldi;
+  using namespace kaldi::nnet1;
+  try {
+    const char *usage =
+        "Perform forward pass through Neural Network.\n"
+        "\n"
+        "Usage: nnet-forward [options] <nerv-config> <feature-rspecifier> <feature-wspecifier> [nerv4decode.lua]\n"
+        "e.g.: \n"
+        " nnet-forward config.lua ark:features.ark ark:mlpoutput.ark\n";
+
+    ParseOptions po(usage);
+
+    PdfPriorOptions prior_opts;
+    prior_opts.Register(&po);
+
+    bool apply_log = false;
+    po.Register("apply-log", &apply_log, "Transform MLP output to logscale");
+
+    std::string use_gpu="no";
+    po.Register("use-gpu", &use_gpu, "yes|no|optional, only has effect if compiled with CUDA");
+
+    using namespace kaldi;
+    using namespace kaldi::nnet1;
+    typedef kaldi::int32 int32;
+
+    int32 time_shift = 0;
+    po.Register("time-shift", &time_shift, "LSTM : repeat last input frame N-times, discrad N initial output frames.");
+
+    po.Read(argc, argv);
+
+    if (po.NumArgs() < 3) {
+      po.PrintUsage();
+      exit(1);
+    }
+
+    std::string config = po.GetArg(1),
+        feature_rspecifier = po.GetArg(2),
+        feature_wspecifier = po.GetArg(3),
+        nerv4decode = "src/nerv4decode.lua";
+    if(po.NumArgs() >= 4)
+      nerv4decode = po.GetArg(4);
+
+    //Select the GPU
+#if HAVE_CUDA==1
+    CuDevice::Instantiate().SelectGpuId(use_gpu);
+#endif
+
+    // we will subtract log-priors later,
+    PdfPrior pdf_prior(prior_opts);
+
+    kaldi::int64 tot_t = 0;
+
+    BaseFloatMatrixWriter feature_writer(feature_wspecifier);
+
+    CuMatrix<BaseFloat> nnet_out;
+    kaldi::Matrix<BaseFloat> nnet_out_host;
+
+    // Bring up an embedded Lua interpreter: the nerv network lives on the
+    // Lua side; this binary only shuttles matrices in and out of it.
+    lua_State *L = lua_open();
+    luaL_openlibs(L);
+    if(luaL_loadfile(L, nerv4decode.c_str()))
+      KALDI_ERR << "luaL_loadfile() " << nerv4decode << " failed " << lua_tostring(L, -1);
+
+    if(lua_pcall(L, 0, 0, 0))
+      KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+    // init(config, feature_rspecifier): loads the nerv config and builds
+    // the global `trainer` closure consumed by feed() below.
+    lua_settop(L, 0);
+    lua_getglobal(L, "init");
+    lua_pushstring(L, config.c_str());
+    lua_pushstring(L, feature_rspecifier.c_str());
+    if(lua_pcall(L, 2, 0, 0))
+      KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+    Timer time;
+    double time_now = 0;
+    int32 num_done = 0;
+    // iterate over all feature files
+    for(;;){
+      // feed() -> (utterance key, nerv host matrix); "" key means done
+      lua_settop(L, 0);
+      lua_getglobal(L, "feed");
+      if(lua_pcall(L, 0, 2, 0))
+        KALDI_ERR << "lua_pcall failed " << lua_tostring(L, -1);
+
+      // lua_tostring() returns NULL for non-string values; guard against
+      // that instead of constructing std::string from a NULL pointer.
+      const char *utt_cstr = lua_tostring(L, -2);
+      std::string utt = utt_cstr ? std::string(utt_cstr) : "";
+      if(utt == "")
+        break;
+      NervMatrix *mat = *(NervMatrix **)lua_touserdata(L, -1);
+
+      nnet_out_host.Resize(mat->nrow, mat->ncol, kUndefined);
+
+      // copy row by row: the nerv matrix is strided, kaldi's is contiguous
+      size_t stride = mat->stride;
+      for(int i = 0; i < mat->nrow; i++){
+        const BaseFloat *nerv_row = (BaseFloat *)((char *)mat->data.f + i * stride);
+        BaseFloat *row = nnet_out_host.RowData(i);
+        memmove(row, nerv_row, sizeof(BaseFloat) * mat->ncol);
+      }
+
+      KALDI_VLOG(2) << "Processing utterance " << num_done+1
+                    << ", " << utt
+                    << ", " << nnet_out_host.NumRows() << "frm";
+
+      nnet_out.Resize(nnet_out_host.NumRows(), nnet_out_host.NumCols(), kUndefined);
+      nnet_out.CopyFromMat(nnet_out_host);
+
+      if (!KALDI_ISFINITE(nnet_out.Sum())) { // check there's no nan/inf,
+        KALDI_ERR << "NaN or inf found in nn-output for " << utt;
+      }
+
+      // convert posteriors to log-posteriors,
+      if (apply_log) {
+        if (!(nnet_out.Min() >= 0.0 && nnet_out.Max() <= 1.0)) {
+          KALDI_WARN << utt << " "
+                     << "Applying 'log' to data which don't seem to be probabilities "
+                     << "(is there a softmax somwhere?)";
+        }
+        nnet_out.Add(1e-20); // avoid log(0),
+        nnet_out.ApplyLog();
+      }
+
+      // subtract log-priors from log-posteriors or pre-softmax,
+      if (prior_opts.class_frame_counts != "") {
+        if (nnet_out.Min() >= 0.0 && nnet_out.Max() <= 1.0) {
+          KALDI_WARN << utt << " "
+                     << "Subtracting log-prior on 'probability-like' data in range [0..1] "
+                     << "(Did you forget --no-softmax=true or --apply-log=true ?)";
+        }
+        pdf_prior.SubtractOnLogpost(&nnet_out);
+      }
+
+      // download from GPU,
+      nnet_out_host.Resize(nnet_out.NumRows(), nnet_out.NumCols());
+      nnet_out.CopyToMat(&nnet_out_host);
+
+      // time-shift, remove N first frames of LSTM output,
+      if (time_shift > 0) {
+        kaldi::Matrix<BaseFloat> tmp(nnet_out_host);
+        nnet_out_host = tmp.RowRange(time_shift, tmp.NumRows() - time_shift);
+      }
+
+      // write,
+      if (!KALDI_ISFINITE(nnet_out_host.Sum())) { // check there's no nan/inf,
+        KALDI_ERR << "NaN or inf found in final output nn-output for " << utt;
+      }
+      feature_writer.Write(utt, nnet_out_host);
+
+      // progress log
+      if (num_done % 100 == 0) {
+        time_now = time.Elapsed();
+        KALDI_VLOG(1) << "After " << num_done << " utterances: time elapsed = "
+                      << time_now/60 << " min; processed " << tot_t/time_now
+                      << " frames per second.";
+      }
+      num_done++;
+      tot_t += nnet_out_host.NumRows();
+    }
+
+    // final message
+    KALDI_LOG << "Done " << num_done << " files"
+              << " in " << time.Elapsed()/60 << "min,"
+              << " (fps " << tot_t/time.Elapsed() << ")";
+
+#if HAVE_CUDA==1
+    if (kaldi::g_kaldi_verbose_level >= 1) {
+      CuDevice::Instantiate().PrintProfile();
+    }
+#endif
+    lua_close(L);
+    if (num_done == 0) return -1;
+    return 0;
+  } catch(const std::exception &e) {
+    KALDI_ERR << e.what();
+    return -1;
+  }
+}