From b33b3a6732c6b6a66bd5c44c615be56d66f4ed67 Mon Sep 17 00:00:00 2001 From: Yimmon Zhuang Date: Wed, 14 Oct 2015 15:37:20 +0800 Subject: support kaldi decoder --- kaldi_decode/cmd.sh | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100755 kaldi_decode/cmd.sh (limited to 'kaldi_decode/cmd.sh') diff --git a/kaldi_decode/cmd.sh b/kaldi_decode/cmd.sh new file mode 100755 index 0000000..e2e54e8 --- /dev/null +++ b/kaldi_decode/cmd.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# "queue.pl" uses qsub. The options to it are +# options to qsub. If you have GridEngine installed, +# change this to a queue you have access to. +# Otherwise, use "run.pl", which will run jobs locally +# (make sure your --num-jobs options are no more than +# the number of cpus on your machine). + +#a) JHU cluster options +#export train_cmd="queue.pl -l arch=*64" +#export decode_cmd="queue.pl -l arch=*64,mem_free=2G,ram_free=2G" +#export mkgraph_cmd="queue.pl -l arch=*64,ram_free=4G,mem_free=4G" + +#export cuda_cmd="..." + + +#b) BUT cluster options +#export train_cmd="queue.pl -q all.q@@blade -l ram_free=1200M,mem_free=1200M" +#export decode_cmd="queue.pl -q all.q@@blade -l ram_free=1700M,mem_free=1700M" +#export decodebig_cmd="queue.pl -q all.q@@blade -l ram_free=4G,mem_free=4G" + +#export cuda_cmd="queue.pl -q long.q@@pco203 -l gpu=1" +#export cuda_cmd="queue.pl -q long.q@pcspeech-gpu" +#export mkgraph_cmd="queue.pl -q all.q@@servers -l ram_free=4G,mem_free=4G" + +#c) run it locally... +export train_cmd=run.pl +export decode_cmd=run.pl +export cuda_cmd=run.pl +export mkgraph_cmd=run.pl + +#export train_cmd='queue.pl' +#export decode_cmd='queue.pl' +#export cuda_cmd='queue.pl -l gpu=1 -l hostname="markov|date|hamming"' +#export mkgraph_cmd='queue.pl' + -- cgit v1.2.3