path: root/kaldi_decode/utils
author    Yimmon Zhuang <[email protected]>    2015-10-14 15:37:20 +0800
committer Yimmon Zhuang <[email protected]>    2015-10-14 15:37:20 +0800
commit    b33b3a6732c6b6a66bd5c44c615be56d66f4ed67 (patch)
tree      47501412a3324e4c13b1238eeb913aae02b2024a /kaldi_decode/utils
parent    e39fb231f64ddc8b79a6eb5434f529aadb3165fe (diff)
support kaldi decoder
Diffstat (limited to 'kaldi_decode/utils')
-rwxr-xr-x  kaldi_decode/utils/int2sym.pl       |  71
-rwxr-xr-x  kaldi_decode/utils/parse_options.sh |  97
-rwxr-xr-x  kaldi_decode/utils/queue.pl         | 580
-rwxr-xr-x  kaldi_decode/utils/run.pl           | 264
-rwxr-xr-x  kaldi_decode/utils/split_data.sh    | 135
5 files changed, 1147 insertions, 0 deletions
diff --git a/kaldi_decode/utils/int2sym.pl b/kaldi_decode/utils/int2sym.pl
new file mode 100755
index 0000000..d618939
--- /dev/null
+++ b/kaldi_decode/utils/int2sym.pl
@@ -0,0 +1,71 @@
+#!/usr/bin/env perl
+# Copyright 2010-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
+# Apache 2.0.
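+#
+# A typical invocation looks like the following (a hedged sketch; the pipeline
+# producing the integer output and the words.txt name are illustrative, not
+# part of this file):
+#   ... | utils/int2sym.pl -f 2- words.txt > trans.txt
+# Here words.txt holds "<symbol> <integer>" pairs (one per line) and each input
+# line looks like "utt-id 5 27 301 ..."; fields 2 and onward are mapped back to
+# their symbols, while field 1 (the utterance id) is passed through unchanged.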
+
+undef $field_begin;
+undef $field_end;
+
+
+if ($ARGV[0] eq "-f") {
+ shift @ARGV;
+ $field_spec = shift @ARGV;
+ if ($field_spec =~ m/^\d+$/) {
+ $field_begin = $field_spec - 1; $field_end = $field_spec - 1;
+ }
+  if ($field_spec =~ m/^(\d*)[-:](\d*)/) { # accept e.g. 1:10 as a courtesy (properly, 1-10)
+ if ($1 ne "") {
+ $field_begin = $1 - 1; # Change to zero-based indexing.
+ }
+ if ($2 ne "") {
+ $field_end = $2 - 1; # Change to zero-based indexing.
+ }
+ }
+ if (!defined $field_begin && !defined $field_end) {
+ die "Bad argument to -f option: $field_spec";
+ }
+}
+$symtab = shift @ARGV;
+if(!defined $symtab) {
+  print STDERR "Usage: int2sym.pl [options] symtab [input] > output\n" .
+ "options: [-f (<field>|<field_start>-<field-end>)]\n" .
+ "e.g.: -f 2, or -f 3-4\n";
+ exit(1);
+}
+
+open(F, "<$symtab") || die "Error opening symbol table file $symtab";
+while(<F>) {
+ @A = split(" ", $_);
+ @A == 2 || die "bad line in symbol table file: $_";
+ $int2sym{$A[1]} = $A[0];
+}
+
+sub int2sym {
+ my $a = shift @_;
+ my $pos = shift @_;
+ if($a !~ m:^\d+$:) { # not all digits..
+ $pos1 = $pos+1; # make it one-based.
+ die "int2sym.pl: found noninteger token $a [in position $pos1]\n";
+ }
+ $s = $int2sym{$a};
+ if(!defined ($s)) {
+ die "int2sym.pl: integer $a not in symbol table $symtab.";
+ }
+ return $s;
+}
+
+$error = 0;
+while (<>) {
+ @A = split(" ", $_);
+ for ($pos = 0; $pos <= $#A; $pos++) {
+ $a = $A[$pos];
+ if ( (!defined $field_begin || $pos >= $field_begin)
+ && (!defined $field_end || $pos <= $field_end)) {
+ $a = int2sym($a, $pos);
+ }
+ print $a . " ";
+ }
+ print "\n";
+}
+
+
+
diff --git a/kaldi_decode/utils/parse_options.sh b/kaldi_decode/utils/parse_options.sh
new file mode 100755
index 0000000..fdc8a36
--- /dev/null
+++ b/kaldi_decode/utils/parse_options.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+# Copyright 2012 Johns Hopkins University (Author: Daniel Povey);
+# Arnab Ghoshal, Karel Vesely
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+# MERCHANTABILITY OR NON-INFRINGEMENT.
+# See the Apache 2 License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Parse command-line options.
+# To be sourced by another script (as in ". parse_options.sh").
+# Option format is: --option-name arg
+# and shell variable "option_name" gets set to value "arg."
+# The exception is --help, which takes no arguments, but prints the
+# $help_message variable (if defined).
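+#
+# A minimal sketch of a calling script (hypothetical names; each variable must
+# be given a default before this file is sourced, otherwise the corresponding
+# --option is rejected as invalid):
+#   #!/bin/bash
+#   nj=4
+#   cmd=run.pl
+#   . utils/parse_options.sh
+#   echo "nj=$nj cmd=$cmd"
+# Running "./myscript.sh --nj 8 --cmd 'queue.pl -sync y'" sets nj=8 and
+# cmd="queue.pl -sync y".  Options imported from a --config file are sourced
+# first (see below), so explicit command-line options still take precedence.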
+
+
+###
+### The --config file options have lower priority than command-line
+### options, so we need to import them first...
+###
+
+# Now import all the configs specified by command-line, in left-to-right order
+for ((argpos=1; argpos<$#; argpos++)); do
+ if [ "${!argpos}" == "--config" ]; then
+ argpos_plus1=$((argpos+1))
+ config=${!argpos_plus1}
+ [ ! -r $config ] && echo "$0: missing config '$config'" && exit 1
+ . $config # source the config file.
+ fi
+done
+
+
+###
+### Now we process the command line options
+###
+while true; do
+ [ -z "${1:-}" ] && break; # break if there are no arguments
+ case "$1" in
+ # If the enclosing script is called with --help option, print the help
+ # message and exit. Scripts should put help messages in $help_message
+ --help|-h) if [ -z "$help_message" ]; then echo "No help found." 1>&2;
+ else printf "$help_message\n" 1>&2 ; fi;
+ exit 0 ;;
+ --*=*) echo "$0: options to scripts must be of the form --name value, got '$1'"
+ exit 1 ;;
+ # If the first command-line argument begins with "--" (e.g. --foo-bar),
+ # then work out the variable name as $name, which will equal "foo_bar".
+ --*) name=`echo "$1" | sed s/^--// | sed s/-/_/g`;
+      # Next we test whether the variable in question is undefined -- if so it's
+ # an invalid option and we die. Note: $0 evaluates to the name of the
+ # enclosing script.
+ # The test [ -z ${foo_bar+xxx} ] will return true if the variable foo_bar
+ # is undefined. We then have to wrap this test inside "eval" because
+ # foo_bar is itself inside a variable ($name).
+ eval '[ -z "${'$name'+xxx}" ]' && echo "$0: invalid option $1" 1>&2 && exit 1;
+
+ oldval="`eval echo \\$$name`";
+ # Work out whether we seem to be expecting a Boolean argument.
+ if [ "$oldval" == "true" ] || [ "$oldval" == "false" ]; then
+ was_bool=true;
+ else
+ was_bool=false;
+ fi
+
+ # Set the variable to the right value-- the escaped quotes make it work if
+ # the option had spaces, like --cmd "queue.pl -sync y"
+ eval $name=\"$2\";
+
+ # Check that Boolean-valued arguments are really Boolean.
+ if $was_bool && [[ "$2" != "true" && "$2" != "false" ]]; then
+ echo "$0: expected \"true\" or \"false\": $1 $2" 1>&2
+ exit 1;
+ fi
+ shift 2;
+ ;;
+ *) break;
+ esac
+done
+
+
+# Check for an empty argument to the --cmd option, which can easily occur as a
+# result of scripting errors.
+[ ! -z "${cmd+xxx}" ] && [ -z "$cmd" ] && echo "$0: empty argument to --cmd option" 1>&2 && exit 1;
+
+
+true; # so this script returns exit code 0.
diff --git a/kaldi_decode/utils/queue.pl b/kaldi_decode/utils/queue.pl
new file mode 100755
index 0000000..1e36de6
--- /dev/null
+++ b/kaldi_decode/utils/queue.pl
@@ -0,0 +1,580 @@
+#!/usr/bin/env perl
+use strict;
+use warnings;
+
+# Copyright 2012 Johns Hopkins University (Author: Daniel Povey).
+# 2014 Vimal Manohar (Johns Hopkins University)
+# Apache 2.0.
+
+use File::Basename;
+use Cwd;
+use Getopt::Long;
+
+# queue.pl has the same functionality as run.pl, except that
+# it runs the job in question on the queue (Sun GridEngine).
+# This version of queue.pl uses the task array functionality
+# of the grid engine. Note: it's different from the queue.pl
+# in the s4 and earlier scripts.
+
+# The script now supports configuring the queue system using a config file
+# (default: conf/queue.conf, but it can be specified with the --config option)
+# and a set of command line options.
+# The current script handles:
+# 1) Normal configuration arguments
+# For example, a command-line option of "--gpu 1" could be converted into the option
+# "-q g.q -l gpu=1" to qsub. How the CLI option is handled is determined by a
+# line in the config file like
+# gpu=* -q g.q -l gpu=$0
+# $0 here in the line is replaced with the argument read from the CLI and the
+# resulting string is passed to qsub.
+# 2) Special arguments to options such as
+# gpu=0
+# If --gpu 0 is given in the command line, then no special "-q" is given.
+# 3) Default argument
+# default gpu=0
+# If --gpu option is not passed in the command line, then the script behaves as
+# if --gpu 0 was passed since 0 is specified as the default argument for that
+# option
+# 4) Arbitrary options and arguments.
+# Any command line option starting with '--' and its argument will be handled
+# as long as it is defined in the config file.
+# 5) Default behavior
+# If the config file that is passed (via the --config option) is not readable, then the script
+# behaves as if the queue has the following config file:
+# $ cat conf/queue.conf
+# # Default configuration
+# command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
+# option mem=* -l mem_free=$0,ram_free=$0
+# option mem=0 # Do not add anything to qsub_opts
+# option num_threads=* -pe smp $0
+# option num_threads=1 # Do not add anything to qsub_opts
+# option max_jobs_run=* -tc $0
+# default gpu=0
+# option gpu=0 -q all.q
+# option gpu=* -l gpu=$0 -q g.q
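+#
+# A hedged illustration, assuming the default config above: a call such as
+#   queue.pl --mem 2G --num-threads 4 JOB=1:10 exp/log/foo.JOB.log some-command args...
+# (log path and command are placeholders) expands the "mem=*" and
+# "num_threads=*" lines into "-l mem_free=2G,ram_free=2G" and "-pe smp 4",
+# appends them to the qsub command, and adds "-t 1:10" for the task array.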
+
+my $qsub_opts = "";
+my $sync = 0;
+my $num_threads = 1;
+my $gpu = 0;
+
+my $config = "conf/queue.conf";
+
+my %cli_options = ();
+
+my $jobname;
+my $jobstart;
+my $jobend;
+
+my $array_job = 0;
+
+sub print_usage() {
+ print STDERR
+ "Usage: queue.pl [options] [JOB=1:n] log-file command-line arguments...\n" .
+ "e.g.: queue.pl foo.log echo baz\n" .
+ " (which will echo \"baz\", with stdout and stderr directed to foo.log)\n" .
+ "or: queue.pl -q all.q\@xyz foo.log echo bar \| sed s/bar/baz/ \n" .
+ " (which is an example of using a pipe; you can provide other escaped bash constructs)\n" .
+ "or: queue.pl -q all.q\@qyz JOB=1:10 foo.JOB.log echo JOB \n" .
+ " (which illustrates the mechanism to submit parallel jobs; note, you can use \n" .
+ " another string other than JOB)\n" .
+ "Note: if you pass the \"-sync y\" option to qsub, this script will take note\n" .
+ "and change its behavior. Otherwise it uses qstat to work out when the job finished\n" .
+ "Options:\n" .
+ " --config <config-file> (default: $config)\n" .
+ " --mem <mem-requirement> (e.g. --mem 2G, --mem 500M, \n" .
+   "                   K is also supported, and a bare number means bytes)\n" .
+ " --num-threads <num-threads> (default: $num_threads)\n" .
+ " --max-jobs-run <num-jobs>\n" .
+ " --gpu <0|1> (default: $gpu)\n";
+ exit 1;
+}
+
+if (@ARGV < 2) {
+ print_usage();
+}
+
+for (my $x = 1; $x <= 3; $x++) { # This for-loop is to
+ # allow the JOB=1:n option to be interleaved with the
+ # options to qsub.
+ while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) {
+ my $switch = shift @ARGV;
+
+ if ($switch eq "-V") {
+ $qsub_opts .= "-V ";
+ } else {
+ my $argument = shift @ARGV;
+ if ($argument =~ m/^--/) {
+        print STDERR "WARNING: suspicious argument '$argument' to $switch; starts with '--'\n";
+ }
+ if ($switch eq "-sync" && $argument =~ m/^[yY]/) {
+ $sync = 1;
+ $qsub_opts .= "$switch $argument ";
+ } elsif ($switch eq "-pe") { # e.g. -pe smp 5
+ my $argument2 = shift @ARGV;
+ $qsub_opts .= "$switch $argument $argument2 ";
+ $num_threads = $argument2;
+ } elsif ($switch =~ m/^--/) { # Config options
+ # Convert CLI option to variable name
+ # by removing '--' from the switch and replacing any
+ # '-' with a '_'
+ $switch =~ s/^--//;
+ $switch =~ s/-/_/g;
+ $cli_options{$switch} = $argument;
+ } else { # Other qsub options - passed as is
+ $qsub_opts .= "$switch $argument ";
+ }
+ }
+ }
+  if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:20
+    $array_job = 1;
+    $jobname = $1;
+    $jobstart = $2;
+    $jobend = $3;
+    if ($jobstart > $jobend) {
+      die "queue.pl: invalid job range $ARGV[0]";
+    }
+    if ($jobstart <= 0) {
+      die "queue.pl: invalid job range $ARGV[0], start must be strictly positive (this is a GridEngine limitation).";
+    }
+    shift; # consume JOB=1:20 only after the checks, so the error messages above can show it.
+ } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1.
+ $array_job = 1;
+ $jobname = $1;
+ $jobstart = $2;
+ $jobend = $2;
+ shift;
+ } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) {
+ print STDERR "Warning: suspicious first argument to queue.pl: $ARGV[0]\n";
+ }
+}
+
+if (@ARGV < 2) {
+ print_usage();
+}
+
+if (exists $cli_options{"config"}) {
+ $config = $cli_options{"config"};
+}
+
+my $default_config_file = <<'EOF';
+# Default configuration
+command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64*
+option mem=* -l mem_free=$0,ram_free=$0
+option mem=0 # Do not add anything to qsub_opts
+option num_threads=* -pe smp $0
+option num_threads=1 # Do not add anything to qsub_opts
+option max_jobs_run=* -tc $0
+default gpu=0
+option gpu=0
+option gpu=* -l gpu=$0 -q g.q
+EOF
+
+# Here the configuration options specified by the user on the command line
+# (e.g. --mem 2G) are converted to options to the qsub system as defined in
+# the config file. (e.g. if the config file has the line
+# "option mem=* -l ram_free=$0,mem_free=$0"
+# and the user has specified '--mem 2G' on the command line, the options
+# passed to the queue system would be "-l ram_free=2G,mem_free=2G".
+# A more detailed description of the ways the options would be handled is at
+# the top of this file.
+
+my $opened_config_file = 1;
+
+open CONFIG, "<$config" or $opened_config_file = 0;
+
+my %cli_config_options = ();
+my %cli_default_options = ();
+
+if ($opened_config_file == 0 && exists($cli_options{"config"})) {
+ print STDERR "Could not open config file $config\n";
+ exit(1);
+} elsif ($opened_config_file == 0 && !exists($cli_options{"config"})) {
+ # Open the default config file instead
+ open (CONFIG, "echo '$default_config_file' |") or die "Unable to open pipe\n";
+ $config = "Default config";
+}
+
+my $qsub_cmd = "";
+my $read_command = 0;
+
+while(<CONFIG>) {
+ chomp;
+ my $line = $_;
+ $_ =~ s/\s*#.*//g;
+ if ($_ eq "") { next; }
+ if ($_ =~ /^command (.+)/) {
+ $read_command = 1;
+ $qsub_cmd = $1 . " ";
+ } elsif ($_ =~ m/^option ([^=]+)=\* (.+)$/) {
+ # Config option that needs replacement with parameter value read from CLI
+ # e.g.: option mem=* -l mem_free=$0,ram_free=$0
+ my $option = $1; # mem
+ my $arg= $2; # -l mem_free=$0,ram_free=$0
+ if ($arg !~ m:\$0:) {
+ die "Unable to parse line '$line' in config file ($config)\n";
+ }
+ if (exists $cli_options{$option}) {
+ # Replace $0 with the argument read from command line.
+ # e.g. "-l mem_free=$0,ram_free=$0" -> "-l mem_free=2G,ram_free=2G"
+ $arg =~ s/\$0/$cli_options{$option}/g;
+ $cli_config_options{$option} = $arg;
+ }
+ } elsif ($_ =~ m/^option ([^=]+)=(\S+)\s?(.*)$/) {
+ # Config option that does not need replacement
+ # e.g. option gpu=0 -q all.q
+ my $option = $1; # gpu
+ my $value = $2; # 0
+ my $arg = $3; # -q all.q
+ if (exists $cli_options{$option}) {
+ $cli_default_options{($option,$value)} = $arg;
+ }
+ } elsif ($_ =~ m/^default (\S+)=(\S+)/) {
+ # Default options. Used for setting default values to options i.e. when
+ # the user does not specify the option on the command line
+ # e.g. default gpu=0
+ my $option = $1; # gpu
+ my $value = $2; # 0
+ if (!exists $cli_options{$option}) {
+ # If the user has specified this option on the command line, then we
+ # don't have to do anything
+ $cli_options{$option} = $value;
+ }
+ } else {
+ print STDERR "queue.pl: unable to parse line '$line' in config file ($config)\n";
+ exit(1);
+ }
+}
+
+close(CONFIG);
+
+if ($read_command != 1) {
+ print STDERR "queue.pl: config file ($config) does not contain the line \"command .*\"\n";
+ exit(1);
+}
+
+for my $option (keys %cli_options) {
+ if ($option eq "config") { next; }
+ if ($option eq "max_jobs_run" && $array_job != 1) { next; }
+ my $value = $cli_options{$option};
+
+ if (exists $cli_default_options{($option,$value)}) {
+ $qsub_opts .= "$cli_default_options{($option,$value)} ";
+ } elsif (exists $cli_config_options{$option}) {
+ $qsub_opts .= "$cli_config_options{$option} ";
+ } else {
+ if ($opened_config_file == 0) { $config = "default config file"; }
+ die "queue.pl: Command line option $option not described in $config (or value '$value' not allowed)\n";
+ }
+}
+
+my $cwd = getcwd();
+my $logfile = shift @ARGV;
+
+if ($array_job == 1 && $logfile !~ m/$jobname/
+ && $jobend > $jobstart) {
+ print STDERR "queue.pl: you are trying to run a parallel job but "
+ . "you are putting the output into just one log file ($logfile)\n";
+ exit(1);
+}
+
+#
+# Work out the command; quote escaping is done here.
+# Note: the rules for escaping stuff are worked out pretty
+# arbitrarily, based on what we want it to do. Some things that
+# we pass as arguments to queue.pl, such as "|", we want to be
+# interpreted by bash, so we don't escape them. Other things,
+# such as archive specifiers like 'ark:gunzip -c foo.gz|', we want
+# to be passed, in quotes, to the Kaldi program. Our heuristic
+# is that stuff with spaces in should be quoted. This doesn't
+# always work.
+#
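+# For instance (illustrative values): an argument such as 'ark:gunzip -c foo.gz|'
+# contains spaces but no double quotes, so it is re-quoted as "ark:gunzip -c foo.gz|"
+# in the generated script, while a lone "|" or ">" contains no spaces and is
+# passed through unquoted for bash to interpret.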
+my $cmd = "";
+
+foreach my $x (@ARGV) {
+ if ($x =~ m/^\S+$/) { $cmd .= $x . " "; } # If string contains no spaces, take
+ # as-is.
+  elsif ($x =~ m:\":) { $cmd .= "'$x' "; } # else if it contains double quotes, quote with single quotes
+ else { $cmd .= "\"$x\" "; } # else use double.
+}
+
+#
+# Work out the location of the script file, and open it for writing.
+#
+my $dir = dirname($logfile);
+my $base = basename($logfile);
+my $qdir = "$dir/q";
+$qdir =~ s:/(log|LOG)/*q:/q:; # If qdir ends in .../log/q, make it just .../q.
+my $queue_logfile = "$qdir/$base";
+
+if (!-d $dir) { system "mkdir -p $dir 2>/dev/null"; } # another job may be doing this...
+if (!-d $dir) { die "Cannot make the directory $dir\n"; }
+# make a directory called "q",
+# where we will put the log created by qsub... normally this doesn't contain
+# anything interesting, everything goes to $logfile.
+if (! -d "$qdir") {
+ system "mkdir $qdir 2>/dev/null";
+ sleep(5); ## This is to fix an issue we encountered in denominator lattice creation,
+ ## where if e.g. the exp/tri2b_denlats/log/15/q directory had just been
+ ## created and the job immediately ran, it would die with an error because nfs
+ ## had not yet synced. I'm also decreasing the acdirmin and acdirmax in our
+ ## NFS settings to something like 5 seconds.
+}
+
+my $queue_array_opt = "";
+if ($array_job == 1) { # It's an array job.
+ $queue_array_opt = "-t $jobstart:$jobend";
+ $logfile =~ s/$jobname/\$SGE_TASK_ID/g; # This variable will get
+ # replaced by qsub, in each job, with the job-id.
+ $cmd =~ s/$jobname/\$\{SGE_TASK_ID\}/g; # same for the command...
+ $queue_logfile =~ s/\.?$jobname//; # the log file in the q/ subdirectory
+ # is for the queue to put its log, and this doesn't need the task array subscript
+ # so we remove it.
+}
+
+# queue_scriptfile is as $queue_logfile [e.g. dir/q/foo.log] but
+# with the suffix .sh.
+my $queue_scriptfile = $queue_logfile;
+($queue_scriptfile =~ s/\.[a-zA-Z]{1,5}$/.sh/) || ($queue_scriptfile .= ".sh");
+if ($queue_scriptfile !~ m:^/:) {
+ $queue_scriptfile = $cwd . "/" . $queue_scriptfile; # just in case.
+}
+
+# We'll write the job into a small bash script (the file-handle Q points at
+# $queue_scriptfile) and then submit that script with qsub.
+# Also keep our current PATH around, just in case there was something
+# in it that we need (although we also source ./path.sh)
+
+my $syncfile = "$qdir/done.$$";
+
+system("rm $queue_logfile $syncfile 2>/dev/null");
+#
+# Write to the script file, and then close it.
+#
+open(Q, ">$queue_scriptfile") || die "Failed to write to $queue_scriptfile";
+
+print Q "#!/bin/bash\n";
+print Q "cd $cwd\n";
+print Q ". ./path.sh\n";
+print Q "( echo '#' Running on \`hostname\`\n";
+print Q " echo '#' Started at \`date\`\n";
+print Q " echo -n '# '; cat <<EOF\n";
+print Q "$cmd\n"; # this is a way of echoing the command into a comment in the log file,
+print Q "EOF\n"; # without having to escape things like "|" and quote characters.
+print Q ") >$logfile\n";
+print Q "time1=\`date +\"%s\"\`\n";
+print Q " ( $cmd ) 2>>$logfile >>$logfile\n";
+print Q "ret=\$?\n";
+print Q "time2=\`date +\"%s\"\`\n";
+print Q "echo '#' Accounting: time=\$((\$time2-\$time1)) threads=$num_threads >>$logfile\n";
+print Q "echo '#' Finished at \`date\` with status \$ret >>$logfile\n";
+print Q "[ \$ret -eq 137 ] && exit 100;\n"; # If process was killed (e.g. oom) it will exit with status 137;
+ # let the script return with status 100 which will put it to E state; more easily rerunnable.
+if ($array_job == 0) { # not an array job
+ print Q "touch $syncfile\n"; # so we know it's done.
+} else {
+ print Q "touch $syncfile.\$SGE_TASK_ID\n"; # touch a bunch of sync-files.
+}
+print Q "exit \$[\$ret ? 1 : 0]\n"; # avoid status 100 which grid-engine
+print Q "## submitted with:\n"; # treats specially.
+$qsub_cmd .= "-o $queue_logfile $qsub_opts $queue_array_opt $queue_scriptfile >>$queue_logfile 2>&1";
+print Q "# $qsub_cmd\n";
+if (!close(Q)) { # close was not successful... || die "Could not close script file $shfile";
+ die "Failed to close the script file (full disk?)";
+}
+
+my $ret = system ($qsub_cmd);
+if ($ret != 0) {
+ if ($sync && $ret == 256) { # this is the exit status when a job failed (bad exit status)
+ if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/*/g; }
+ print STDERR "queue.pl: job writing to $logfile failed\n";
+ } else {
+ print STDERR "queue.pl: error submitting jobs to queue (return status was $ret)\n";
+ print STDERR "queue log file is $queue_logfile, command was $qsub_cmd\n";
+ print STDERR `tail $queue_logfile`;
+ }
+ exit(1);
+}
+
+my $sge_job_id;
+if (! $sync) { # We're not submitting with -sync y, so we
+ # need to wait for the jobs to finish. We wait for the
+ # sync-files we "touched" in the script to exist.
+ my @syncfiles = ();
+ if (!defined $jobname) { # not an array job.
+ push @syncfiles, $syncfile;
+ } else {
+ for (my $jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ push @syncfiles, "$syncfile.$jobid";
+ }
+ }
+ # We will need the sge_job_id, to check that job still exists
+ { # Get the SGE job-id from the log file in q/
+ open(L, "<$queue_logfile") || die "Error opening log file $queue_logfile";
+ undef $sge_job_id;
+ while (<L>) {
+ if (m/Your job\S* (\d+)[. ].+ has been submitted/) {
+ if (defined $sge_job_id) {
+ die "Error: your job was submitted more than once (see $queue_logfile)";
+ } else {
+ $sge_job_id = $1;
+ }
+ }
+ }
+ close(L);
+ if (!defined $sge_job_id) {
+ die "Error: log file $queue_logfile does not specify the SGE job-id.";
+ }
+ }
+ my $check_sge_job_ctr=1;
+ #
+ my $wait = 0.1;
+ my $counter = 0;
+ foreach my $f (@syncfiles) {
+ # wait for them to finish one by one.
+ while (! -f $f) {
+ sleep($wait);
+ $wait *= 1.2;
+ if ($wait > 3.0) {
+ $wait = 3.0; # never wait more than 3 seconds.
+ # the following (.kick) commands are basically workarounds for NFS bugs.
+ if (rand() < 0.25) { # don't do this every time...
+ if (rand() > 0.5) {
+ system("touch $qdir/.kick");
+ } else {
+ system("rm $qdir/.kick 2>/dev/null");
+ }
+ }
+ if ($counter++ % 10 == 0) {
+ # This seems to kick NFS in the teeth to cause it to refresh the
+ # directory. I've seen cases where it would indefinitely fail to get
+ # updated, even though the file exists on the server.
+ # Only do this every 10 waits (every 30 seconds) though, or if there
+ # are many jobs waiting they can overwhelm the file server.
+ system("ls $qdir >/dev/null");
+ }
+ }
+
+ # Check that the job exists in SGE. Job can be killed if duration
+ # exceeds some hard limit, or in case of a machine shutdown.
+ if (($check_sge_job_ctr++ % 10) == 0) { # Don't run qstat too often, avoid stress on SGE.
+ if ( -f $f ) { next; }; #syncfile appeared: OK.
+ $ret = system("qstat -j $sge_job_id >/dev/null 2>/dev/null");
+ # system(...) : To get the actual exit value, shift $ret right by eight bits.
+ if ($ret>>8 == 1) { # Job does not seem to exist
+ # Don't consider immediately missing job as error, first wait some
+ # time to make sure it is not just delayed creation of the syncfile.
+
+ sleep(3);
+ # Sometimes NFS gets confused and thinks it's transmitted the directory
+ # but it hasn't, due to timestamp issues. Changing something in the
+ # directory will usually fix that.
+ system("touch $qdir/.kick");
+ system("rm $qdir/.kick 2>/dev/null");
+ if ( -f $f ) { next; } #syncfile appeared, ok
+ sleep(7);
+ system("touch $qdir/.kick");
+ sleep(1);
+ system("rm $qdir/.kick 2>/dev/null");
+ if ( -f $f ) { next; } #syncfile appeared, ok
+ sleep(60);
+ system("touch $qdir/.kick");
+ sleep(1);
+ system("rm $qdir/.kick 2>/dev/null");
+ if ( -f $f ) { next; } #syncfile appeared, ok
+ $f =~ m/\.(\d+)$/ || die "Bad sync-file name $f";
+ my $job_id = $1;
+ if (defined $jobname) {
+ $logfile =~ s/\$SGE_TASK_ID/$job_id/g;
+ }
+ my $last_line = `tail -n 1 $logfile`;
+ if ($last_line =~ m/status 0$/ && (-M $logfile) < 0) {
+ # if the last line of $logfile ended with "status 0" and
+ # $logfile is newer than this program [(-M $logfile) gives the
+ # time elapsed between file modification and the start of this
+ # program], then we assume the program really finished OK,
+ # and maybe something is up with the file system.
+ print STDERR "**queue.pl: syncfile $f was not created but job seems\n" .
+ "**to have finished OK. Probably your file-system has problems.\n" .
+ "**This is just a warning.\n";
+ last;
+ } else {
+ chop $last_line;
+ print STDERR "queue.pl: Error, unfinished job no " .
+ "longer exists, log is in $logfile, last line is '$last_line', " .
+ "syncfile is $f, return status of qstat was $ret\n" .
+ "Possible reasons: a) Exceeded time limit? -> Use more jobs!" .
+ " b) Shutdown/Frozen machine? -> Run again!\n";
+ exit(1);
+ }
+ } elsif ($ret != 0) {
+ print STDERR "queue.pl: Warning: qstat command returned status $ret (qstat -j $sge_job_id,$!)\n";
+ }
+ }
+ }
+ }
+ my $all_syncfiles = join(" ", @syncfiles);
+ system("rm $all_syncfiles 2>/dev/null");
+}
+
+# OK, at this point we are synced; we know the job is done.
+# But we don't know about its exit status. We'll look at $logfile for this.
+# First work out an array @logfiles of file-locations we need to
+# read (just one, unless it's an array job).
+my @logfiles = ();
+if (!defined $jobname) { # not an array job.
+ push @logfiles, $logfile;
+} else {
+ for (my $jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ my $l = $logfile;
+ $l =~ s/\$SGE_TASK_ID/$jobid/g;
+ push @logfiles, $l;
+ }
+}
+
+my $num_failed = 0;
+my $status = 1;
+foreach my $l (@logfiles) {
+ my @wait_times = (0.1, 0.2, 0.2, 0.3, 0.5, 0.5, 1.0, 2.0, 5.0, 5.0, 5.0, 10.0, 25.0);
+ for (my $iter = 0; $iter <= @wait_times; $iter++) {
+ my $line = `tail -10 $l 2>/dev/null`; # Note: although this line should be the last
+ # line of the file, I've seen cases where it was not quite the last line because
+ # of delayed output by the process that was running, or processes it had called.
+ # so tail -10 gives it a little leeway.
+ if ($line =~ m/with status (\d+)/) {
+ $status = $1;
+ last;
+ } else {
+ if ($iter < @wait_times) {
+ sleep($wait_times[$iter]);
+ } else {
+ if (! -f $l) {
+ print STDERR "Log-file $l does not exist.\n";
+ } else {
+ print STDERR "The last line of log-file $l does not seem to indicate the "
+ . "return status as expected\n";
+ }
+ exit(1); # Something went wrong with the queue, or the
+ # machine it was running on, probably.
+ }
+ }
+ }
+ # OK, now we have $status, which is the return-status of
+ # the command in the job.
+ if ($status != 0) { $num_failed++; }
+}
+if ($num_failed == 0) { exit(0); }
+else { # we failed.
+ if (@logfiles == 1) {
+ if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/$jobstart/g; }
+ print STDERR "queue.pl: job failed with status $status, log is in $logfile\n";
+ if ($logfile =~ m/JOB/) {
+ print STDERR "queue.pl: probably you forgot to put JOB=1:\$nj in your script.\n";
+ }
+ } else {
+ if (defined $jobname) { $logfile =~ s/\$SGE_TASK_ID/*/g; }
+ my $numjobs = 1 + $jobend - $jobstart;
+ print STDERR "queue.pl: $num_failed / $numjobs failed, log is in $logfile\n";
+ }
+ exit(1);
+}
diff --git a/kaldi_decode/utils/run.pl b/kaldi_decode/utils/run.pl
new file mode 100755
index 0000000..6145a7a
--- /dev/null
+++ b/kaldi_decode/utils/run.pl
@@ -0,0 +1,264 @@
+#!/usr/bin/env perl
+use warnings; #sed replacement for -w perl parameter
+
+# In general, doing
+# run.pl some.log a b c is like running the command a b c in
+# the bash shell, and putting the standard error and output into some.log.
+# To run parallel jobs (backgrounded on the host machine), you can do (e.g.)
+# run.pl JOB=1:4 some.JOB.log a b c JOB is like running the command a b c JOB
+# and putting it in some.JOB.log, for each one. [Note: JOB can be any identifier].
+# If any of the jobs fails, this script will fail.
+
+# A typical example is:
+# run.pl some.log my-prog "--opt=foo bar" foo \| other-prog baz
+# and run.pl will run something like:
+# ( my-prog '--opt=foo bar' foo | other-prog baz ) >& some.log
+#
+# Basically it takes the command-line arguments, quotes them
+# as necessary to preserve spaces, and evaluates them with bash.
+# In addition it puts the command line at the top of the log, and
+# the start and end times of the command at the beginning and end.
+# The reason why this is useful is so that we can create a different
+# version of this program that uses a queueing system instead.
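+#
+# A small sketch of the parallel form (all paths are hypothetical):
+#   run.pl --max-jobs-run 8 JOB=1:16 exp/log/work.JOB.log do-something data/split16/JOB ...
+# forks one bash process per value of JOB (at most 8 at a time here), with JOB
+# substituted into both the command and the log-file name before execution.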
+
+# use Data::Dumper;
+
+@ARGV < 2 && die "usage: run.pl log-file command-line arguments...";
+
+
+$max_jobs_run = -1;
+$jobstart = 1;
+$jobend = 1;
+$ignored_opts = ""; # These will be ignored.
+
+# First parse an option like JOB=1:4, and any
+# options that would normally be given to
+# queue.pl, which we will just discard.
+
+if (@ARGV > 0) {
+ while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) { # parse any options
+ # that would normally go to qsub, but which will be ignored here.
+ $switch = shift @ARGV;
+ if ($switch eq "-V") {
+ $ignored_opts .= "-V ";
+ } elsif ($switch eq "--max-jobs-run" || $switch eq "-tc") {
+ # we do support the option --max-jobs-run n, and its GridEngine form -tc n.
+ $max_jobs_run = shift @ARGV;
+ if (! ($max_jobs_run > 0)) {
+ die "run.pl: invalid option --max-jobs-run $max_jobs_run";
+ }
+ } else {
+ $option = shift @ARGV;
+ if ($switch eq "-sync" && $option =~ m/^[yY]/) {
+ $ignored_opts .= "-sync "; # Note: in the
+ # corresponding code in queue.pl it says instead, just "$sync = 1;".
+ }
+ $ignored_opts .= "$switch $option ";
+ if ($switch eq "-pe") { # e.g. -pe smp 5
+ $option2 = shift @ARGV;
+ $ignored_opts .= "$option2 ";
+ }
+ }
+ }
+  if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:10
+    $jobname = $1;
+    $jobstart = $2;
+    $jobend = $3;
+    if ($jobstart > $jobend) {
+      die "run.pl: invalid job range $ARGV[0]";
+    }
+    if ($jobstart <= 0) {
+      die "run.pl: invalid job range $ARGV[0], start must be strictly positive (this is required for GridEngine compatibility).";
+    }
+    shift; # consume JOB=1:10 only after the checks, so the error messages above can show it.
+ } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1.
+ $jobname = $1;
+ $jobstart = $2;
+ $jobend = $2;
+ shift;
+ } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) {
+ print STDERR "run.pl: Warning: suspicious first argument to run.pl: $ARGV[0]\n";
+ }
+}
+
+# Users found this message confusing so we are removing it.
+# if ($ignored_opts ne "") {
+# print STDERR "run.pl: Warning: ignoring options \"$ignored_opts\"\n";
+# }
+
+if ($max_jobs_run == -1) { # If --max-jobs-run option not set,
+ # then work out the number of processors if possible,
+ # and set it based on that.
+ $max_jobs_run = 0;
+ if (open(P, "</proc/cpuinfo")) { # Linux
+ while (<P>) { if (m/^processor/) { $max_jobs_run++; } }
+ if ($max_jobs_run == 0) {
+ print STDERR "run.pl: Warning: failed to detect any processors from /proc/cpuinfo\n";
+ $max_jobs_run = 10; # reasonable default.
+ }
+ close(P);
+ } elsif (open(P, "sysctl -a |")) { # BSD/Darwin
+ while (<P>) {
+ if (m/hw\.ncpu\s*[:=]\s*(\d+)/) { # hw.ncpu = 4, or hw.ncpu: 4
+ $max_jobs_run = $1;
+ last;
+ }
+ }
+ close(P);
+ if ($max_jobs_run == 0) {
+ print STDERR "run.pl: Warning: failed to detect any processors from sysctl -a\n";
+ $max_jobs_run = 10; # reasonable default.
+ }
+ } else {
+ # allow at most 32 jobs at once, on non-UNIX systems; change this code
+ # if you need to change this default.
+ $max_jobs_run = 32;
+ }
+ # The just-computed value of $max_jobs_run is just the number of processors
+ # (or our best guess); and if it happens that the number of jobs we need to
+ # run is just slightly above $max_jobs_run, it will make sense to increase
+ # $max_jobs_run to equal the number of jobs, so we don't have a small number
+ # of leftover jobs.
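+  # For example (illustrative numbers): with 16 detected processors and
+  # JOB=1:20, we have 20 < 1.4 * 16 = 22.4, so $max_jobs_run is raised to 20
+  # and all the jobs run in a single wave instead of 16 + 4.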
+ $num_jobs = $jobend - $jobstart + 1;
+ if ($num_jobs > $max_jobs_run && $num_jobs < 1.4 * $max_jobs_run) {
+ $max_jobs_run = $num_jobs;
+ }
+}
+
+$logfile = shift @ARGV;
+
+if (defined $jobname && $logfile !~ m/$jobname/ &&
+ $jobend > $jobstart) {
+ print STDERR "run.pl: you are trying to run a parallel job but "
+ . "you are putting the output into just one log file ($logfile)\n";
+ exit(1);
+}
+
+$cmd = "";
+
+foreach $x (@ARGV) {
+  if ($x =~ m/^\S+$/) { $cmd .= $x . " "; } # if the string contains no spaces, take as-is
+  elsif ($x =~ m:\":) { $cmd .= "'$x' "; } # else if it contains double quotes, quote with single quotes
+  else { $cmd .= "\"$x\" "; } # else quote with double quotes
+}
+
+#$Data::Dumper::Indent=0;
+$ret = 0;
+$numfail = 0;
+%active_pids=();
+
+use POSIX ":sys_wait_h";
+for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ if (scalar(keys %active_pids) >= $max_jobs_run) {
+
+    # Let's wait for a change in any child's status
+ # Then we have to work out which child finished
+ $r = waitpid(-1, 0);
+ $code = $?;
+ if ($r < 0 ) { die "run.pl: Error waiting for child process"; } # should never happen.
+ if ( defined $active_pids{$r} ) {
+ $jid=$active_pids{$r};
+ $fail[$jid]=$code;
+ if ($code !=0) { $numfail++;}
+ delete $active_pids{$r};
+ # print STDERR "Finished: $r/$jid " . Dumper(\%active_pids) . "\n";
+ } else {
+      die "run.pl: Cannot find the PID of the child process that just finished.";
+ }
+
+ # In theory we could do a non-blocking waitpid over all jobs running just
+    # to find out whether more than one job finished during the previous waitpid().
+    # However, we just omit this and will reap the next one in the next pass
+ # through the for(;;) cycle
+ }
+ $childpid = fork();
+ if (!defined $childpid) { die "run.pl: Error forking in run.pl (writing to $logfile)"; }
+ if ($childpid == 0) { # We're in the child... this branch
+ # executes the job and returns (possibly with an error status).
+ if (defined $jobname) {
+ $cmd =~ s/$jobname/$jobid/g;
+ $logfile =~ s/$jobname/$jobid/g;
+ }
+ system("mkdir -p `dirname $logfile` 2>/dev/null");
+ open(F, ">$logfile") || die "run.pl: Error opening log file $logfile";
+ print F "# " . $cmd . "\n";
+ print F "# Started at " . `date`;
+ $starttime = `date +'%s'`;
+ print F "#\n";
+ close(F);
+
+ # Pipe into bash.. make sure we're not using any other shell.
+ open(B, "|bash") || die "run.pl: Error opening shell command";
+ print B "( " . $cmd . ") 2>>$logfile >> $logfile";
+ close(B); # If there was an error, exit status is in $?
+ $ret = $?;
+
+ $lowbits = $ret & 127;
+ $highbits = $ret >> 8;
+ if ($lowbits != 0) { $return_str = "code $highbits; signal $lowbits" }
+ else { $return_str = "code $highbits"; }
+
+ $endtime = `date +'%s'`;
+ open(F, ">>$logfile") || die "run.pl: Error opening log file $logfile (again)";
+ $enddate = `date`;
+ chop $enddate;
+ print F "# Accounting: time=" . ($endtime - $starttime) . " threads=1\n";
+ print F "# Ended ($return_str) at " . $enddate . ", elapsed time " . ($endtime-$starttime) . " seconds\n";
+ close(F);
+ exit($ret == 0 ? 0 : 1);
+ } else {
+ $pid[$jobid] = $childpid;
+ $active_pids{$childpid} = $jobid;
+ # print STDERR "Queued: " . Dumper(\%active_pids) . "\n";
+ }
+}
+
+# Now that we have submitted all the jobs, let's wait until they all finish.
+foreach $child (keys %active_pids) {
+ $jobid=$active_pids{$child};
+ $r = waitpid($pid[$jobid], 0);
+ $code = $?;
+ if ($r == -1) { die "run.pl: Error waiting for child process"; } # should never happen.
+  if ($r != 0) { $fail[$jobid]=$code; $numfail++ if $code!=0; } # child was reaped; count a failure if its exit code is non-zero
+}
+
+# Some sanity checks:
+# The $fail array should not contain undefined codes
+# The number of non-zeros in that array should be equal to $numfail
+# We cannot do foreach() here, as the JOB ids do not necessarily start at zero
+$failed_jids=0;
+for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) {
+ $job_return = $fail[$jobid];
+ if (not defined $job_return ) {
+ # print Dumper(\@fail);
+
+ die "run.pl: Sanity check failed: we have indication that some jobs are running " .
+ "even after we waited for all jobs to finish" ;
+ }
+ if ($job_return != 0 ){ $failed_jids++;}
+}
+if ($failed_jids != $numfail) {
+ die "run.pl: Sanity check failed: cannot find out how many jobs failed ($failed_jids x $numfail)."
+}
+if ($numfail > 0) { $ret = 1; }
+
+if ($ret != 0) {
+ $njobs = $jobend - $jobstart + 1;
+ if ($njobs == 1) {
+ if (defined $jobname) {
+ $logfile =~ s/$jobname/$jobstart/; # only one numbered job, so replace name with
+ # that job.
+ }
+ print STDERR "run.pl: job failed, log is in $logfile\n";
+ if ($logfile =~ m/JOB/) {
+      print STDERR "run.pl: probably you forgot to put JOB=1:\$nj in your script.\n";
+ }
+ }
+ else {
+ $logfile =~ s/$jobname/*/g;
+ print STDERR "run.pl: $numfail / $njobs failed, log is in $logfile\n";
+ }
+}
+
+
+exit ($ret);
diff --git a/kaldi_decode/utils/split_data.sh b/kaldi_decode/utils/split_data.sh
new file mode 100755
index 0000000..941890c
--- /dev/null
+++ b/kaldi_decode/utils/split_data.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# Copyright 2010-2013 Microsoft Corporation
+# Johns Hopkins University (Author: Daniel Povey)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+# MERCHANTABILITY OR NON-INFRINGEMENT.
+# See the Apache 2 License for the specific language governing permissions and
+# limitations under the License.
+
+split_per_spk=true
+if [ "$1" == "--per-utt" ]; then
+ split_per_spk=false
+ shift
+fi
+
+if [ $# != 2 ]; then
+ echo "Usage: split_data.sh [--per-utt] <data-dir> <num-to-split>"
+ echo "This script will not split the data-dir if it detects that the output is newer than the input."
+ echo "By default it splits per speaker (so each speaker is in only one split dir),"
+ echo "but with the --per-utt option it will ignore the speaker information while splitting."
+ exit 1
+fi
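+
+# For example (a hedged sketch; the directory name is illustrative):
+#   utils/split_data.sh data/train 8
+# creates data/train/split8/{1..8}, each holding the per-job subset of utt2spk,
+# spk2utt and, when present, feats.scp, text, cmvn.scp, segments, wav.scp, etc.;
+# with --per-utt the split ignores speaker boundaries.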
+
+data=$1
+numsplit=$2
+
+if [ $numsplit -le 0 ]; then
+ echo "Invalid num-split argument $numsplit";
+ exit 1;
+fi
+
+n=0;
+feats=""
+wavs=""
+utt2spks=""
+texts=""
+
+nu=`cat $data/utt2spk | wc -l`
+nf=`cat $data/feats.scp 2>/dev/null | wc -l`
+nt=`cat $data/text 2>/dev/null | wc -l` # take it as zero if no such file
+if [ -f $data/feats.scp ] && [ $nu -ne $nf ]; then
+  echo "** split_data.sh: warning, #lines in (utt2spk,feats.scp) is ($nu,$nf); you can "
+ echo "** use utils/fix_data_dir.sh $data to fix this."
+fi
+if [ -f $data/text ] && [ $nu -ne $nt ]; then
+  echo "** split_data.sh: warning, #lines in (utt2spk,text) is ($nu,$nt); you can "
+ echo "** use utils/fix_data_dir.sh to fix this."
+fi
+
+s1=$data/split$numsplit/1
+if [ ! -d $s1 ]; then
+ need_to_split=true
+else
+ need_to_split=false
+ for f in utt2spk spk2utt spk2warp feats.scp text wav.scp cmvn.scp spk2gender \
+ vad.scp segments reco2file_and_channel utt2lang; do
+ if [[ -f $data/$f && ( ! -f $s1/$f || $s1/$f -ot $data/$f ) ]]; then
+ need_to_split=true
+ fi
+ done
+fi
+
+if ! $need_to_split; then
+ exit 0;
+fi
+
+for n in `seq $numsplit`; do
+ mkdir -p $data/split$numsplit/$n
+ utt2spks="$utt2spks $data/split$numsplit/$n/utt2spk"
+done
+
+if $split_per_spk; then
+ utt2spk_opt="--utt2spk=$data/utt2spk"
+else
+ utt2spk_opt=
+fi
+
+# If lockfile is not installed, just don't lock it. It's not a big deal.
+which lockfile >&/dev/null && lockfile -l 60 $data/.split_lock
+
+utils/split_scp.pl $utt2spk_opt $data/utt2spk $utt2spks || exit 1
+
+for n in `seq $numsplit`; do
+ dsn=$data/split$numsplit/$n
+ utils/utt2spk_to_spk2utt.pl $dsn/utt2spk > $dsn/spk2utt || exit 1;
+done
+
+maybe_wav_scp=
+if [ ! -f $data/segments ]; then
+ maybe_wav_scp=wav.scp # If there is no segments file, then wav file is
+ # indexed per utt.
+fi
+
+# split some things that are indexed by utterance.
+for f in feats.scp text vad.scp utt2lang $maybe_wav_scp; do
+ if [ -f $data/$f ]; then
+ utils/filter_scps.pl JOB=1:$numsplit \
+ $data/split$numsplit/JOB/utt2spk $data/$f $data/split$numsplit/JOB/$f || exit 1;
+ fi
+done
+
+# split some things that are indexed by speaker
+for f in spk2gender spk2warp cmvn.scp; do
+ if [ -f $data/$f ]; then
+ utils/filter_scps.pl JOB=1:$numsplit \
+ $data/split$numsplit/JOB/spk2utt $data/$f $data/split$numsplit/JOB/$f || exit 1;
+ fi
+done
+
+for n in `seq $numsplit`; do
+ dsn=$data/split$numsplit/$n
+ if [ -f $data/segments ]; then
+ utils/filter_scp.pl $dsn/utt2spk $data/segments > $dsn/segments
+ awk '{print $2;}' $dsn/segments | sort | uniq > $data/tmp.reco # recording-ids.
+ if [ -f $data/reco2file_and_channel ]; then
+ utils/filter_scp.pl $data/tmp.reco $data/reco2file_and_channel > $dsn/reco2file_and_channel
+ fi
+ if [ -f $data/wav.scp ]; then
+ utils/filter_scp.pl $data/tmp.reco $data/wav.scp >$dsn/wav.scp
+ fi
+ rm $data/tmp.reco
+  fi # else it would have been handled above, see maybe_wav_scp.
+done
+
+rm -f $data/.split_lock
+
+exit 0