-rw-r--r--   Makefile                            |  6
-rw-r--r--   nerv/examples/swb_baseline2.lua     | 79
-rw-r--r--   nerv/examples/timit_baseline2.lua   | 16
-rw-r--r--   nerv/lib/matrix/generic/cukernel.cu |  8
4 files changed, 58 insertions, 51 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -23,7 +23,7 @@ export KALDI_BASE
 export BLAS_LDFLAGS
 
 .PHONY: nerv speech/speech_utils speech/htk_io speech/kaldi_io speech/kaldi_decode \
-	nerv-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean \
+	nerv-clean speech-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean \
 	Penlight
 
 all: nerv
@@ -36,8 +36,10 @@ luarocks:
 speech: speech/speech_utils speech/htk_io speech/kaldi_io speech/kaldi_decode
 speech-clean: speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean
 clean: nerv-clean speech-clean
+uninstall:
+	-rm -rf install/
 
 nerv Penlight speech/speech_utils speech/htk_io speech/kaldi_io speech/kaldi_decode:
 	cd $@; $(PREFIX)/bin/luarocks make
 nerv-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean:
-	cd $(subst -clean,,$@); make clean LUA_BINDIR=$(PREFIX)/bin/
+	-make -C $(subst -clean,,$@) clean LUA_BINDIR=$(PREFIX)/bin/
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 8b5ebb1..6796f6f 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -1,7 +1,6 @@
 require 'htk_io'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
+gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
         rearrange = true, -- just to make the context order consistent with old results, deprecated
-        frm_ext = 5,
         frm_trim = 5, -- trim the first and last 5 frames, TNet just does this, deprecated
         tr_scp = "/speechlab/users/mfy43/swb50/train_bp.scp",
         cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
@@ -15,13 +14,13 @@ function make_layer_repo(param_repo)
         -- global transf
         ["nerv.BiasLayer"] =
         {
-            blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}},
-            blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias2"}}
+            blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias0"}},
+            blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}}
         },
         ["nerv.WindowLayer"] =
         {
-            wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}},
-            wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window2"}}
+            wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window0"}},
+            wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}}
         },
         -- biased linearity
         ["nerv.AffineLayer"] =
@@ -65,39 +64,39 @@ function make_layer_repo(param_repo)
     layer_repo:add_layers(
     {
-        ["nerv.DAGLayer"] =
+        ["nerv.GraphLayer"] =
         {
             global_transf = {
                 dim_in = {429}, dim_out = {429},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "blayer1[1]",
-                    ["blayer1[1]"] = "wlayer1[1]",
-                    ["wlayer1[1]"] = "blayer2[1]",
-                    ["blayer2[1]"] = "wlayer2[1]",
-                    ["wlayer2[1]"] = "<output>[1]"
+                    {"<input>[1]", "blayer1[1]", 0},
+                    {"blayer1[1]", "wlayer1[1]", 0},
+                    {"wlayer1[1]", "blayer2[1]", 0},
+                    {"blayer2[1]", "wlayer2[1]", 0},
+                    {"wlayer2[1]", "<output>[1]", 0}
                 }
             },
             main = {
                 dim_in = {429}, dim_out = {3001},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "affine0[1]",
-                    ["affine0[1]"] = "sigmoid0[1]",
-                    ["sigmoid0[1]"] = "affine1[1]",
-                    ["affine1[1]"] = "sigmoid1[1]",
-                    ["sigmoid1[1]"] = "affine2[1]",
-                    ["affine2[1]"] = "sigmoid2[1]",
-                    ["sigmoid2[1]"] = "affine3[1]",
-                    ["affine3[1]"] = "sigmoid3[1]",
-                    ["sigmoid3[1]"] = "affine4[1]",
-                    ["affine4[1]"] = "sigmoid4[1]",
-                    ["sigmoid4[1]"] = "affine5[1]",
-                    ["affine5[1]"] = "sigmoid5[1]",
-                    ["sigmoid5[1]"] = "affine6[1]",
-                    ["affine6[1]"] = "sigmoid6[1]",
-                    ["sigmoid6[1]"] = "affine7[1]",
-                    ["affine7[1]"] = "<output>[1]"
+                    {"<input>[1]", "affine0[1]", 0},
+                    {"affine0[1]", "sigmoid0[1]", 0},
+                    {"sigmoid0[1]", "affine1[1]", 0},
+                    {"affine1[1]", "sigmoid1[1]", 0},
+                    {"sigmoid1[1]", "affine2[1]", 0},
+                    {"affine2[1]", "sigmoid2[1]", 0},
+                    {"sigmoid2[1]", "affine3[1]", 0},
+                    {"affine3[1]", "sigmoid3[1]", 0},
+                    {"sigmoid3[1]", "affine4[1]", 0},
+                    {"affine4[1]", "sigmoid4[1]", 0},
+                    {"sigmoid4[1]", "affine5[1]", 0},
+                    {"affine5[1]", "sigmoid5[1]", 0},
+                    {"sigmoid5[1]", "affine6[1]", 0},
+                    {"affine6[1]", "sigmoid6[1]", 0},
+                    {"sigmoid6[1]", "affine7[1]", 0},
+                    {"affine7[1]", "<output>[1]", 0}
                 }
             }
         }
@@ -105,25 +104,25 @@ function make_layer_repo(param_repo)
     layer_repo:add_layers(
     {
-        ["nerv.DAGLayer"] =
+        ["nerv.GraphLayer"] =
         {
             ce_output = {
                 dim_in = {429, 1}, dim_out = {1},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "main[1]",
-                    ["main[1]"] = "ce_crit[1]",
-                    ["<input>[2]"] = "ce_crit[2]",
-                    ["ce_crit[1]"] = "<output>[1]"
+                    {"<input>[1]", "main[1]", 0},
+                    {"main[1]", "ce_crit[1]", 0},
+                    {"<input>[2]", "ce_crit[2]", 0},
+                    {"ce_crit[1]", "<output>[1]", 0}
                 }
             },
             softmax_output = {
                 dim_in = {429}, dim_out = {3001},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "main[1]",
-                    ["main[1]"] = "softmax[1]",
-                    ["softmax[1]"] = "<output>[1]"
+                    {"<input>[1]", "main[1]", 0},
+                    {"main[1]", "softmax[1]", 0},
+                    {"softmax[1]", "<output>[1]", 0}
                 }
             }
         }
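Note: the bulk of this change migrates the example configs from nerv.DAGLayer to nerv.GraphLayer. Two things change in each network description: the sub_layers field is renamed to layer_repo, and connections turn from a map of ["from-port"] = "to-port" pairs into an ordered list of {from-port, to-port, shift} triples. A minimal side-by-side sketch of the two styles; the trailing 0 reads as a time-shift of zero frames for ordinary feedforward links (that reading, and the layer names below, are illustrative rather than definitive):

    -- old DAGLayer style: connections as a port-to-port map
    local dag_style = {
        ["<input>[1]"] = "affine0[1]",
        ["affine0[1]"] = "<output>[1]"
    }

    -- new GraphLayer style: an ordered array of {from, to, time-shift} triples
    local graph_style = {
        {"<input>[1]", "affine0[1]", 0},
        {"affine0[1]", "<output>[1]", 0}
    }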
["sigmoid5[1]"] = "affine6[1]", - ["affine6[1]"] = "sigmoid6[1]", - ["sigmoid6[1]"] = "affine7[1]", - ["affine7[1]"] = "<output>[1]" + {"<input>[1]", "affine0[1]", 0}, + {"affine0[1]", "sigmoid0[1]", 0}, + {"sigmoid0[1]", "affine1[1]", 0}, + {"affine1[1]", "sigmoid1[1]", 0}, + {"sigmoid1[1]", "affine2[1]", 0}, + {"affine2[1]", "sigmoid2[1]", 0}, + {"sigmoid2[1]", "affine3[1]", 0}, + {"affine3[1]", "sigmoid3[1]", 0}, + {"sigmoid3[1]", "affine4[1]", 0}, + {"affine4[1]", "sigmoid4[1]", 0}, + {"sigmoid4[1]", "affine5[1]", 0}, + {"affine5[1]", "sigmoid5[1]", 0}, + {"sigmoid5[1]", "affine6[1]", 0}, + {"affine6[1]", "sigmoid6[1]", 0}, + {"sigmoid6[1]", "affine7[1]", 0}, + {"affine7[1]", "<output>[1]", 0} } } } @@ -105,25 +104,25 @@ function make_layer_repo(param_repo) layer_repo:add_layers( { - ["nerv.DAGLayer"] = + ["nerv.GraphLayer"] = { ce_output = { dim_in = {429, 1}, dim_out = {1}, - sub_layers = layer_repo, + layer_repo = layer_repo, connections = { - ["<input>[1]"] = "main[1]", - ["main[1]"] = "ce_crit[1]", - ["<input>[2]"] = "ce_crit[2]", - ["ce_crit[1]"] = "<output>[1]" + {"<input>[1]", "main[1]", 0}, + {"main[1]", "ce_crit[1]", 0}, + {"<input>[2]", "ce_crit[2]", 0}, + {"ce_crit[1]", "<output>[1]", 0} } }, softmax_output = { dim_in = {429}, dim_out = {3001}, - sub_layers = layer_repo, + layer_repo = layer_repo, connections = { - ["<input>[1]"] = "main[1]", - ["main[1]"] = "softmax[1]", - ["softmax[1]"] = "<output>[1]" + {"<input>[1]", "main[1]", 0}, + {"main[1]", "softmax[1]", 0}, + {"softmax[1]", "<output>[1]", 0} } } } diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua index d783c3d..b1c1e66 100644 --- a/nerv/examples/timit_baseline2.lua +++ b/nerv/examples/timit_baseline2.lua @@ -1,14 +1,14 @@ require 'kaldi_io' gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5, tr_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " .. - "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/train.scp ark:- |", + "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/train.scp ark:- |", cv_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " .. 
- "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/cv.scp ark:- |", - initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_init.nerv", - "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_output.nerv", - "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"}, - decode_param = {"/speechlab/users/mfy43/timit/nnet_init_20160229015745_iter_13_lr0.013437_tr72.434_cv58.729.nerv", - "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"}} + "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/cv.scp ark:- |", + initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_init.nerv", + "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv", + "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"}, + -- params in nnet_trans.nerv are included in the trained model + decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"}} function make_layer_repo(param_repo) local layer_repo = nerv.LayerRepo( @@ -113,7 +113,7 @@ function make_layer_repo(param_repo) dim_in = {440}, dim_out = {1959}, layer_repo = layer_repo, connections = { - {"<input>[1]", "main[1]", 0}, + {"<input>[1]", "main[1]", 0}, {"main[1]", "softmax[1]", 0}, {"softmax[1]", "<output>[1]", 0} } diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu index 4717209..cf9d213 100644 --- a/nerv/lib/matrix/generic/cukernel.cu +++ b/nerv/lib/matrix/generic/cukernel.cu @@ -277,11 +277,13 @@ __global__ void cudak_(update_select_rows_by_rowidx)(MATRIX_ELEM *c, const MATRI int i = blockIdx.y * blockDim.y + threadIdx.y; if (i >= nrow_a || j >= ncol_a) return; int i_c = lrintf(idx[i]); + /* if (i_c < 0 || i_c >= nrow_c) { printf("ERROR inside kernel update_select_rows, i_c(%d) out of range!", i_c); } + */ //critical: i_c could conflict among threads(same index in the idx array), so atomicAdd is used - //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha; + //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha; atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha); } @@ -291,9 +293,11 @@ __global__ void cudak_(update_select_rows_by_colidx)(MATRIX_ELEM *c, const MATRI int i = blockIdx.y * blockDim.y + threadIdx.y; if (i >= nrow_a || j >= ncol_a) return; int i_c = lrintf(idx[stride_idx * i]); + /* if (i_c < 0 || i_c >= nrow_c) { printf("ERROR inside kernel update_select_rows, i_c(%d) out of range!", i_c); } + */ //critical: i_c could conflict among threads(same index in the idx array), so atomicAdd is used //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha; atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha); @@ -394,9 +398,11 @@ __global__ void cudak_(copy_rows_by_colidx)(const MATRIX_ELEM *a, MATRIX_ELEM *b int i = blockIdx.y * blockDim.y + threadIdx.y; if (i >= nrow || j >= ncol) return; int k = lrintf(idx[i * idx_stride]); + /* if (k < 0 || k >= a_nrow) { printf("error in kernel copy_rows_by_colidx k(%d) out of range\n", k); } + */ b[j + i * stride] = a[j + k * stride]; } |