-rw-r--r--   Makefile                                                      | 11
-rw-r--r--   nerv/Makefile                                                 |  2
-rw-r--r--   nerv/examples/asr_trainer.lua                                 | 71
-rw-r--r--   nerv/examples/swb_baseline.lua                                |  1
-rw-r--r--   nerv/examples/swb_baseline2.lua                               | 87
-rw-r--r--   nerv/examples/timit_baseline2.lua                             | 20
-rw-r--r--   nerv/init.lua                                                 |  8
-rw-r--r--   nerv/io/frm_buffer.lua (renamed from nerv/io/sgd_buffer.lua)  | 14
-rw-r--r--   nerv/io/init.lua                                              |  2
-rw-r--r--   nerv/io/seq_buffer.lua                                        |  7
-rw-r--r--   nerv/lib/matrix/generic/cukernel.cu                           |  8
-rw-r--r--   nerv/nn/network.lua                                           |  8
12 files changed, 136 insertions(+), 103 deletions(-)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all clean install luajit luarocks speech submodule
+.PHONY: all clean install luajit luarocks speech
 ############## EDIT THESE LINES #####################
 SHELL := /bin/bash
 PREFIX := $(CURDIR)/install/
@@ -23,11 +23,10 @@ export KALDI_BASE
 export BLAS_LDFLAGS
 .PHONY: nerv speech/speech_utils speech/htk_io speech/kaldi_io speech/kaldi_decode \
-	nerv-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean \
+	nerv-clean speech-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean \
 	Penlight
-all: nerv
-submodule: luajit luarocks Penlight
+all: luajit luarocks Penlight nerv
 luajit:
 	PREFIX=$(PREFIX) ./tools/build_luajit.sh
 luarocks:
@@ -36,8 +35,10 @@ luarocks:
 speech: speech/speech_utils speech/htk_io speech/kaldi_io speech/kaldi_decode
 speech-clean: speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean
 clean: nerv-clean speech-clean
+uninstall:
+	-rm -rf install/
 nerv Penlight speech/speech_utils speech/htk_io speech/kaldi_io speech/kaldi_decode:
 	cd $@; $(PREFIX)/bin/luarocks make
 nerv-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean:
-	cd $(subst -clean,,$@); make clean LUA_BINDIR=$(PREFIX)/bin/
+	-make -C $(subst -clean,,$@) clean LUA_BINDIR=$(PREFIX)/bin/
diff --git a/nerv/Makefile b/nerv/Makefile
index 68465a1..a5e4f66 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -44,7 +44,7 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
 	layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
 	layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
 	nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua \
-	io/sgd_buffer.lua io/seq_buffer.lua
+	io/frm_buffer.lua io/seq_buffer.lua
 INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 6bdf57c..645f1ef 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -22,9 +22,9 @@ local function build_trainer(ifname)
     local input_order = get_input_order()
     network = nerv.Network("nt", gconf, {network = network})
-    network:init(gconf.batch_size, 1)
+    network:init(gconf.batch_size, gconf.chunk_size)
     global_transf = nerv.Network("gt", gconf, {network = global_transf})
-    global_transf:init(gconf.batch_size, 1)
+    global_transf:init(gconf.batch_size, gconf.chunk_size)
 
     local iterative_trainer = function (prefix, scp_file, bp, rebind_param_repo)
         -- rebind the params if necessary
@@ -39,11 +39,17 @@ local function build_trainer(ifname)
         local buffer = make_buffer(make_readers(scp_file, layer_repo))
         -- initialize the network
         gconf.cnt = 0
-        err_input = {mat_type(gconf.batch_size, 1)}
-        err_input[1]:fill(1)
+        local err_input = {{}}
+        local output = {{}}
+        for i = 1, gconf.chunk_size do
+            local mini_batch = mat_type(gconf.batch_size, 1)
+            mini_batch:fill(1)
+            table.insert(err_input[1], mini_batch)
+            table.insert(output[1], mat_type(gconf.batch_size, 1))
+        end
         network:epoch_init()
         global_transf:epoch_init()
-        for data in buffer.get_data, buffer do
+        for d in buffer.get_data, buffer do
             -- prine stat periodically
             gconf.cnt = gconf.cnt + 1
             if gconf.cnt == 1000 then
@@ -54,35 +60,39 @@ local function build_trainer(ifname)
                 -- break
             end
             local input = {}
+            local err_output = {}
             -- if gconf.cnt == 1000 then break end
             for i, e in ipairs(input_order) do
                 local id = e.id
-                if data[id] == nil then
+                if d.data[id] == nil then
                     nerv.error("input data %s not found", id)
                 end
-                local transformed
+                local transformed = {}
+                local err_output_i = {}
                 if e.global_transf then
-                    transformed = nerv.speech_utils.global_transf(data[id],
-                                    global_transf,
-                                    gconf.frm_ext or 0, 0,
-                                    gconf)
+                    for _, mini_batch in ipairs(d.data[id]) do
+                        table.insert(transformed,
+                                     nerv.speech_utils.global_transf(mini_batch,
+                                        global_transf,
+                                        gconf.frm_ext or 0, 0,
+                                        gconf))
+                    end
                 else
-                    transformed = data[id]
+                    transformed = d.data[id]
+                end
+                for _, mini_batch in ipairs(transformed) do
+                    table.insert(err_output_i, mini_batch:create())
                 end
+                table.insert(err_output, err_output_i)
                 table.insert(input, transformed)
             end
-            local output = {mat_type(gconf.batch_size, 1)}
-            err_output = {}
-            for i = 1, #input do
-                table.insert(err_output, input[i]:create())
-            end
-            network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
-                                    new_seq = {},
+            network:mini_batch_init({seq_length = d.seq_length,
+                                    new_seq = d.new_seq,
                                     do_train = bp,
-                                    input = {input},
-                                    output = {output},
-                                    err_input = {err_input},
-                                    err_output = {err_output}})
+                                    input = input,
+                                    output = output,
+                                    err_input = err_input,
+                                    err_output = err_output})
             network:propagate()
             if bp then
                 network:back_propagate()
@@ -111,19 +121,21 @@ end
 local function check_and_add_defaults(spec, opts)
     local function get_opt_val(k)
-        return opts[string.gsub(k, '_', '-')].val
+        local k = string.gsub(k, '_', '-')
+        return opts[k].val, opts[k].specified
     end
     local opt_v = get_opt_val("resume_from")
     if opt_v then
+        nerv.info("resuming from previous training state")
         gconf = dofile(opt_v)
     else
         for k, v in pairs(spec) do
-            local opt_v = get_opt_val(k)
-            if opt_v ~= nil then
+            local opt_v, specified = get_opt_val(k)
+            if (not specified) and gconf[k] ~= nil then
+                nerv.info("using setting in network config file: %s = %s", k, gconf[k])
+            elseif opt_v ~= nil then
+                nerv.info("using setting in options: %s = %s", k, opt_v)
                 gconf[k] = opt_v
-            elseif gconf[k] ~= nil then
-            elseif v ~= nil then
-                gconf[k] = v
             end
         end
     end
@@ -168,6 +180,7 @@ end
 local trainer_defaults = {
     lrate = 0.8,
     batch_size = 256,
+    chunk_size = 1,
     buffer_size = 81920,
     wcost = 1e-6,
     momentum = 0.9,
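Note: the asr_trainer.lua hunks above generalize the frame-level trainer to chunked batches. Inputs, outputs and error buffers are now indexed first by port and then by time step, and seq_length/new_seq come from the buffer instead of being synthesized in the loop. A minimal sketch of the new layout (mat_type and gconf are the trainer's own; everything else here is illustrative), which with chunk_size = 1 reduces to the old single-matrix case:

    -- err_input[port][t] holds one batch_size x 1 matrix per time step
    local err_input = {{}}                       -- one output port
    for t = 1, gconf.chunk_size do
        local m = mat_type(gconf.batch_size, 1)
        m:fill(1)                                -- constant error scale for the CE port
        err_input[1][t] = m                      -- port 1, time step t
    end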
diff --git a/nerv/examples/swb_baseline.lua b/nerv/examples/swb_baseline.lua
index 0ce8468..ece4d44 100644
--- a/nerv/examples/swb_baseline.lua
+++ b/nerv/examples/swb_baseline.lua
@@ -171,6 +171,7 @@ function make_buffer(readers)
                         {
                             buffer_size = gconf.buffer_size,
                             batch_size = gconf.batch_size,
+                            chunk_size = gconf.chunk_size,
                             randomize = gconf.randomize,
                             readers = readers,
                             use_gpu = true
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 8b5ebb1..38cfb9a 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -1,13 +1,13 @@
 require 'htk_io'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
+gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
         rearrange = true, -- just to make the context order consistent with old results, deprecated
-        frm_ext = 5,
         frm_trim = 5, -- trim the first and last 5 frames, TNet just does this, deprecated
         tr_scp = "/speechlab/users/mfy43/swb50/train_bp.scp",
         cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
         htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
         initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
-                            "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"}}
+                            "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
+        chunk_size = 1}
 
 function make_layer_repo(param_repo)
     local layer_repo = nerv.LayerRepo(
@@ -15,13 +15,13 @@ function make_layer_repo(param_repo)
         -- global transf
         ["nerv.BiasLayer"] =
         {
-            blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}},
-            blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias2"}}
+            blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias0"}},
+            blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}}
         },
         ["nerv.WindowLayer"] =
         {
-            wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}},
-            wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window2"}}
+            wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window0"}},
+            wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}}
         },
         -- biased linearity
         ["nerv.AffineLayer"] =
@@ -65,39 +65,39 @@ function make_layer_repo(param_repo)
     layer_repo:add_layers(
     {
-        ["nerv.DAGLayer"] =
+        ["nerv.GraphLayer"] =
         {
             global_transf = {
                 dim_in = {429}, dim_out = {429},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "blayer1[1]",
-                    ["blayer1[1]"] = "wlayer1[1]",
-                    ["wlayer1[1]"] = "blayer2[1]",
-                    ["blayer2[1]"] = "wlayer2[1]",
-                    ["wlayer2[1]"] = "<output>[1]"
+                    {"<input>[1]", "blayer1[1]", 0},
+                    {"blayer1[1]", "wlayer1[1]", 0},
+                    {"wlayer1[1]", "blayer2[1]", 0},
+                    {"blayer2[1]", "wlayer2[1]", 0},
+                    {"wlayer2[1]", "<output>[1]", 0}
                 }
             },
             main = {
                 dim_in = {429}, dim_out = {3001},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "affine0[1]",
-                    ["affine0[1]"] = "sigmoid0[1]",
-                    ["sigmoid0[1]"] = "affine1[1]",
-                    ["affine1[1]"] = "sigmoid1[1]",
-                    ["sigmoid1[1]"] = "affine2[1]",
-                    ["affine2[1]"] = "sigmoid2[1]",
-                    ["sigmoid2[1]"] = "affine3[1]",
-                    ["affine3[1]"] = "sigmoid3[1]",
-                    ["sigmoid3[1]"] = "affine4[1]",
-                    ["affine4[1]"] = "sigmoid4[1]",
-                    ["sigmoid4[1]"] = "affine5[1]",
-                    ["affine5[1]"] = "sigmoid5[1]",
-                    ["sigmoid5[1]"] = "affine6[1]",
-                    ["affine6[1]"] = "sigmoid6[1]",
-                    ["sigmoid6[1]"] = "affine7[1]",
-                    ["affine7[1]"] = "<output>[1]"
+                    {"<input>[1]", "affine0[1]", 0},
+                    {"affine0[1]", "sigmoid0[1]", 0},
+                    {"sigmoid0[1]", "affine1[1]", 0},
+                    {"affine1[1]", "sigmoid1[1]", 0},
+                    {"sigmoid1[1]", "affine2[1]", 0},
+                    {"affine2[1]", "sigmoid2[1]", 0},
+                    {"sigmoid2[1]", "affine3[1]", 0},
+                    {"affine3[1]", "sigmoid3[1]", 0},
+                    {"sigmoid3[1]", "affine4[1]", 0},
+                    {"affine4[1]", "sigmoid4[1]", 0},
+                    {"sigmoid4[1]", "affine5[1]", 0},
+                    {"affine5[1]", "sigmoid5[1]", 0},
+                    {"sigmoid5[1]", "affine6[1]", 0},
+                    {"affine6[1]", "sigmoid6[1]", 0},
+                    {"sigmoid6[1]", "affine7[1]", 0},
+                    {"affine7[1]", "<output>[1]", 0}
                 }
             }
         }
@@ -105,25 +105,25 @@ function make_layer_repo(param_repo)
     layer_repo:add_layers(
     {
-        ["nerv.DAGLayer"] =
+        ["nerv.GraphLayer"] =
         {
             ce_output = {
                 dim_in = {429, 1}, dim_out = {1},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "main[1]",
-                    ["main[1]"] = "ce_crit[1]",
-                    ["<input>[2]"] = "ce_crit[2]",
-                    ["ce_crit[1]"] = "<output>[1]"
+                    {"<input>[1]", "main[1]", 0},
+                    {"main[1]", "ce_crit[1]", 0},
+                    {"<input>[2]", "ce_crit[2]", 0},
+                    {"ce_crit[1]", "<output>[1]", 0}
                 }
             },
             softmax_output = {
                 dim_in = {429}, dim_out = {3001},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "main[1]",
-                    ["main[1]"] = "softmax[1]",
-                    ["softmax[1]"] = "<output>[1]"
+                    {"<input>[1]", "main[1]", 0},
+                    {"main[1]", "softmax[1]", 0},
+                    {"softmax[1]", "<output>[1]", 0}
                }
            }
        }
@@ -146,7 +146,7 @@ end
 function make_readers(scp_file, layer_repo)
     return {
-        {reader = nerv.TNetReader(gconf,
+        {reader = nerv.HTKReader(gconf,
                     {
                         id = "main_scp",
                         scp_file = scp_file,
@@ -167,10 +167,11 @@ function make_readers(scp_file, layer_repo)
 end
 
 function make_buffer(readers)
-    return nerv.SGDBuffer(gconf,
+    return nerv.FrmBuffer(gconf,
                         {
                             buffer_size = gconf.buffer_size,
                             batch_size = gconf.batch_size,
+                            chunk_size = gconf.chunk_size,
                             randomize = gconf.randomize,
                             readers = readers,
                             use_gpu = true
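Note: the swb_baseline2.lua changes migrate the config from nerv.DAGLayer to nerv.GraphLayer: sub_layers becomes layer_repo, and each connection turns from a ["from"] = "to" map entry into a {source, sink, time} triple. The trailing integer is the edge's time shift: 0 keeps the old same-frame feed-forward behavior, while a nonzero shift (as used by NERV's recurrent layers) delivers the source output that many time steps later. A sketch with made-up layer names:

    connections = {
        {"<input>[1]", "hidden[1]", 0},    -- ordinary feed-forward edge
        {"hidden[1]", "hidden[2]", 1},     -- recurrent edge: arrives one step later
        {"hidden[1]", "<output>[1]", 0}
    }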
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index d783c3d..658aa2e 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -1,14 +1,15 @@
 require 'kaldi_io'
 gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
         tr_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
-                "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/train.scp ark:- |",
+                "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/train.scp ark:- |",
         cv_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
-                "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/cv.scp ark:- |",
-        initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_init.nerv",
-                            "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_output.nerv",
-                            "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"},
-        decode_param = {"/speechlab/users/mfy43/timit/nnet_init_20160229015745_iter_13_lr0.013437_tr72.434_cv58.729.nerv",
-                        "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"}}
+                "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/cv.scp ark:- |",
+        initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_init.nerv",
+                            "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv",
+                            "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"},
+        -- params in nnet_trans.nerv are included in the trained model
+        decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"},
+        chunk_size = 1}
 
 function make_layer_repo(param_repo)
     local layer_repo = nerv.LayerRepo(
@@ -113,7 +114,7 @@ function make_layer_repo(param_repo)
                 dim_in = {440}, dim_out = {1959},
                 layer_repo = layer_repo,
                 connections = {
-                    {"<input>[1]", "main[1]", 0},
+                    {"<input>[1]", "main[1]", 0},
                     {"main[1]", "softmax[1]", 0},
                     {"softmax[1]", "<output>[1]", 0}
                 }
@@ -176,10 +177,11 @@ function make_decode_readers(scp_file, layer_repo)
 end
 
 function make_buffer(readers)
-    return nerv.SGDBuffer(gconf,
+    return nerv.FrmBuffer(gconf,
                         {
                             buffer_size = gconf.buffer_size,
                             batch_size = gconf.batch_size,
+                            chunk_size = gconf.chunk_size,
                             randomize = gconf.randomize,
                             readers = readers,
                             use_gpu = true
diff --git a/nerv/init.lua b/nerv/init.lua
index 551a9f9..d017f82 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -109,7 +109,7 @@ function table.val_to_str(v)
         (("number" == type(v) or
           "string" == type(v) or
           "boolean" == type(v)) and tostring(v)) or
-        nil -- failed to serialize
+        "" -- failed to serialize
     end
 end
@@ -226,7 +226,8 @@ function nerv.parse_args(argv, options, unordered)
         local opt_type = v[3]
         local opt_meta = {type = opt_type,
                           desc = v.desc or "",
-                          val = v.default}
+                          val = v.default,
+                          specified = false}
         if opt_short ~= nil then
             if type(opt_short) ~= "string" or #opt_short ~= 1 then err() end
             if opt_type ~= "boolean" then
@@ -246,6 +247,7 @@ function nerv.parse_args(argv, options, unordered)
                 for c in k:gmatch"." do
                     if sopts[c] then
                         sopts[c].val = true
+                        sopts[c].specified = true
                     else
                         nerv.error("invalid option -%s", c)
                     end
@@ -262,6 +264,7 @@ function nerv.parse_args(argv, options, unordered)
                                   k, opts[k].type)
                     else
                         opts[k].val = true
+                        opts[k].specified = true
                     end
                 else
                     local k, v = token:match(opt_with_val_exp)
@@ -269,6 +272,7 @@ function nerv.parse_args(argv, options, unordered)
                     if opts[k] == nil then
                         nerv.error("invalid option %s", token)
                     end
+                    opts[k].specified = true
                     if opts[k].type == "boolean" then
                         if v == "yes" then
                             opts[k].val = true
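Note: nerv.parse_args now records whether each option actually appeared on the command line, and check_and_add_defaults (see the asr_trainer.lua diff above) uses that flag to give a clear precedence: a resume file, then explicitly specified CLI options, then values already set in the network config's gconf, then built-in trainer defaults. A condensed restatement, using names from the diff:

    -- after parsing "--batch-size=128":
    --   opts["batch-size"].val       == 128
    --   opts["batch-size"].specified == true   (false when only the default applies)
    local opt_v, specified = get_opt_val(k)
    if (not specified) and gconf[k] ~= nil then
        -- keep the network config file's value
    elseif opt_v ~= nil then
        gconf[k] = opt_v                        -- otherwise the option value wins
    end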
diff --git a/nerv/io/sgd_buffer.lua b/nerv/io/frm_buffer.lua
index d78f6d1..9761f16 100644
--- a/nerv/io/sgd_buffer.lua
+++ b/nerv/io/frm_buffer.lua
@@ -1,6 +1,6 @@
-local SGDBuffer = nerv.class("nerv.SGDBuffer", "nerv.DataBuffer")
+local FrmBuffer = nerv.class("nerv.FrmBuffer", "nerv.DataBuffer")
 
-function SGDBuffer:__init(global_conf, buffer_conf)
+function FrmBuffer:__init(global_conf, buffer_conf)
     self.gconf = global_conf
     self.batch_size = buffer_conf.batch_size
     self.buffer_size = math.floor(buffer_conf.buffer_size /
@@ -57,7 +57,7 @@ function SGDBuffer:__init(global_conf, buffer_conf)
     end
 end
 
-function SGDBuffer:saturate()
+function FrmBuffer:saturate()
     local buffer_size = self.buffer_size
     self.head = 0
     self.tail = buffer_size
@@ -116,7 +116,7 @@ function SGDBuffer:saturate()
     return self.tail >= self.batch_size
 end
 
-function SGDBuffer:get_data()
+function FrmBuffer:get_data()
     local batch_size = self.batch_size
     if self.head >= self.tail then -- buffer is empty
         local t = os.clock()
@@ -132,7 +132,9 @@ function SGDBuffer:get_data()
         return nil -- the remaining data cannot build a batch
     end
     actual_batch_size = math.min(batch_size, self.tail - self.head)
-    local res = {}
+    local res = {seq_length = table.vector(gconf.batch_size, 1),
+                 new_seq = {},
+                 data = {}}
     for i, reader in ipairs(self.readers) do
         for id, buff in pairs(reader.buffs) do
             local batch = self.output_mat_type(actual_batch_size, buff.width)
@@ -141,7 +143,7 @@ function SGDBuffer:get_data()
             else
                 self.copy_from(batch, buff.data, self.head, self.head + actual_batch_size)
             end
-            res[id] = batch
+            res.data[id] = {batch}
         end
     end
     self.head = self.head + actual_batch_size
diff --git a/nerv/io/init.lua b/nerv/io/init.lua
index c36d850..d3ba27c 100644
--- a/nerv/io/init.lua
+++ b/nerv/io/init.lua
@@ -56,5 +56,5 @@ function DataBuffer:get_data()
     nerv.error_method_not_implemented()
 end
 
-nerv.include('sgd_buffer.lua')
+nerv.include('frm_buffer.lua')
 nerv.include('seq_buffer.lua')
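Note: with the rename, the frame-shuffling SGDBuffer becomes FrmBuffer, and get_data() now returns the same record shape as SeqBuffer, so the trainer loop can consume either buffer unchanged. A sketch of one returned mini-batch (the reader id "main_scp" is simply the one used in the example configs):

    -- d.seq_length : table.vector(batch_size, 1) -- each frame is its own sequence
    -- d.new_seq    : {}                          -- no sequence boundaries to reset
    -- d.data       : indexed as d.data[id][t]; frame buffers yield a single chunk
    for d in buffer.get_data, buffer do
        local feats = d.data["main_scp"][1]       -- batch_size x feature_dim matrix
    end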
diff --git a/nerv/io/seq_buffer.lua b/nerv/io/seq_buffer.lua
index ad1b3f7..029e7b8 100644
--- a/nerv/io/seq_buffer.lua
+++ b/nerv/io/seq_buffer.lua
@@ -5,7 +5,10 @@ function SeqBuffer:__init(global_conf, buffer_conf)
     self.batch_size = buffer_conf.batch_size
     self.chunk_size = buffer_conf.chunk_size
-    self.readers = buffer_conf.readers
+    self.readers = {}
+    for _, v in ipairs(buffer_conf.readers) do
+        table.insert(self.readers, v.reader)
+    end
     self.nn_act_default = buffer_conf.nn_act_default
     if self.nn_act_default == nil then
         self.nn_act_default = 0
@@ -29,7 +32,7 @@ function SeqBuffer:new_mini_batch()
 end
 
 function SeqBuffer:saturate(batch)
-    if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
+    if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
         return true
     end
     local data = {}
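Note: SeqBuffer now accepts the same readers specification as FrmBuffer, a list of {reader = ..., data = ...} entries as produced by make_readers() in the example configs, and unwraps the reader objects itself, so a config can switch between the two buffer classes without reshaping its reader table. A sketch (constructor fields as in the example configs):

    local buffer = nerv.SeqBuffer(gconf,
                                  {
                                      batch_size = gconf.batch_size,
                                      chunk_size = gconf.chunk_size,
                                      readers = make_readers(scp_file, layer_repo)
                                  })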
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 4717209..cf9d213 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -277,11 +277,13 @@ __global__ void cudak_(update_select_rows_by_rowidx)(MATRIX_ELEM *c, const MATRI
     int i = blockIdx.y * blockDim.y + threadIdx.y;
     if (i >= nrow_a || j >= ncol_a) return;
     int i_c = lrintf(idx[i]);
+    /*
     if (i_c < 0 || i_c >= nrow_c) {
         printf("ERROR inside kernel update_select_rows, i_c(%d) out of range!", i_c);
     }
+    */
     //critical: i_c could conflict among threads(same index in the idx array), so atomicAdd is used
-    //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+    //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
     atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
 }
@@ -291,9 +293,11 @@ __global__ void cudak_(update_select_rows_by_colidx)(MATRIX_ELEM *c, const MATRI
     int i = blockIdx.y * blockDim.y + threadIdx.y;
     if (i >= nrow_a || j >= ncol_a) return;
     int i_c = lrintf(idx[stride_idx * i]);
+    /*
     if (i_c < 0 || i_c >= nrow_c) {
         printf("ERROR inside kernel update_select_rows, i_c(%d) out of range!", i_c);
     }
+    */
     //critical: i_c could conflict among threads(same index in the idx array), so atomicAdd is used
     //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
     atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
 }
@@ -394,9 +398,11 @@ __global__ void cudak_(copy_rows_by_colidx)(const MATRIX_ELEM *a, MATRIX_ELEM *b
     int i = blockIdx.y * blockDim.y + threadIdx.y;
     if (i >= nrow || j >= ncol) return;
     int k = lrintf(idx[i * idx_stride]);
+    /*
     if (k < 0 || k >= a_nrow) {
         printf("error in kernel copy_rows_by_colidx k(%d) out of range\n", k);
     }
+    */
     b[j + i * stride] = a[j + k * stride];
 }
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index cd80b1e..bb03be4 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -415,7 +415,7 @@ function network:set_input(input)
             local edge = self.socket.inputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t + time >= 1 and t + time <= self.chunk_size then
-                self.input[t + time][id][port] = input[t][i]
+                self.input[t + time][id][port] = input[i][t]
             end
         end
     end
@@ -427,7 +427,7 @@ function network:set_output(output)
             local edge = self.socket.outputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t - time >= 1 and t - time <= self.chunk_size then
-                self.output[t - time][id][port] = output[t][i]
+                self.output[t - time][id][port] = output[i][t]
             end
         end
     end
@@ -439,7 +439,7 @@ function network:set_err_input(err_input)
             local edge = self.socket.outputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t - time >= 1 and t - time <= self.chunk_size then
-                self.err_input[t - time][id][port] = err_input[t][i]
+                self.err_input[t - time][id][port] = err_input[i][t]
             end
         end
     end
@@ -451,7 +451,7 @@ function network:set_err_output(err_output)
             local edge = self.socket.inputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t + time >= 1 and t + time <= self.chunk_size then
-                self.err_output[t + time][id][port] = err_output[t][i]
+                self.err_output[t + time][id][port] = err_output[i][t]
             end
         end
     end
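Note: the network.lua hunks flip the user-facing buffer layout from time-major (input[t][i]) to port-major (input[i][t]), matching what asr_trainer.lua now builds. A sketch of allocating buffers under the new convention (num_ports and dims are illustrative; mat_type and gconf as in the trainer):

    -- input[i][t]: the matrix for input port i at time step t
    local input = {}
    for i = 1, num_ports do
        input[i] = {}
        for t = 1, gconf.chunk_size do
            input[i][t] = mat_type(gconf.batch_size, dims[i])
        end
    end
    -- then pass it on, e.g. network:mini_batch_init({input = input, ...})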