author     Determinant <[email protected]>   2016-03-15 15:46:05 +0800
committer  Determinant <[email protected]>   2016-03-15 15:46:05 +0800
commit     07fc1e2794027d44c255e1062c4491346b101a08 (patch)
tree       8e7217b9c5e9570b94af5aaad3f94d1a37cfe40b
parent     a5a4d2735b595fc9fadc9c7e91198786d3c0e078 (diff)
parent     e15307f071813e2eb56f7f83229b91141961325a (diff)
Merge branch 'master' of github.com:liuq901/nerv into liuq901-master
-rw-r--r--  Makefile                                 |   5
-rw-r--r--  nerv/examples/network_debug/config.lua   |  10
-rw-r--r--  nerv/examples/network_debug/main.lua     |  21
-rw-r--r--  nerv/examples/network_debug/network.lua  | 120
-rw-r--r--  nerv/examples/network_debug/reader.lua   |  76
-rw-r--r--  nerv/io/seq_buffer.lua                   | 105
-rw-r--r--  nerv/layer/duplicate.lua                 |   5
-rw-r--r--  nerv/layer/softmax_ce.lua                |   4
-rw-r--r--  nerv/lib/matrix/cumatrix.c               |   6
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu      |  22
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c       |  14
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h       |   2
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.c        |  23
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.h        |   2
-rw-r--r--  nerv/lib/matrix/mmatrix.c                |   6
-rw-r--r--  nerv/matrix/generic/cumatrix.c           |   1
-rw-r--r--  nerv/matrix/generic/matrix.c             |  12
-rw-r--r--  nerv/matrix/generic/mmatrix.c            |   1
-rw-r--r--  nerv/nn/network.lua                      | 285
19 files changed, 484 insertions, 236 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all clean install luajit luarocks speech
+.PHONY: all clean install luajit luarocks speech submodule
 ############## EDIT THESE LINES #####################
 SHELL := /bin/bash
 PREFIX := $(CURDIR)/install/
@@ -26,7 +26,8 @@ export BLAS_LDFLAGS
        nerv-clean speech-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean \
        Penlight

-all: luajit luarocks Penlight nerv
+all: nerv
+submodule: luajit luarocks Penlight
 luajit:
        PREFIX=$(PREFIX) ./tools/build_luajit.sh
 luarocks:
diff --git a/nerv/examples/network_debug/config.lua b/nerv/examples/network_debug/config.lua
index e20d5a9..0429e9a 100644
--- a/nerv/examples/network_debug/config.lua
+++ b/nerv/examples/network_debug/config.lua
@@ -35,6 +35,10 @@ function get_layers(global_conf)
         ['nerv.SoftmaxCELayer'] = {
             softmax = {dim_in = {global_conf.vocab_size, global_conf.vocab_size}, dim_out = {1}, compressed = true},
         },
+        ['nerv.DuplicateLayer'] = {
+            dup1 = {dim_in = {1}, dim_out = {1}},
+            dup2 = {dim_in = {1}, dim_out = {1}},
+        },
     }
     for i = 1, global_conf.layer_num do
         layers['nerv.LSTMLayer']['lstm' .. i] = {dim_in = {global_conf.hidden_size}, dim_out = {global_conf.hidden_size}, pr = pr}
@@ -45,12 +49,14 @@ end

 function get_connections(global_conf)
     local connections = {
-        {'<input>[1]', 'select[1]', 0},
+        {'<input>[1]', 'dup1[1]', 0},
+        {'dup1[1]', 'select[1]', 0},
         {'select[1]', 'lstm1[1]', 0},
         {'dropout' .. global_conf.layer_num .. '[1]', 'output[1]', 0},
         {'output[1]', 'softmax[1]', 0},
         {'<input>[2]', 'softmax[2]', 0},
-        {'softmax[1]', '<output>[1]', 0},
+        {'softmax[1]', 'dup2[1]', 0},
+        {'dup2[1]', '<output>[1]', 0},
     }
     for i = 1, global_conf.layer_num do
         table.insert(connections, {'lstm' .. i .. '[1]', 'dropout' .. i .. '[1]', 0})
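For reference, with global_conf.layer_num = 1 the rewired get_connections() above evaluates to the list below (a sketch derived from the hunk, not part of the commit); every edge into or out of the network now passes through a DuplicateLayer port:

    -- Triples are {source[port], sink[port], time delay}.
    local connections = {
        {'<input>[1]',  'dup1[1]',     0},
        {'dup1[1]',     'select[1]',   0},
        {'select[1]',   'lstm1[1]',    0},
        {'dropout1[1]', 'output[1]',   0},
        {'output[1]',   'softmax[1]',  0},
        {'<input>[2]',  'softmax[2]',  0},
        {'softmax[1]',  'dup2[1]',     0},
        {'dup2[1]',     '<output>[1]', 0},
        {'lstm1[1]',    'dropout1[1]', 0},
    }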
diff --git a/nerv/examples/network_debug/main.lua b/nerv/examples/network_debug/main.lua
index 790c404..bbcdb6c 100644
--- a/nerv/examples/network_debug/main.lua
+++ b/nerv/examples/network_debug/main.lua
@@ -6,35 +6,26 @@ nerv.include(arg[1])
 local global_conf = get_global_conf()
 local timer = global_conf.timer
-timer:tic('IO')
-
 local data_path = 'examples/lmptb/PTBdata/'
-local train_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.train.txt.adds')
-local val_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.valid.txt.adds')
-
-local train_data = train_reader:get_all_batch(global_conf)
-local val_data = val_reader:get_all_batch(global_conf)

 local layers = get_layers(global_conf)
 local connections = get_connections(global_conf)
-local NN = nerv.NN(global_conf, train_data, val_data, layers, connections)
-
-timer:toc('IO')
-timer:check('IO')
-io.flush()
+local NN = nerv.NN(global_conf, layers, connections)

 timer:tic('global')
 local best_cv = 1e10
 for i = 1, global_conf.max_iter do
     timer:tic('Epoch' .. i)
-    local train_ppl, val_ppl = NN:epoch()
+    local train_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.train.txt.adds')
+    local val_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.valid.txt.adds')
+    local train_ppl, val_ppl = NN:epoch(train_reader, val_reader)
+    nerv.printf('Epoch %d: %f %f %f\n', i, global_conf.lrate, train_ppl, val_ppl)
     if val_ppl < best_cv then
         best_cv = val_ppl
     else
         global_conf.lrate = global_conf.lrate / 2.0
     end
-    nerv.printf('Epoch %d: %f %f %f\n', i, global_conf.lrate, train_ppl, val_ppl)
     timer:toc('Epoch' .. i)
     timer:check('Epoch' .. i)
     io.flush()
@@ -43,3 +34,5 @@ timer:toc('global')
 timer:check('global')
 timer:check('network')
 timer:check('gc')
+timer:check('IO')
+global_conf.cumat_type.print_profile()
diff --git a/nerv/examples/network_debug/network.lua b/nerv/examples/network_debug/network.lua
index 5518e27..386c3b0 100644
--- a/nerv/examples/network_debug/network.lua
+++ b/nerv/examples/network_debug/network.lua
@@ -2,11 +2,17 @@ nerv.include('select_linear.lua')

 local nn = nerv.class('nerv.NN')

-function nn:__init(global_conf, train_data, val_data, layers, connections)
+function nn:__init(global_conf, layers, connections)
     self.gconf = global_conf
     self.network = self:get_network(layers, connections)
-    self.train_data = self:get_data(train_data)
-    self.val_data = self:get_data(val_data)
+
+    self.output = {}
+    self.err_output = {}
+    for i = 1, self.gconf.chunk_size do
+        self.output[i] = {self.gconf.cumat_type(self.gconf.batch_size, 1)}
+        self.err_output[i] = {self.gconf.cumat_type(self.gconf.batch_size, 1)}
+        self.err_output[i][2] = self.gconf.cumat_type(self.gconf.batch_size, 1)
+    end
 end

 function nn:get_network(layers, connections)
@@ -20,79 +26,67 @@ function nn:get_network(layers, connections)
     return network
 end

-function nn:get_data(data)
-    local err_output = {}
-    local softmax_output = {}
-    local output = {}
-    for i = 1, self.gconf.chunk_size do
-        err_output[i] = self.gconf.cumat_type(self.gconf.batch_size, 1)
-        softmax_output[i] = self.gconf.cumat_type(self.gconf.batch_size, self.gconf.vocab_size)
-        output[i] = self.gconf.cumat_type(self.gconf.batch_size, 1)
-    end
-    local ret = {}
-    for i = 1, #data do
-        ret[i] = {}
-        ret[i].input = {}
-        ret[i].output = {}
-        ret[i].err_input = {}
-        ret[i].err_output = {}
-        for t = 1, self.gconf.chunk_size do
-            ret[i].input[t] = {}
-            ret[i].output[t] = {}
-            ret[i].err_input[t] = {}
-            ret[i].err_output[t] = {}
-            ret[i].input[t][1] = data[i].input[t]
-            ret[i].input[t][2] = data[i].output[t]
-            ret[i].output[t][1] = output[t]
-            local err_input = self.gconf.mmat_type(self.gconf.batch_size, 1)
-            for j = 1, self.gconf.batch_size do
-                if t <= data[i].seq_len[j] then
-                    err_input[j - 1][0] = 1
-                else
-                    err_input[j - 1][0] = 0
+function nn:process(data, do_train, reader)
+    local timer = self.gconf.timer
+    local buffer = nerv.SeqBuffer(self.gconf, {
+        batch_size = self.gconf.batch_size, chunk_size = self.gconf.chunk_size,
+        readers = {reader},
+    })
+    local total_err = 0
+    local total_frame = 0
+    self.network:epoch_init()
+    while true do
+        timer:tic('IO')
+        data = buffer:get_data()
+        if data == nil then
+            break
+        end
+        local err_input = {}
+        if do_train then
+            for t = 1, self.gconf.chunk_size do
+                local tmp = self.gconf.mmat_type(self.gconf.batch_size, 1)
+                for i = 1, self.gconf.batch_size do
+                    if t <= data.seq_length[i] then
+                        tmp[i - 1][0] = 1
+                    else
+                        tmp[i - 1][0] = 0
+                    end
                 end
+                err_input[t] = {self.gconf.cumat_type.new_from_host(tmp)}
             end
-            ret[i].err_input[t][1] = self.gconf.cumat_type.new_from_host(err_input)
-            ret[i].err_output[t][1] = err_output[t]
-            ret[i].err_output[t][2] = softmax_output[t]
         end
-        ret[i].seq_length = data[i].seq_len
-        ret[i].new_seq = {}
-        for j = 1, self.gconf.batch_size do
-            if data[i].seq_start[j] then
-                table.insert(ret[i].new_seq, j)
-            end
+        local info = {input = {}, output = self.output, err_input = err_input, do_train = do_train,
+                      err_output = self.err_output, seq_length = data.seq_length, new_seq = data.new_seq}
+        for t = 1, self.gconf.chunk_size do
+            info.input[t] = {data.data['input'][t]}
+            info.input[t][2] = data.data['label'][t]
         end
-    end
-    return ret
-end
+        timer:toc('IO')

-function nn:process(data, do_train)
-    local timer = self.gconf.timer
-    local total_err = 0
-    local total_frame = 0
-    self.network:epoch_init()
-    for id = 1, #data do
-        data[id].do_train = do_train
         timer:tic('network')
-        self.network:mini_batch_init(data[id])
+        self.network:mini_batch_init(info)
         self.network:propagate()
         timer:toc('network')
+
+        timer:tic('IO')
         for t = 1, self.gconf.chunk_size do
-            local tmp = data[id].output[t][1]:new_to_host()
+            local tmp = info.output[t][1]:new_to_host()
             for i = 1, self.gconf.batch_size do
-                if t <= data[id].seq_length[i] then
-                    total_err = total_err + math.log10(math.exp(tmp[i - 1][0]))
-                    total_frame = total_frame + 1
-                end
+                total_err = total_err + math.log10(math.exp(tmp[i - 1][0]))
             end
         end
+        for i = 1, self.gconf.batch_size do
+            total_frame = total_frame + info.seq_length[i]
+        end
+        timer:toc('IO')
+
+        timer:tic('network')
         if do_train then
-            timer:tic('network')
             self.network:back_propagate()
             self.network:update()
-            timer:toc('network')
         end
+        timer:toc('network')
+
         timer:tic('gc')
         collectgarbage('collect')
         timer:toc('gc')
@@ -100,11 +94,11 @@ function nn:process(data, do_train)
     return math.pow(10, - total_err / total_frame)
 end

-function nn:epoch()
-    local train_error = self:process(self.train_data, true)
+function nn:epoch(train_reader, val_reader)
+    local train_error = self:process(self.train_data, true, train_reader)
     local tmp = self.gconf.dropout_rate
     self.gconf.dropout_rate = 0
-    local val_error = self:process(self.val_data, false)
+    local val_error = self:process(self.val_data, false, val_reader)
     self.gconf.dropout_rate = tmp
     return train_error, val_error
 end
diff --git a/nerv/examples/network_debug/reader.lua b/nerv/examples/network_debug/reader.lua
index b10baaf..76a78cf 100644
--- a/nerv/examples/network_debug/reader.lua
+++ b/nerv/examples/network_debug/reader.lua
@@ -3,6 +3,7 @@ local Reader = nerv.class('nerv.Reader')
 function Reader:__init(vocab_file, input_file)
     self:get_vocab(vocab_file)
     self:get_seq(input_file)
+    self.offset = 1
 end

 function Reader:get_vocab(vocab_file)
@@ -32,6 +33,7 @@ function Reader:get_seq(input_file)
     local f = io.open(input_file, 'r')
     self.seq = {}
     while true do
+    -- for i = 1, 26 do
         local seq = f:read()
         if seq == nil then
             break
@@ -47,67 +49,19 @@ end
     end
 end

-function Reader:get_in_out(id, pos)
-    return self.seq[id][pos], self.seq[id][pos + 1], pos + 1 == #self.seq[id]
-end
-
-function Reader:get_all_batch(global_conf)
-    local data = {}
-    local pos = {}
-    local offset = 1
-    for i = 1, global_conf.batch_size do
-        pos[i] = nil
+function Reader:get_data()
+    if self.offset > #self.seq then
+        return nil
     end
-    while true do
-    -- for i = 1, 26 do
-        local input = {}
-        local output = {}
-        for i = 1, global_conf.chunk_size do
-            input[i] = global_conf.mmat_type(global_conf.batch_size, 1)
-            input[i]:fill(global_conf.nn_act_default)
-            output[i] = global_conf.mmat_type(global_conf.batch_size, 1)
-            output[i]:fill(global_conf.nn_act_default)
-        end
-        local seq_start = {}
-        local seq_end = {}
-        local seq_len = {}
-        for i = 1, global_conf.batch_size do
-            seq_start[i] = false
-            seq_end[i] = false
-            seq_len[i] = 0
-        end
-        local has_new = false
-        for i = 1, global_conf.batch_size do
-            if pos[i] == nil then
-                if offset < #self.seq then
-                    seq_start[i] = true
-                    pos[i] = {offset, 1}
-                    offset = offset + 1
-                end
-            end
-            if pos[i] ~= nil then
-                has_new = true
-                for j = 1, global_conf.chunk_size do
-                    local final
-                    input[j][i-1][0], output[j][i-1][0], final = self:get_in_out(pos[i][1], pos[i][2])
-                    seq_len[i] = j
-                    if final then
-                        seq_end[i] = true
-                        pos[i] = nil
-                        break
-                    end
-                    pos[i][2] = pos[i][2] + 1
-                end
-            end
-        end
-        if not has_new then
-            break
-        end
-        for i = 1, global_conf.chunk_size do
-            input[i] = global_conf.cumat_type.new_from_host(input[i])
-            output[i] = global_conf.cumat_type.new_from_host(output[i])
-        end
-        table.insert(data, {input = input, output = output, seq_start = seq_start, seq_end = seq_end, seq_len = seq_len})
+    local tmp = self.seq[self.offset]
+    local res = {
+        input = nerv.MMatrixFloat(#tmp - 1, 1),
+        label = nerv.MMatrixFloat(#tmp - 1, 1),
+    }
+    for i = 1, #tmp - 1 do
+        res.input[i - 1][0] = tmp[i]
+        res.label[i - 1][0] = tmp[i + 1]
     end
-    return data
+    self.offset = self.offset + 1
+    return res
 end
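The reader is now incremental: each get_data() call returns a single sentence as two host matrices instead of pre-batching the whole corpus up front. A minimal driving loop, assuming the PTB paths from main.lua:

    local data_path = 'examples/lmptb/PTBdata/'
    local reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.train.txt.adds')
    while true do
        local seq = reader:get_data()
        if seq == nil then break end     -- corpus exhausted
        -- seq.input and seq.label are (#words - 1) x 1 MMatrixFloat columns;
        -- seq.label[i] holds the word that follows seq.input[i].
    end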
diff --git a/nerv/io/seq_buffer.lua b/nerv/io/seq_buffer.lua
index e69de29..ad1b3f7 100644
--- a/nerv/io/seq_buffer.lua
+++ b/nerv/io/seq_buffer.lua
@@ -0,0 +1,105 @@
+local SeqBuffer = nerv.class('nerv.SeqBuffer', 'nerv.DataBuffer')
+
+function SeqBuffer:__init(global_conf, buffer_conf)
+    self.gconf = global_conf
+
+    self.batch_size = buffer_conf.batch_size
+    self.chunk_size = buffer_conf.chunk_size
+    self.readers = buffer_conf.readers
+    self.nn_act_default = buffer_conf.nn_act_default
+    if self.nn_act_default == nil then
+        self.nn_act_default = 0
+    end
+
+    self.mat_type = self.gconf.mmat_type
+    self.queue = {}
+    self.head = 1
+    self.tail = 0
+end
+
+function SeqBuffer:new_mini_batch()
+    local res = {}
+    res.data = {}
+    res.new_seq = {}
+    res.seq_length = {}
+    for i = 1, self.batch_size do
+        res.seq_length[i] = 0
+    end
+    return res
+end
+
+function SeqBuffer:saturate(batch)
+    if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
+        return true
+    end
+    local data = {}
+    local drow = nil
+    for i = 1, #self.readers do
+        local tmp = self.readers[i]:get_data()
+        if tmp == nil then
+            return false
+        end
+        for id, d in pairs(tmp) do
+            if drow == nil then
+                drow = d:nrow()
+            elseif d:nrow() ~= drow then
+                nerv.error('readers provides with inconsistent rows of data')
+            end
+            data[id] = d
+        end
+    end
+    local offset = 0
+    local head = self.head
+    while offset < drow do
+        local last = math.min(offset + self.chunk_size, drow)
+        if head > self.tail then
+            self.tail = self.tail + 1
+            self.queue[self.tail] = self:new_mini_batch()
+        end
+        self.queue[head].seq_length[batch] = last - offset
+        if offset == 0 then
+            table.insert(self.queue[head].new_seq, batch)
+        end
+        local mini_batch = self.queue[head].data
+        for id, d in pairs(data) do
+            if mini_batch[id] == nil then
+                mini_batch[id] = {}
+            end
+            local tmp = mini_batch[id]
+            for i = offset + 1, last do
+                local chunk = i - offset
+                if tmp[chunk] == nil then
+                    tmp[chunk] = self.mat_type(self.batch_size, d:ncol())
+                    tmp[chunk]:fill(self.nn_act_default)
+                end
+                tmp[chunk]:copy_from(d, i - 1, i, batch - 1)
+            end
+        end
+        head = head + 1
+        offset = last
+    end
+    return true
+end
+
+function SeqBuffer:get_data()
+    local has_data = false
+    for i = 1, self.batch_size do
+        if self:saturate(i) then
+            has_data = true
+        end
+    end
+    if not has_data then
+        return nil
+    end
+    local res = self.queue[self.head]
+    self.queue[self.head] = nil
+    self.head = self.head + 1
+    if not self.gconf.use_cpu then
+        for id, d in pairs(res.data) do
+            for i = 1, #d do
+                d[i] = self.gconf.cumat_type.new_from_host(d[i])
+            end
+        end
+    end
+    return res
+end
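The new nerv.SeqBuffer is what turns such per-sequence readers into fixed-shape batch_size x chunk_size mini-batches, refilling a batch slot with the next sequence as soon as the previous one ends. A hedged usage sketch mirroring nn:process() above (gconf stands for the usual global configuration table):

    local buffer = nerv.SeqBuffer(gconf, {
        batch_size = gconf.batch_size,
        chunk_size = gconf.chunk_size,
        readers = {reader},              -- anything exposing get_data()
    })
    while true do
        local d = buffer:get_data()
        if d == nil then break end       -- all readers drained
        -- d.data[id][t]  : batch_size x dim matrix for chunk t of stream id
        -- d.seq_length[i]: valid frames in batch slot i (0 = slot idle)
        -- d.new_seq      : slots that started a fresh sequence this batch
    end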
diff --git a/nerv/layer/duplicate.lua b/nerv/layer/duplicate.lua
index 137472b..2621cdf 100644
--- a/nerv/layer/duplicate.lua
+++ b/nerv/layer/duplicate.lua
@@ -20,10 +20,7 @@ function DuplicateLayer:batch_resize()
 end

 function DuplicateLayer:propagate(input, output)
-    for i = 1, #self.dim_out do
-        output[i]:copy_from(input[1])
-        -- FIXME: use reference copy to speed up
-    end
+    -- do nothing, use reference copy in nn/network.lua
 end

 function DuplicateLayer:back_propagate(bp_err, next_bp_err)
diff --git a/nerv/layer/softmax_ce.lua b/nerv/layer/softmax_ce.lua
index 7b4a80c..acd4ee6 100644
--- a/nerv/layer/softmax_ce.lua
+++ b/nerv/layer/softmax_ce.lua
@@ -61,14 +61,16 @@ function SoftmaxCELayer:propagate(input, output, t)
     end
     ce:mul_elem(ce, label)
     ce = ce:rowsum()
+    ce:set_values_by_mask(self.gconf.mask[t], 0)
     if output[1] ~= nil then
         output[1]:copy_from(ce)
     end
     -- add total ce
     self.total_ce = self.total_ce - ce:colsum()[0][0]
-    self.total_frames = self.total_frames + softmax:nrow()
+    self.total_frames = self.total_frames + self.gconf.mask[t]:colsum()[0][0]
     -- TODO: add colsame for uncompressed label
     if self.compressed then
+        classified:set_values_by_mask(self.gconf.mask[t], -1)
         self.total_correct = self.total_correct + classified:colsame(input[2])[0][0]
     end
 end
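Both layer changes lean on the mask machinery added elsewhere in this merge: gconf.mask[t] is a batch_size x 1 column holding 1 while t <= seq_length[i] and 0 afterwards, so padded frames drop out of the cross-entropy sum, the frame count, and the accuracy count. A toy host-side illustration of that convention (MMatrixFloat and set_values_by_mask both come from this merge):

    local ce = nerv.MMatrixFloat(3, 1)
    local mask = nerv.MMatrixFloat(3, 1)
    for i = 0, 2 do ce[i][0] = 0.5 end             -- per-frame CE values
    mask[0][0], mask[1][0], mask[2][0] = 1, 1, 0   -- slot 3 already ended
    ce:set_values_by_mask(mask, 0)                 -- ce is now {0.5, 0.5, 0}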
diff --git a/nerv/lib/matrix/cumatrix.c b/nerv/lib/matrix/cumatrix.c
index aec4d60..43448bf 100644
--- a/nerv/lib/matrix/cumatrix.c
+++ b/nerv/lib/matrix/cumatrix.c
@@ -9,15 +9,19 @@
 void nerv_cuda_context_print_profile(CuContext *context) {
     HashMap *profile = context->profile;
     size_t i;
+    float tmp, tot = 0;
     fprintf(stderr, "*** [nerv cumatrix profile] **\n");
     for (i = 0; i < profile->size; i++)
     {
         HashNode *ptr;
         for (ptr = profile->bucket[i]; ptr; ptr = ptr->next)
         {
-            fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val);
+            tmp = *(float *)ptr->val;
+            fprintf(stderr, "%s:\t%.6f\n", ptr->key, tmp);
+            tot += tmp;
         }
     }
+    fprintf(stderr, "Total time:\t%.6f\n", tot);
 }

 void nerv_cuda_context_clear_profile(CuContext *context) {
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index fc630ad..cf9d213 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -328,6 +328,15 @@ __global__ void cudak_(rearrange_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
     b[j + i * stride] = a[j / step + (j % step) * orig_dim + i * stride];
 }

+__global__ void cudak_(set_values_by_mask)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                           int nrow, int ncol,
+                                           int astride, int bstride, double val) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    if (i >= nrow || j >= ncol || a[i * astride] != 0.0) return;
+    b[j + i * bstride] = val;
+}
+
 __global__ void cudak_(scale_rows_by_col)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                           int nrow, int ncol,
                                           int astride, int bstride) {
@@ -772,6 +781,19 @@ extern "C" {
         cudaStreamSynchronize(0);
     }

+    void cudak_(cuda_set_values_by_mask)(const Matrix *a, Matrix *b, double val) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(set_values_by_mask)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             b->nrow, b->ncol,
+             a->stride / sizeof(MATRIX_ELEM),
+             b->stride / sizeof(MATRIX_ELEM),
+             val);
+        cudaStreamSynchronize(0);
+    }
+
     void cudak_(cuda_scale_rows_by_row)(const Matrix *a, Matrix *b) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 6d84663..bc5f285 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -515,7 +515,7 @@ void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }

-void nerv_matrix_(diagonalize)(Matrix *a, CuContext * context, Status *status) {
+void nerv_matrix_(diagonalize)(Matrix *a, CuContext *context, Status *status) {
     if (a->nrow != a->ncol)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     PROFILE_START
@@ -524,6 +524,18 @@ void nerv_matrix_(diagonalize)(Matrix *a, CuContext *context, Status *status) {
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }

+void nerv_matrix_(set_values_by_mask)(Matrix *a, const Matrix *b, double val,
+                                      CuContext *context, Status *status) {
+    if (a->nrow != b->nrow)
+        NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+    if (b->ncol != 1)
+        NERV_EXIT_STATUS(status, MAT_COL_VECTOR_EXP, 0);
+    PROFILE_START
+    cudak_(cuda_set_values_by_mask)(b, a, val);
+    PROFILE_STOP
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
 static void cuda_matrix_(free)(MATRIX_ELEM *ptr, CuContext *context, Status *status) {
     CUDA_SAFE_SYNC_CALL(cudaFree(ptr), status);
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index de3a09e..79bfc76 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -35,6 +35,8 @@
 void nerv_matrix_(fill)(Matrix *self, double val,
                         CuContext *context, Status *status);
 void nerv_matrix_(diagonalize)(Matrix *self, CuContext *context, Status *status);
+void nerv_matrix_(set_values_by_mask)(Matrix *self, Matrix *mask, double val,
+                                      CuContext *context, Status *status);
 void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
                               CuContext *context, Status *status);
diff --git a/nerv/lib/matrix/generic/mmatrix.c b/nerv/lib/matrix/generic/mmatrix.c
index badddbd..e356de7 100644
--- a/nerv/lib/matrix/generic/mmatrix.c
+++ b/nerv/lib/matrix/generic/mmatrix.c
@@ -507,6 +507,29 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }

+void nerv_matrix_(set_values_by_mask)(Matrix *a, const Matrix *b, double val,
+                                      MContext *context, Status *status) {
+    if (a->nrow != b->nrow)
+        NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+    if (b->ncol != 1)
+        NERV_EXIT_STATUS(status, MAT_COL_VECTOR_EXP, 0);
+    int i, j;
+    size_t astride = a->stride, bstride = b->stride;
+    MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
+                *brow = MATRIX_ELEM_PTR(b);
+    for (i = 0; i < a->nrow; i++)
+    {
+        if (brow[0] == 0.0)
+        {
+            for (j = 0; j < a->ncol; j++)
+                arow[j] = val;
+        }
+        arow = MATRIX_NEXT_ROW_PTR(arow, astride);
+        brow = MATRIX_NEXT_ROW_PTR(brow, bstride);
+    }
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
 static void host_matrix_(free)(MATRIX_ELEM *ptr, MContext *context, Status *status) {
     free(ptr);
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
diff --git a/nerv/lib/matrix/generic/mmatrix.h b/nerv/lib/matrix/generic/mmatrix.h
index 6d17c99..41c39f6 100644
--- a/nerv/lib/matrix/generic/mmatrix.h
+++ b/nerv/lib/matrix/generic/mmatrix.h
@@ -48,6 +48,8 @@
 void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
                               int cont, MContext *context, Status *status);
 void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
                                  int step, MContext *context, Status *status);
+void nerv_matrix_(set_values_by_mask)(Matrix *a, const Matrix *b, double val,
+                                      MContext *context, Status *status);
 void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
                                      MContext *context, Status *status);
 void nerv_matrix_(scale_rows_by_row)(const Matrix *a, const Matrix *b,
diff --git a/nerv/lib/matrix/mmatrix.c b/nerv/lib/matrix/mmatrix.c
index e40b160..006735d 100644
--- a/nerv/lib/matrix/mmatrix.c
+++ b/nerv/lib/matrix/mmatrix.c
@@ -8,15 +8,19 @@
 void nerv_host_context_print_profile(MContext *context) {
     HashMap *profile = context->profile;
     size_t i;
+    float tmp, tot = 0;
     fprintf(stderr, "*** [nerv mmatrix profile] **\n");
     for (i = 0; i < profile->size; i++)
     {
         HashNode *ptr;
         for (ptr = profile->bucket[i]; ptr; ptr = ptr->next)
         {
-            fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val);
+            tmp = *(float *)ptr->val;
+            fprintf(stderr, "%s:\t%.6f\n", ptr->key, tmp);
+            tot += tmp;
         }
     }
+    fprintf(stderr, "Total time:\t%.6f\n", tot);
 }

 void nerv_host_context_clear_profile(MContext *context) {
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index 0c90d39..9577fd5 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -268,6 +268,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"scale_rows_by_col", nerv_matrix_(lua_scale_rows_by_col)},
     {"prefixsum_row", nerv_matrix_(lua_prefixsum_row)},
     {"diagonalize", nerv_matrix_(lua_diagonalize)},
+    {"set_values_by_mask", nerv_matrix_(lua_set_values_by_mask)},
 #ifdef __NERV_FUTURE_CUDA_7
     {"update_select_rows_by_rowidx", nerv_matrix_(lua_update_select_rows_by_rowidx)},
     {"update_select_rows_by_colidx", nerv_matrix_(lua_update_select_rows_by_colidx)},
diff --git a/nerv/matrix/generic/matrix.c b/nerv/matrix/generic/matrix.c
index fe07585..3e91933 100644
--- a/nerv/matrix/generic/matrix.c
+++ b/nerv/matrix/generic/matrix.c
@@ -395,4 +395,16 @@ static int nerv_matrix_(lua_diagonalize)(lua_State *L) {
     return 0;
 }

+static int nerv_matrix_(lua_set_values_by_mask)(lua_State *L) {
+    Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    Matrix *mask = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    double val = luaL_checknumber(L, 3);
+    nerv_matrix_(set_values_by_mask)(a, mask, val, context, &status);
+    NERV_LUA_CHECK_STATUS(L, status);
+    return 0;
+}
+
 #endif
diff --git a/nerv/matrix/generic/mmatrix.c b/nerv/matrix/generic/mmatrix.c
index a5e5969..de1eaa3 100644
--- a/nerv/matrix/generic/mmatrix.c
+++ b/nerv/matrix/generic/mmatrix.c
@@ -117,6 +117,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"clip", nerv_matrix_(lua_clip)},
     {"fill", nerv_matrix_(lua_fill)},
     {"diagonalize", nerv_matrix_(lua_diagonalize)},
+    {"set_values_by_mask", nerv_matrix_(lua_set_values_by_mask)},
     {"sigmoid", nerv_matrix_(lua_sigmoid)},
     {"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)},
     {"softmax", nerv_matrix_(lua_softmax)},
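Taken together, the hunks above give both matrix backends the same Lua-visible method: m:set_values_by_mask(mask, val) overwrites every row of m whose mask entry is zero; mask must be a one-column matrix with m's row count (MAT_COL_VECTOR_EXP otherwise). A sketch of the CUDA-side call pattern, building the mask on the host exactly as the Lua code in this merge does (gconf and the widths are illustrative):

    local tmp = gconf.mmat_type(gconf.batch_size, 1)  -- host-side mask
    for i = 0, gconf.batch_size - 1 do
        tmp[i][0] = 1                                 -- all rows live ...
    end
    tmp[0][0] = 0                                     -- ... except row 0
    local mask = gconf.cumat_type.new_from_host(tmp)
    local m = gconf.cumat_type(gconf.batch_size, 8)
    m:fill(7)
    m:set_values_by_mask(mask, 0)                     -- row 0 is now all 0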
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index 2cb83ce..6f7fe10 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -16,6 +16,7 @@ function network:__init(id, global_conf, network_conf)
     if self.nn_act_default == nil then
         self.nn_act_default = 0
     end
+
     self.layers = {}
     self.input_conn = {}
     self.output_conn = {}
@@ -26,7 +27,17 @@ function network:__init(id, global_conf, network_conf)
         if self.input_conn[id][port] ~= nil then
             nerv.error('duplicate edge')
         end
-        self.input_conn[id][port] = {0, i, time}
+        if nerv.is_type(self.layers[id], 'nerv.DuplicateLayer') then
+            local tmp = nerv.IdentityLayer('', self.gconf, {dim_in = {self.dim_in[i]}, dim_out = {self.dim_in[i]}})
+            table.insert(self.layers, tmp)
+            local new_id = #self.layers
+            self.input_conn[new_id] = {{0, i, time}}
+            self.output_conn[new_id] = {{id, port, 0}}
+            self.input_conn[id][port] = {new_id, 1, 0}
+            self.socket.inputs[i] = {new_id, 1, time}
+        else
+            self.input_conn[id][port] = {0, i, time}
+        end
     end
     for i = 1, #self.dim_out do
         local edge = self.socket.outputs[i]
@@ -34,18 +45,53 @@ function network:__init(id, global_conf, network_conf)
         if self.output_conn[id][port] ~= nil then
             nerv.error('duplicate edge')
         end
-        self.output_conn[id][port] = {0, i, time}
+        if nerv.is_type(self.layers[id], 'nerv.DuplicateLayer') then
+            local tmp = nerv.IdentityLayer('', self.gconf, {dim_in = {self.dim_out[i]}, dim_out = {self.dim_out[i]}})
+            table.insert(self.layers, tmp)
+            local new_id = #self.layers
+            self.input_conn[new_id] = {{id, port, 0}}
+            self.output_conn[new_id] = {{0, i, time}}
+            self.output_conn[id][port] = {new_id, 1, 0}
+            self.socket.outputs[i] = {new_id, 1, time}
+        else
+            self.output_conn[id][port] = {0, i, time}
+        end
     end
+
     self.delay = 0
     for i = 1, #self.layers do
         local dim_in, _ = self.layers[i]:get_dim()
         for j = 1, #dim_in do
+            if self.input_conn[i][j] == nil then
+                nerv.error('dangling input')
+            end
             local time = self.input_conn[i][j][3]
             if math.abs(time) > self.delay then
                 self.delay = math.abs(time)
             end
         end
     end
+
+    self.input_edge = {}
+    self.output_edge = {}
+    for t = -self.delay, self.delay do
+        self.input_edge[t] = {}
+        self.output_edge[t] = {}
+    end
+    for i = 1, #self.layers do
+        local dim_in, dim_out = self.layers[i]:get_dim()
+        for j = 1, #dim_in do
+            local time = self.input_conn[i][j][3]
+            table.insert(self.input_edge[time], {i, j})
+        end
+        for j = 1, #dim_out do
+            if self.output_conn[i][j] == nil then
+                nerv.error('dangling output')
+            end
+            local time = self.output_conn[i][j][3]
+            table.insert(self.output_edge[time], {i, j})
+        end
+    end
 end

 function network:compile(layer)
@@ -112,11 +158,22 @@ function network:init(batch_size, chunk_size)

     self:make_initial_store()
     collectgarbage('collect')
+
+    self.flush = {}
+    self.gconf.mask = {}
+    for t = 1, self.chunk_size do
+        self.flush[t] = {}
+        self.gconf.mask[t] = self.mat_type(self.batch_size, 1)
+    end
 end

 function network:epoch_init()
+    self.timestamp = 0
     for i = 1, #self.layers do
         self.layers[i]:init(self.batch_size, self.chunk_size)
+        for t = 1, self.chunk_size do
+            self.flush[t][i] = {timestamp = 0, input = {}, output = {}}
+        end
     end
 end
@@ -134,12 +191,10 @@ function network:topsort()
     for i = 1, #self.layers do
         local _, dim_out = self.layers[i]:get_dim()
         for j = 1, #dim_out do
-            if self.output_conn[i][j] ~= nil then
-                local edge = self.output_conn[i][j]
-                local id, time = edge[1], edge[3] + t
-                if time >= 1 and time <= self.chunk_size and id ~= 0 then
-                    degree[time][id] = degree[time][id] + 1
-                end
+            local edge = self.output_conn[i][j]
+            local id, time = edge[1], edge[3] + t
+            if time >= 1 and time <= self.chunk_size and id ~= 0 then
+                degree[time][id] = degree[time][id] + 1
             end
         end
     end
@@ -161,15 +216,13 @@ function network:topsort()
             l = l + 1
             local _, dim_out = self.layers[i]:get_dim()
             for j = 1, #dim_out do
-                if self.output_conn[i][j] ~= nil then
-                    local edge = self.output_conn[i][j]
-                    local id, time = edge[1], edge[3] + t
-                    if time >= 1 and time <= self.chunk_size and id ~= 0 then
-                        degree[time][id] = degree[time][id] - 1
-                        if degree[time][id] == 0 then
-                            r = r + 1
-                            self.queue[r] = {chunk = time, id = id}
-                        end
+                local edge = self.output_conn[i][j]
+                local id, time = edge[1], edge[3] + t
+                if time >= 1 and time <= self.chunk_size and id ~= 0 then
+                    degree[time][id] = degree[time][id] - 1
+                    if degree[time][id] == 0 then
+                        r = r + 1
+                        self.queue[r] = {chunk = time, id = id}
                     end
                 end
             end
@@ -197,22 +250,26 @@ function network:make_initial_store()
             err_memory[t][i][j] = self.mat_type(self.batch_size, dim_in[j])
             err_memory[t][i][j]:fill(0)
         end
-        for j = 1, #dim_out do
-            memory[t][i][j] = self.mat_type(self.batch_size, dim_out[j])
-            memory[t][i][j]:fill(self.nn_act_default)
+        if t < 1 or t > self.chunk_size or not nerv.is_type(self.layers[i], 'nerv.DuplicateLayer') then
+            for j = 1, #dim_out do
+                memory[t][i][j] = self.mat_type(self.batch_size, dim_out[j])
+                memory[t][i][j]:fill(self.nn_act_default)
+            end
         end
     end
-    -- memory[t][0] stores network input
-    memory[t][0] = {}
-    for j = 1, #self.dim_in do
-        memory[t][0][j] = self.mat_type(self.batch_size, self.dim_in[j])
-        memory[t][0][j]:fill(self.nn_act_default)
-    end
-    -- err_memory[t][0] stores network err_input
-    err_memory[t][0] = {}
-    for j = 1, #self.dim_out do
-        err_memory[t][0][j] = self.mat_type(self.batch_size, self.dim_out[j])
-        err_memory[t][0][j]:fill(0)
+    if t < 1 or t > self.chunk_size then
+        -- memory[t][0] stores network input
+        memory[t][0] = {}
+        for j = 1, #self.dim_in do
+            memory[t][0][j] = self.mat_type(self.batch_size, self.dim_in[j])
+            memory[t][0][j]:fill(self.nn_act_default)
+        end
+        -- err_memory[t][0] stores network err_input
+        err_memory[t][0] = {}
+        for j = 1, #self.dim_out do
+            err_memory[t][0][j] = self.mat_type(self.batch_size, self.dim_out[j])
+            err_memory[t][0][j]:fill(0)
+        end
     end
 end
@@ -255,6 +312,28 @@ function network:make_initial_store()
         end
     end

+    -- reference copy for duplicate layer
+    for i = 1, #self.queue do
+        local t, id = self.queue[i].chunk, self.queue[i].id
+        if nerv.is_type(self.layers[id], 'nerv.DuplicateLayer') then
+            local _, dim_out = self.layers[id]:get_dim()
+            for j = 1, #dim_out do
+                if self.output[t][id][j] ~= nil then
+                    nerv.error('duplicate output reference not nil')
+                end
+                self.output[t][id][j] = self.input[t][id][1]
+                local edge = self.output_conn[id][j]
+                local to, port, time = edge[1], edge[2], edge[3] + t
+                if time >= 1 and time <= self.chunk_size then
+                    if self.input[time][to][port] ~= nil then
+                        nerv.error('duplicate input reference not nil')
+                    end
+                    self.input[time][to][port] = self.output[t][id][j]
+                end
+            end
+        end
+    end
+
     -- check dangling reference
     for t = 1, self.chunk_size do
         for i = 1, #self.dim_in do
@@ -291,6 +370,7 @@ function network:make_initial_store()
         local dim_in, dim_out = self.layers[i]:get_dim()
         for j = 1, #dim_in do
             if self.input[t][i][j] == nil then
+                print(t,i,j,self.layers[i].id)
                 nerv.error('input reference dangling')
             end
             if self.err_output[t][i][j] == nil then
@@ -314,9 +394,14 @@ function network:make_initial_store()
         self.legacy[t] = {}
         for i = 1, #self.layers do
             self.legacy[t][i] = {}
-            local _, dim_out = self.layers[i]:get_dim()
-            for j = 1, #dim_out do
-                self.legacy[t][i][j] = memory[t][i][j]
+        end
+    end
+    for d = 1, self.delay do
+        for t = 1 - d, 0 do
+            for i = 1, #self.output_edge[d] do
+                local edge = self.output_edge[d][i]
+                local id, port = edge[1], edge[2]
+                self.legacy[t][id][port] = memory[t][id][port]
             end
         end
     end
@@ -383,59 +468,87 @@ function network:mini_batch_init(info)
     self.info = info
     self:set_input(self.info.input)
     self:set_output(self.info.output)
+    if self.info.do_train then
+        self:set_err_input(self.info.err_input)
+        self:set_err_output(self.info.err_output)
+    end
+
+    -- calculate mask
+    for t = 1, self.chunk_size do
+        local tmp = self.gconf.mmat_type(self.batch_size, 1)
+        for i = 1, self.batch_size do
+            if t <= self.info.seq_length[i] then
+                tmp[i - 1][0] = 1
+            else
+                tmp[i - 1][0] = 0
+            end
+        end
+        self.gconf.mask[t]:copy_fromh(tmp)
+    end

     -- calculate border
     self.max_length = 0
-    self.border = {}
-    for i = 1, self.chunk_size do
-        self.border[i] = {}
-    end
+    self.timestamp = self.timestamp + 1
     for i = 1, self.batch_size do
         if self.info.seq_length[i] > self.max_length then
             self.max_length = self.info.seq_length[i]
         end
-        for t = 1, self.delay do
-            local chunk = self.info.seq_length[i] + t
-            if chunk > self.chunk_size then
-                break
+        local border = self.info.seq_length[i]
+        for d = 1, self.delay do
+            for t = border + 1, border + d do
+                if t > self.chunk_size then
+                    break
+                end
+                for j = 1, #self.output_edge[-d] do
+                    local edge = self.output_edge[-d][j]
+                    local id, port = edge[1], edge[2]
+                    local flush = self.flush[t][id]
+                    if flush.timestamp ~= self.timestamp then
+                        flush.timestamp = self.timestamp
+                        flush.input = {}
+                        flush.output = {}
+                    end
+                    table.insert(flush.output, {port, i})
+                end
+            end
+            if self.info.do_train then
+                for t = border, border - d + 1, -1 do
+                    if t < 1 then
+                        break
+                    end
+                    for j = 1, #self.input_edge[-d] do
+                        local edge = self.input_edge[-d][j]
+                        local id, port = edge[1], edge[2]
+                        local flush = self.flush[t][id]
+                        if flush.timestamp ~= self.timestamp then
+                            flush.timestamp = self.timestamp
+                            flush.input = {}
+                            flush.output = {}
+                        end
+                        table.insert(flush.input, {port, i})
+                    end
+                end
             end
-            table.insert(self.border[chunk], i)
         end
     end

     -- copy legacy
-    for t = 1 - self.delay, 0 do
-        for i = 1, #self.layers do
-            local _, dim_out = self.layers[i]:get_dim()
-            for j = 1, #dim_out do
-                if t + self.chunk_size >= 1 and self.output_conn[i][j][1] ~= 0 then
-                    self.legacy[t][i][j]:copy_from(self.output[t + self.chunk_size][i][j])
+    for d = 1, self.delay do
+        for t = 1 - d, 0 do
+            for i = 1, #self.output_edge[d] do
+                local edge = self.output_edge[d][i]
+                local id, port = edge[1], edge[2]
+                if t + self.chunk_size >= 1 and self.output_conn[id][port][1] ~= 0 then
+                    self.legacy[t][id][port]:copy_from(self.output[t + self.chunk_size][id][port])
                 end
-                for k = 1, #self.info.new_seq do
-                    local batch = self.info.new_seq[k]
-                    self.legacy[t][i][j][batch - 1]:fill(self.nn_act_default)
+                for j = 1, #self.info.new_seq do
+                    local batch = self.info.new_seq[j]
+                    self.legacy[t][id][port][batch - 1]:fill(self.nn_act_default)
                 end
             end
         end
     end
-
-    if self.info.do_train then
-        self:set_err_input(self.info.err_input)
-        self:set_err_output(self.info.err_output)
-
-        -- flush border gradient
-        for t = self.max_length + 1, self.max_length + self.delay do
-            if t > self.chunk_size then
-                break
-            end
-            for i = 1, #self.layers do
-                local dim_in, _ = self.layers[i]:get_dim()
-                for j = 1, #dim_in do
-                    self.err_output[t][i][j]:fill(0)
-                end
-            end
-        end
-    end
 end

 function network:propagate()
@@ -445,11 +558,11 @@ function network:propagate()
             self.layers[id]:propagate(self.input[t][id], self.output[t][id], t)
         end
         -- flush border activation
-        for j = 1, #self.border[t] do
-            local batch = self.border[t][j]
-            local _, dim_out = self.layers[id]:get_dim()
-            for k = 1, #dim_out do
-                self.output[t][id][k][batch - 1]:fill(self.nn_act_default)
+        if self.flush[t][id].timestamp == self.timestamp then
+            for j = 1, #self.flush[t][id].output do
+                local border = self.flush[t][id].output[j]
+                local port, batch = border[1], border[2]
+                self.output[t][id][port][batch - 1]:fill(self.nn_act_default)
             end
         end
     end
@@ -459,15 +572,8 @@ function network:back_propagate()
     for i = #self.queue, 1, -1 do
         local t, id = self.queue[i].chunk, self.queue[i].id
         if t <= self.max_length then
-            -- flush border gradient
-            for j = 1, #self.border[t] do
-                local batch = self.border[t][j]
-                local _, dim_out = self.layers[id]:get_dim()
-                for k = 1, #dim_out do
-                    self.err_input[t][id][k][batch - 1]:fill(0)
-                end
-            end
             self.layers[id]:back_propagate(self.err_input[t][id], self.err_output[t][id], self.input[t][id], self.output[t][id], t)
+            -- gradient clip
             if self.clip ~= nil then
                 local dim_in, _ = self.layers[id]:get_dim()
                 for j = 1, #dim_in do
@@ -475,14 +581,21 @@ function network:back_propagate()
                 end
             end
         end
+        -- flush border gradient
+        if self.flush[t][id].timestamp == self.timestamp then
+            for j = 1, #self.flush[t][id].input do
+                local border = self.flush[t][id].input[j]
+                local port, batch = border[1], border[2]
+                self.err_output[t][id][port][batch - 1]:fill(0)
+            end
+        end
     end
 end

 function network:update()
-    for i = 1, #self.queue do
-        local t, id = self.queue[i].chunk, self.queue[i].id
-        if t <= self.max_length then
-            self.layers[id]:update(self.err_input[t][id], self.input[t][id], self.output[t][id], t)
+    for t = 1, self.max_length do
+        for i = 1, #self.layers do
+            self.layers[i]:update(self.err_input[t][i], self.input[t][i], self.output[t][i], t)
         end
     end
 end
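For orientation, the flush table that replaces self.border above records, per chunk t and layer id, the (port, batch) pairs whose activations (flush.output) or gradients (flush.input) cross a sequence border; entries are stamped with the current timestamp so stale bookkeeping from earlier mini-batches is ignored without clearing the whole table. The per-chunk masks are built the same way throughout this merge; a condensed, self-contained sketch of that construction (shapes and copy_fromh as in the hunks above):

    -- mask[t][i] is 1 while frame t of batch slot i lies inside a sequence.
    local function make_masks(gconf, seq_length, chunk_size, batch_size)
        local masks = {}
        for t = 1, chunk_size do
            local tmp = gconf.mmat_type(batch_size, 1)
            for i = 1, batch_size do
                tmp[i - 1][0] = (t <= seq_length[i]) and 1 or 0
            end
            masks[t] = gconf.cumat_type(batch_size, 1)
            masks[t]:copy_fromh(tmp)   -- upload the host mask to the GPU
        end
        return masks
    end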