From eba6049a82455499c68ee875843b6f44d6164fa5 Mon Sep 17 00:00:00 2001
From: Determinant
Date: Fri, 5 Jun 2015 16:56:33 +0800
Subject: add close method for ChunkFile, fix #18

---
 Makefile                        |  3 ++-
 examples/chunk_file_example.lua | 53 +++++++++++++++++++++++++++++++++++++++++
 io/chunk_file.c                 | 22 ++++++++++++++++-
 io/chunk_file.h                 |  1 +
 io/init.lua                     |  3 +++
 io/sgd_buffer.lua               |  2 +-
 matrix/cuda_helper.h            |  4 ++--
 nn/layer_dag.lua                | 11 +++------
 8 files changed, 86 insertions(+), 13 deletions(-)
 create mode 100644 examples/chunk_file_example.lua

diff --git a/Makefile b/Makefile
index 5c6fa7b..5b6e081 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,8 @@ LUA_LIBS := matrix/init.lua io/init.lua nerv.lua \
 	nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
 	io/sgd_buffer.lua
 INCLUDE := -I build/luajit-2.0/include/luajit-2.0/ -DLUA_USE_APICHECK
-CUDA_BASE := /usr/local/cuda-6.5
+# CUDA_BASE := /usr/local/cuda-6.5
+CUDA_BASE := /usr/local/cuda-5.0
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
 INCLUDE += $(CUDA_INCLUDE)
 LDFLAGS := -L$(CUDA_BASE)/lib64/ -Wl,-rpath=$(CUDA_BASE)/lib64/ -lcudart -lcublas
diff --git a/examples/chunk_file_example.lua b/examples/chunk_file_example.lua
new file mode 100644
index 0000000..5961c98
--- /dev/null
+++ b/examples/chunk_file_example.lua
@@ -0,0 +1,53 @@
+-- To define a readable and writable chunk, one must define a class with the
+-- following methods: __init(id, global_conf), read(handle), write(handle),
+-- get_info(), set_info(info) and an id attribute. This file demonstrates a
+-- basic chunk implementation which manages the I/O of a matrix
+
+local MatrixChunk = nerv.class("nerv.MatrixChunk")
+
+function MatrixChunk:__init(id, global_conf)
+    self.id = id
+    self.info = {}
+    self.gconf = global_conf
+end
+
+function MatrixChunk:read(handle)
+    -- pass the read handle to the matrix method
+    self.data = nerv.MMatrixFloat.load(handle)
+end
+
+function MatrixChunk:write(handle)
+    -- pass the write handle to the matrix method
+    self.data:save(handle)
+end
+
+function MatrixChunk:get_info()
+    return self.info
+end
+
+function MatrixChunk:set_info(info)
+    self.info = info
+end
+
+function MatrixChunk.create_from_matrix(id, mat)
+    local ins = nerv.MatrixChunk(id)
+    ins.data = mat
+    return ins
+end
+
+mat = nerv.MMatrixFloat(3, 4)
+for i = 0, 2 do
+    for j = 0, 3 do
+        mat[i][j] = i + j
+    end
+end
+
+cd = nerv.MatrixChunk.create_from_matrix("matrix1", mat)
+
+cf = nerv.ChunkFile("test.nerv", "w")
+cf:write_chunk(cd)
+cf:close()
+
+cf2 = nerv.ChunkFile("test.nerv", "r")
+cd2 = cf2:read_chunk("matrix1")
+print(cd2.data)
diff --git a/io/chunk_file.c b/io/chunk_file.c
index 4e987b7..aa7dd1c 100644
--- a/io/chunk_file.c
+++ b/io/chunk_file.c
@@ -10,6 +10,11 @@
     do { \
         if ((exp) != (ret)) INVALID_FORMAT_ERROR(fn); \
     } while (0)
+#define CHECK_FILE_OPEN(pfh) \
+    do { \
+        if ((pfh)->closed) \
+            nerv_error(L, "operations on a closed file"); \
+    } while (0)
 
 const char *nerv_chunk_file_tname = "nerv.ChunkFile";
 const char *nerv_chunk_file_handle_tname = "nerv.ChunkFileHandle";
@@ -109,6 +114,7 @@ int nerv_chunk_file_open_write(lua_State *L, const char *fn) {
     if (!fp) nerv_error(L, "Error while opening chunk file: %s", fn);
     lfp = (ChunkFileHandle *)malloc(sizeof(ChunkFileHandle));
     lfp->fp = fp;
+    lfp->closed = 0;
     luaT_pushudata(L, lfp, nerv_chunk_file_handle_tname);
     lua_setfield(L, -2, "handle");
     luaT_pushmetatable(L, nerv_chunk_file_tname);
@@ -174,6 +180,7 @@ int nerv_chunk_file_open_read(lua_State *L, const char *fn) {
     lua_setfield(L, -2, "metadata");
     lfp = (ChunkFileHandle *)malloc(sizeof(ChunkFileHandle));
     lfp->fp = fp;
+    lfp->closed = 0;
     luaT_pushudata(L, lfp, nerv_chunk_file_handle_tname);
     lua_setfield(L, -2, "handle");
     luaT_pushmetatable(L, nerv_chunk_file_tname);
@@ -215,6 +222,7 @@ int nerv_chunk_file_write_chunkdata(lua_State *L) {
     const char *metadata_str = lua_tolstring(L, 2, NULL);
     lua_getfield(L, 1, "handle");
     pfh = luaT_checkudata(L, -1, nerv_chunk_file_handle_tname);
+    CHECK_FILE_OPEN(pfh);
     start = ftello(pfh->fp);
     write_chunk_header_plain(pfh->fp, 0, &status); /* fill zeros */
     CHECK_WRITE(status);
@@ -245,6 +253,7 @@ int nerv_chunk_file_get_chunkdata(lua_State *L) {
 
     lua_getfield(L, 1, "handle");
     pfh = luaT_checkudata(L, -1, nerv_chunk_file_handle_tname);
+    CHECK_FILE_OPEN(pfh);
     lua_pop(L, 1); /* pop handle */
     lua_getfield(L, 1, "metadata");
     /* now stack: self, k, metadata */
@@ -260,10 +269,20 @@ int nerv_chunk_file_get_chunkdata(lua_State *L) {
     return 1;
 }
 
+int nerv_chunk_file_close(lua_State *L) {
+    ChunkFileHandle *pfh;
+    lua_getfield(L, 1, "handle");
+    pfh = luaT_checkudata(L, -1, nerv_chunk_file_handle_tname);
+    CHECK_FILE_OPEN(pfh);
+    fclose(pfh->fp);
+    pfh->closed = 1;
+    return 0;
+}
+
 int nerv_chunk_file_handle_destroy(lua_State *L) {
     ChunkFileHandle *pfh = luaT_checkudata(L, 1, nerv_chunk_file_handle_tname);
-    fclose(pfh->fp);
+    if (!pfh->closed) fclose(pfh->fp);
     free(pfh);
     return 0;
 }
@@ -285,6 +304,7 @@ static int nerv_chunk_data_destroy(lua_State *L) {
 static const luaL_Reg nerv_chunk_file_methods[] = {
     {"get_chunkdata", nerv_chunk_file_get_chunkdata},
     {"_write_chunkdata", nerv_chunk_file_write_chunkdata},
+    {"close", nerv_chunk_file_close},
     {"__init", nerv_chunk_file___init},
     {NULL, NULL}
 };
diff --git a/io/chunk_file.h b/io/chunk_file.h
index 9ece117..9bae59d 100644
--- a/io/chunk_file.h
+++ b/io/chunk_file.h
@@ -8,6 +8,7 @@ extern const char *nerv_chunk_data_tname;
 
 typedef struct ChunkFileHandle {
     FILE *fp;
+    int closed;
 } ChunkFileHandle;
 
 typedef struct ChunkInfo {
diff --git a/io/init.lua b/io/init.lua
index c151804..b722a81 100644
--- a/io/init.lua
+++ b/io/init.lua
@@ -18,6 +18,9 @@ function nerv.ChunkFile:write_chunk(chunk)
 end
 
 function nerv.ChunkFile:read_chunk(id, global_conf)
+    if self.metadata == nil then
+        nerv.error("wrong file opening mode")
+    end
     local metadata = self.metadata[id]
     if metadata == nil then
         nerv.error("chunk with id %s does not exist", id)
diff --git a/io/sgd_buffer.lua b/io/sgd_buffer.lua
index bf72744..381b863 100644
--- a/io/sgd_buffer.lua
+++ b/io/sgd_buffer.lua
@@ -15,7 +15,7 @@ function SGDBuffer:__init(global_conf, buffer_conf)
         local buffs = {}
         for id, width in pairs(reader_spec.data) do
             buffs[id] = {data = global_conf.mmat_type(self.buffer_size, width),
-                         leftover = {},
+                         leftover = nil,
                          width = width}
         end
         table.insert(self.readers, {buffs = buffs,
diff --git a/matrix/cuda_helper.h b/matrix/cuda_helper.h
index 88619fd..d6effdb 100644
--- a/matrix/cuda_helper.h
+++ b/matrix/cuda_helper.h
@@ -52,10 +52,10 @@ static const char *cublasGetErrorString(cublasStatus_t err) {
             return "CUBLAS_STATUS_EXECUTION_FAILED";
         case CUBLAS_STATUS_INTERNAL_ERROR:
             return "CUBLAS_STATUS_INTERNAL_ERROR";
-        case CUBLAS_STATUS_NOT_SUPPORTED:
+/*      case CUBLAS_STATUS_NOT_SUPPORTED:
             return "CUBLAS_STATUS_NOT_SUPPORTED";
         case CUBLAS_STATUS_LICENSE_ERROR:
-            return "CUBLAS_STATUS_LICENSE_ERROR";
+            return "CUBLAS_STATUS_LICENSE_ERROR"; */
     }
     return "";
 }
diff --git a/nn/layer_dag.lua b/nn/layer_dag.lua
index 4ee829e..3951bfa 100644
--- a/nn/layer_dag.lua
+++ b/nn/layer_dag.lua
@@ -210,7 +210,9 @@ function nerv.DAGLayer:update(bp_err, input, output)
     self:set_err_inputs(bp_err)
     self:set_inputs(input)
     self:set_outputs(output)
+    -- print("update")
     for id, ref in pairs(self.queue) do
+        -- print(ref.layer.id)
         ref.layer:update(ref.err_inputs, ref.inputs, ref.outputs)
     end
 end
@@ -220,11 +222,7 @@ function nerv.DAGLayer:propagate(input, output)
     self:set_outputs(output)
     for i = 1, #self.queue do
         local ref = self.queue[i]
-        --[[
-        print(ref.inputs[1])
-        print(ref.outputs[1])
-        print(#ref.inputs, #ref.outputs)
-        --]]
+        -- print(ref.layer.id)
         ref.layer:propagate(ref.inputs, ref.outputs)
     end
 end
@@ -238,8 +236,5 @@ function nerv.DAGLayer:back_propagate(next_bp_err, bp_err, input, output)
         local ref = self.queue[i]
         -- print(ref.layer.id)
         ref.layer:back_propagate(ref.err_outputs, ref.err_inputs, ref.inputs, ref.outputs)
-        -- if #ref.err_outputs > 0 then
-        --     print(ref.err_outputs[1])
-        -- end
     end
 end

--
cgit v1.2.3

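Usage note (not part of either patch): a minimal sketch of the new close() behaviour, reusing the nerv.MatrixChunk class defined in examples/chunk_file_example.lua above. The file name "close_demo.nerv" and the pcall wrappers are illustrative only.

    -- assumes the nerv.MatrixChunk example class has already been loaded
    local mat = nerv.MMatrixFloat(2, 2)
    for i = 0, 1 do
        for j = 0, 1 do
            mat[i][j] = i + j
        end
    end
    local cd = nerv.MatrixChunk.create_from_matrix("m", mat)

    local cf = nerv.ChunkFile("close_demo.nerv", "w")
    cf:write_chunk(cd)
    -- read_chunk on a file opened for writing now raises "wrong file opening mode"
    print(pcall(function() return cf:read_chunk("m") end))
    cf:close()
    -- any further operation on the closed handle raises "operations on a closed file"
    print(pcall(function() return cf:write_chunk(cd) end))
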
From 37af4bed9c3680fdb9db569605f15013e9b6b64d Mon Sep 17 00:00:00 2001
From: Determinant
Date: Fri, 5 Jun 2015 17:53:05 +0800
Subject: add get_params to all layers

---
 Makefile                 |  2 +-
 examples/test_nn_lib.lua | 29 +++++++++++++++++++++--------
 layer/affine.lua         | 10 +++++++---
 layer/bias.lua           |  4 ++++
 layer/init.lua           |  4 ++++
 layer/sigmoid.lua        |  4 ++++
 layer/softmax_ce.lua     |  4 ++++
 layer/window.lua         |  4 ++++
 nn/layer_dag.lua         | 28 +++++++++++++++++++---------
 9 files changed, 68 insertions(+), 21 deletions(-)

diff --git a/Makefile b/Makefile
index 5b6e081..0468f57 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ LUA_LIBS := matrix/init.lua io/init.lua nerv.lua \
 	nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
 	io/sgd_buffer.lua
 INCLUDE := -I build/luajit-2.0/include/luajit-2.0/ -DLUA_USE_APICHECK
-# CUDA_BASE := /usr/local/cuda-6.5
+#CUDA_BASE := /usr/local/cuda-6.5
 CUDA_BASE := /usr/local/cuda-5.0
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
 INCLUDE += $(CUDA_INCLUDE)
diff --git a/examples/test_nn_lib.lua b/examples/test_nn_lib.lua
index 04fd7d6..6fdbd67 100644
--- a/examples/test_nn_lib.lua
+++ b/examples/test_nn_lib.lua
@@ -117,7 +117,7 @@ tnet_reader = nerv.TNetReader(gconf,
 buffer = nerv.SGDBuffer(gconf,
                         {
                             buffer_size = 81920,
-                            -- randomize = true,
+                            randomize = true,
                             readers = {
                                 { reader = tnet_reader,
                                   data = {main_scp = 429, ref = 1}}
@@ -128,9 +128,12 @@ sm = sublayer_repo:get_layer("softmax_ce0")
 main = layer_repo:get_layer("main")
 main:init(gconf.batch_size)
 gconf.cnt = 0
+-- data = buffer:get_data()
+-- input = {data.main_scp, data.ref}
+-- while true do
 for data in buffer.get_data, buffer do
-    if gconf.cnt == 1000 then break end
-    gconf.cnt = gconf.cnt + 1
+--    if gconf.cnt == 100 then break end
+--    gconf.cnt = gconf.cnt + 1
 
     input = {data.main_scp, data.ref}
     output = {}
@@ -141,11 +144,21 @@ for data in buffer.get_data, buffer do
     main:back_propagate(err_output, err_input, input, output)
     main:update(err_input, input, output)
 
-    nerv.utils.printf("cross entropy: %.8f\n", sm.total_ce)
-    nerv.utils.printf("correct: %d\n", sm.total_correct)
-    nerv.utils.printf("frames: %d\n", sm.total_frames)
-    nerv.utils.printf("err/frm: %.8f\n", sm.total_ce / sm.total_frames)
-    nerv.utils.printf("accuracy: %.8f\n", sm.total_correct / sm.total_frames)
+--    nerv.utils.printf("cross entropy: %.8f\n", sm.total_ce)
+--    nerv.utils.printf("correct: %d\n", sm.total_correct)
+--    nerv.utils.printf("frames: %d\n", sm.total_frames)
+--    nerv.utils.printf("err/frm: %.8f\n", sm.total_ce / sm.total_frames)
+--    nerv.utils.printf("accuracy: %.8f\n", sm.total_correct / sm.total_frames)
     collectgarbage("collect")
 end
+nerv.utils.printf("cross entropy: %.8f\n", sm.total_ce)
+nerv.utils.printf("correct: %d\n", sm.total_correct)
+nerv.utils.printf("accuracy: %.3f%%\n", sm.total_correct / sm.total_frames * 100)
+nerv.utils.printf("writing back...\n")
+cf = nerv.ChunkFile("output.nerv", "w")
+for i, p in ipairs(main:get_params()) do
+    print(p)
+    cf:write_chunk(p)
+end
+cf:close()
 nerv.Matrix.print_profile()
diff --git a/layer/affine.lua b/layer/affine.lua
index 59a0e91..2cd7acb 100644
--- a/layer/affine.lua
+++ b/layer/affine.lua
@@ -41,7 +41,7 @@ function AffineLayer:init()
     self.bc:fill(0)
 end
 
-function nerv.AffineLayer:update(bp_err, input, output)
+function AffineLayer:update(bp_err, input, output)
     local ltp = self.ltp.trans
     local bp = self.bp.trans
     local ltc = self.ltc
@@ -60,13 +60,17 @@ function nerv.AffineLayer:update(bp_err, input, output)
     ltp:add(ltp, ltp, 1.0, -gconf.lrate * gconf.wcost)
 end
 
-function nerv.AffineLayer:propagate(input, output)
+function AffineLayer:propagate(input, output)
     -- apply linear transform
     output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
     -- add bias
     output[1]:add_row(self.bp.trans, 1.0)
 end
 
-function nerv.AffineLayer:back_propagate(next_bp_err, bp_err, input, output)
+function AffineLayer:back_propagate(next_bp_err, bp_err, input, output)
     next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
 end
+
+function AffineLayer:get_params()
+    return {self.ltp, self.bp}
+end
diff --git a/layer/bias.lua b/layer/bias.lua
index 6ddfe11..8cd326b 100644
--- a/layer/bias.lua
+++ b/layer/bias.lua
@@ -22,3 +22,7 @@ function BiasLayer:propagate(input, output)
     output[1]:copy_fromd(input[1])
     output[1]:add_row(self.bias.trans, 1.0)
 end
+
+function BiasLayer:get_params()
+    return {self.bias}
+end
diff --git a/layer/init.lua b/layer/init.lua
index 38bcd7f..3011f8e 100644
--- a/layer/init.lua
+++ b/layer/init.lua
@@ -58,6 +58,10 @@ function Layer:check_dim_len(len_in, len_out)
     end
 end
 
+function Layer:get_params()
+    nerv.error_method_not_implemented()
+end
+
 function Layer:get_dim()
     return self.dim_in, self.dim_out
 end
diff --git a/layer/sigmoid.lua b/layer/sigmoid.lua
index 220b7af..dd10fb9 100644
--- a/layer/sigmoid.lua
+++ b/layer/sigmoid.lua
@@ -25,3 +25,7 @@ end
 function SigmoidLayer:back_propagate(next_bp_err, bp_err, input, output)
     next_bp_err[1]:sigmoid_grad(bp_err[1], output[1])
 end
+
+function SigmoidLayer:get_params()
+    return {}
+end
diff --git a/layer/softmax_ce.lua b/layer/softmax_ce.lua
index cd57010..79e859e 100644
--- a/layer/softmax_ce.lua
+++ b/layer/softmax_ce.lua
@@ -50,3 +50,7 @@ function SoftmaxCELayer:back_propagate(next_bp_err, bp_err, input, output)
     end
     next_bp_err[1]:add(self.soutput, label, 1.0, -1.0)
 end
+
+function SoftmaxCELayer:get_params()
+    return {}
+end
diff --git a/layer/window.lua b/layer/window.lua
index 8e9e761..b381c9b 100644
--- a/layer/window.lua
+++ b/layer/window.lua
@@ -22,3 +22,7 @@ function WindowLayer:propagate(input, output)
     output[1]:copy_fromd(input[1])
     output[1]:scale_row(self.window.trans)
 end
+
+function WindowLayer:get_params()
+    return {self.window}
+end
diff --git a/nn/layer_dag.lua b/nn/layer_dag.lua
index 3951bfa..2dda7c9 100644
--- a/nn/layer_dag.lua
+++ b/nn/layer_dag.lua
@@ -38,7 +38,7 @@ local function discover(id, layers, layer_repo)
     return ref
 end
 
-function nerv.DAGLayer:__init(id, global_conf, layer_conf)
+function DAGLayer:__init(id, global_conf, layer_conf)
     local layers = {}
     local inputs = {}
     local outputs = {}
@@ -131,7 +131,7 @@ function nerv.DAGLayer:__init(id, global_conf, layer_conf)
     self.gconf = global_conf
 end
 
-function nerv.DAGLayer:init(batch_size) -- topology sort
+function DAGLayer:init(batch_size) -- topology sort
     for i, conn in ipairs(self.parsed_conn) do
         local _, output_dim
         local ref_from, port_from, ref_to, port_to
@@ -174,7 +174,7 @@ function nerv.DAGLayer:init(batch_size) -- topology sort
     end
 end
 
-function nerv.DAGLayer:set_inputs(input)
+function DAGLayer:set_inputs(input)
     for i = 1, #self.dim_in do
         local layer = self.inputs[i][1]
         local port = self.inputs[i][2]
@@ -182,7 +182,7 @@ function nerv.DAGLayer:set_inputs(input)
     end
 end
 
-function nerv.DAGLayer:set_outputs(output)
+function DAGLayer:set_outputs(output)
     for i = 1, #self.dim_out do
         local layer = self.outputs[i][1]
         local port = self.outputs[i][2]
@@ -190,7 +190,7 @@ function nerv.DAGLayer:set_outputs(output)
     end
 end
 
-function nerv.DAGLayer:set_err_inputs(bp_err)
+function DAGLayer:set_err_inputs(bp_err)
     for i = 1, #self.dim_out do
         local layer = self.outputs[i][1]
         local port = self.outputs[i][2]
@@ -198,7 +198,7 @@ function nerv.DAGLayer:set_err_inputs(bp_err)
     end
 end
 
-function nerv.DAGLayer:set_err_outputs(next_bp_err)
+function DAGLayer:set_err_outputs(next_bp_err)
     for i = 1, #self.dim_in do
         local layer = self.inputs[i][1]
         local port = self.inputs[i][2]
@@ -206,7 +206,7 @@ function nerv.DAGLayer:set_err_outputs(next_bp_err)
     end
 end
 
-function nerv.DAGLayer:update(bp_err, input, output)
+function DAGLayer:update(bp_err, input, output)
     self:set_err_inputs(bp_err)
     self:set_inputs(input)
     self:set_outputs(output)
@@ -217,7 +217,7 @@ function nerv.DAGLayer:update(bp_err, input, output)
     end
 end
 
-function nerv.DAGLayer:propagate(input, output)
+function DAGLayer:propagate(input, output)
     self:set_inputs(input)
     self:set_outputs(output)
     for i = 1, #self.queue do
@@ -227,7 +227,7 @@ function nerv.DAGLayer:propagate(input, output)
     end
 end
 
-function nerv.DAGLayer:back_propagate(next_bp_err, bp_err, input, output)
+function DAGLayer:back_propagate(next_bp_err, bp_err, input, output)
     self:set_err_outputs(next_bp_err)
     self:set_err_inputs(bp_err)
     self:set_inputs(input)
@@ -238,3 +238,13 @@ function nerv.DAGLayer:back_propagate(next_bp_err, bp_err, input, output)
         ref.layer:back_propagate(ref.err_outputs, ref.err_inputs, ref.inputs, ref.outputs)
     end
 end
+
+function DAGLayer:get_params()
+    local res = {}
+    for id, ref in pairs(self.queue) do
+        for i, p in ipairs(ref.layer:get_params()) do
+            table.insert(res, p)
+        end
+    end
+    return res
+end

--
cgit v1.2.3
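Usage note (not part of the patch): a short sketch of the get_params() contract introduced here. The custom layer name nerv.MyScaleLayer, its self.scale parameter, and the file name params.nerv are illustrative only; the two-argument nerv.class call assumes it accepts a base class name in the way the built-in layers are declared.

    -- every layer must now implement get_params(), returning a list of its
    -- parameters (an empty table for parameter-free layers such as
    -- nerv.SigmoidLayer); layers that do not override it hit
    -- nerv.error_method_not_implemented()
    local MyScaleLayer = nerv.class("nerv.MyScaleLayer", "nerv.Layer")

    function MyScaleLayer:get_params()
        return {self.scale} -- self.scale would be bound in __init from layer_conf
    end

    -- a DAGLayer concatenates the params of every layer in its queue, so a
    -- trained network can be dumped as in examples/test_nn_lib.lua:
    local cf = nerv.ChunkFile("params.nerv", "w")
    for i, p in ipairs(main:get_params()) do -- `main` is the DAG built in test_nn_lib.lua
        cf:write_chunk(p)
    end
    cf:close()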