Diffstat (limited to 'nerv')
-rw-r--r--  nerv/Makefile | 11
-rw-r--r--  nerv/examples/lmptb/bilstmlm_ptb_main.lua | 517
-rw-r--r--  nerv/examples/lmptb/bilstmlm_v2_ptb_main.lua | 522
-rw-r--r--  nerv/examples/lmptb/lm_trainer.lua | 207
-rw-r--r--  nerv/examples/lmptb/lmptb/layer/lm_affine_recurrent.lua | 2
-rw-r--r--  nerv/examples/lmptb/lmptb/layer/select_linear.lua | 6
-rw-r--r--  nerv/examples/lmptb/lmptb/lmseqreader.lua | 103
-rw-r--r--  nerv/examples/lmptb/lmptb/lmutil.lua | 6
-rw-r--r--  nerv/examples/lmptb/lmptb/lstm_t_v2.lua | 123
-rw-r--r--  nerv/examples/lmptb/lstmlm_ptb_main.lua | 504
-rw-r--r--  nerv/examples/lmptb/m-tests/lmseqreader_test.lua | 7
-rw-r--r--  nerv/examples/lmptb/m-tests/some-text-chn | 5
-rw-r--r--  nerv/examples/lmptb/m-tests/sutil_test.lua | 15
-rw-r--r--  nerv/examples/lmptb/rnnlm_ptb_main.lua (renamed from nerv/examples/lmptb/tnn_ptb_main.lua) | 94
-rw-r--r--  nerv/examples/lmptb/unfold_ptb_main.lua | 19
-rw-r--r--  nerv/init.lua | 1
-rw-r--r--  nerv/layer/affine.lua | 39
-rw-r--r--  nerv/layer/affine_recurrent.lua | 4
-rw-r--r--  nerv/layer/elem_mul.lua | 38
-rw-r--r--  nerv/layer/gate_fff.lua | 73
-rw-r--r--  nerv/layer/init.lua | 23
-rw-r--r--  nerv/layer/tanh.lua | 35
-rw-r--r--  nerv/lib/io/chunk_file.c | 2
-rw-r--r--  nerv/lib/matrix/cukernel.h | 4
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu | 78
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c | 34
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h | 3
-rw-r--r--  nerv/lib/matrix/generic/matrix.c | 8
-rw-r--r--  nerv/lib/matrix/matrix.h | 2
-rw-r--r--  nerv/matrix/generic/cumatrix.c | 43
-rw-r--r--  nerv/nn/param_repo.lua | 8
-rw-r--r--  nerv/tnn/init.lua (renamed from nerv/examples/lmptb/rnn/init.lua) | 24
-rw-r--r--  nerv/tnn/layer_dag_t.lua | 386
-rw-r--r--  nerv/tnn/layersT/dropout_t.lua | 71
-rw-r--r--  nerv/tnn/layersT/lstm_t.lua | 124
-rw-r--r--  nerv/tnn/layersT/softmax_ce_t.lua (renamed from nerv/examples/lmptb/rnn/softmax_ce_t.lua) | 16
-rw-r--r--  nerv/tnn/sutil.lua | 80
-rw-r--r--  nerv/tnn/tnn.lua (renamed from nerv/examples/lmptb/rnn/tnn.lua) | 93
38 files changed, 3162 insertions, 168 deletions
diff --git a/nerv/Makefile b/nerv/Makefile
index 55c174c..5c329f9 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -7,7 +7,7 @@ INC_PATH := $(LUA_BINDIR)/../include/nerv
LUA_DIR = $(INST_LUADIR)/nerv
OBJ_DIR := $(BUILD_DIR)/objs
ISUBDIR := io matrix luaT
-SUBDIR := matrix io layer examples nn lib/io lib/luaT lib/matrix
+SUBDIR := matrix io layer examples nn lib/io lib/luaT lib/matrix tnn/layersT
INC_SUBDIR := $(addprefix $(INC_PATH)/,$(ISUBDIR))
OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
@@ -30,10 +30,13 @@ LUAT_OBJS := $(addprefix $(OBJ_DIR)/,$(LUAT_OBJS))
OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
- layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua layer/softmax.lua \
+ layer/init.lua layer/affine.lua layer/sigmoid.lua layer/tanh.lua layer/softmax_ce.lua layer/softmax.lua \
layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua \
+ layer/elem_mul.lua layer/gate_fff.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
- io/sgd_buffer.lua
+ io/sgd_buffer.lua \
+ tnn/init.lua tnn/layer_dag_t.lua tnn/sutil.lua tnn/tnn.lua \
+ tnn/layersT/dropout_t.lua tnn/layersT/lstm_t.lua tnn/layersT/softmax_ce_t.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
#CUDA_BASE := /usr/local/cuda-7.0
@@ -41,7 +44,7 @@ CUDA_BASE := /usr/local/cuda
CUDA_INCLUDE := -I $(CUDA_BASE)/include/
INCLUDE += $(CUDA_INCLUDE)
-LDFLAGS := -L$(CUDA_BASE)/lib64/ -Wl,-rpath=$(CUDA_BASE)/lib64/ -lcudart -lcublas
+LDFLAGS := -L$(CUDA_BASE)/lib64/ -Wl,-rpath=$(CUDA_BASE)/lib64/ -lcudart -lcublas -lcurand
CFLAGS := -Wall -Wextra -O2
NVCC := $(CUDA_BASE)/bin/nvcc
NVCC_FLAGS := -Xcompiler -fPIC,-Wall,-Wextra
diff --git a/nerv/examples/lmptb/bilstmlm_ptb_main.lua b/nerv/examples/lmptb/bilstmlm_ptb_main.lua
new file mode 100644
index 0000000..0472588
--- /dev/null
+++ b/nerv/examples/lmptb/bilstmlm_ptb_main.lua
@@ -0,0 +1,517 @@
+require 'lmptb.lmvocab'
+require 'lmptb.lmfeeder'
+require 'lmptb.lmutil'
+require 'lmptb.layer.init'
+--require 'tnn.init'
+require 'lmptb.lmseqreader'
+require 'lm_trainer'
+
+--[[global function rename]]--
+--local printf = nerv.printf
+local LMTrainer = nerv.LMTrainer
+--[[global function rename ends]]--
+
+--global_conf: table
+--iter: int, -1 for the first time, otherwise the index of the parameter file to load
+--Returns: nil (the loaded ParamRepo is stored in global_conf.paramRepo)
+function prepare_parameters(global_conf, iter)
+ nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
+ if iter == -1 then --first time
+ nerv.printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
+ --[[
+ ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
+ ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
+ ltp_ih.trans:generate(global_conf.param_random)
+
+ ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
+ ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
+ ltp_hh.trans:generate(global_conf.param_random)
+
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
+
+ bp_h = nerv.BiasParam("bp_h", global_conf)
+ bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
+ bp_h.trans:generate(global_conf.param_random)
+
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
+
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:write_chunk(ltp_ih)
+ f:write_chunk(ltp_hh)
+ --f:write_chunk(ltp_ho)
+ f:write_chunk(bp_h)
+ --f:write_chunk(bp_o)
+ f:close()
+ ]]--
+ return nil
+ end
+
+ nerv.printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
+ paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
+
+ nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
+
+ return nil
+end
+
+--global_conf: table
+--Returns: nerv.LayerRepo
+function prepare_layers(global_conf)
+ nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+ local pr = global_conf.paramRepo
+
+ local du = false
+
+ local layers = {
+ ["nerv.LSTMLayerT"] = {
+ ["lstmFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ ["lstmRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ },
+
+ ["nerv.DropoutLayerT"] = {
+ ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SelectLinearLayer"] = {
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
+ },
+
+ ["nerv.CombinerLayer"] = {
+ ["combinerXL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ ["combinerHFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ ["combinerHRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ },
+
+ ["nerv.AffineLayer"] = {
+ ["biAffineL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr, ["lambda"] = {1, 1}}},
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
+ },
+
+ ["nerv.TanhLayer"] = {
+ ["biTanhL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SoftmaxCELayerT"] = {
+ ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
+ },
+ }
+
+ if global_conf.layer_num > 1 then
+ nerv.error("this script currently do not support more than one layer")
+ end
+ --[[
+ for l = 2, global_conf.layer_num do
+ layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.LSTMLayerT"]["lstmL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}}
+ layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
+ end
+ ]]--
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+ nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
+ return layerRepo
+end
+
+--global_conf: table
+--layerRepo: nerv.LayerRepo
+--Returns: a nerv.TNN
+function prepare_tnn(global_conf, layerRepo)
+ nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)
+
+ --network inputs: <input>[1] carries the current word id, <input>[2] carries the one-hot label for the softmax
+ local connections_t = {
+ {"<input>[1]", "selectL1[1]", 0},
+
+ --{"selectL1[1]", "recurrentL1[1]", 0},
+ --{"recurrentL1[1]", "sigmoidL1[1]", 0},
+ --{"sigmoidL1[1]", "combinerL1[1]", 0},
+ --{"combinerL1[1]", "recurrentL1[2]", 1},
+
+ {"selectL1[1]", "combinerXL1[1]", 0},
+ {"combinerXL1[1]", "lstmFL1[1]", 0},
+ {"lstmFL1[1]", "combinerHFL1[1]", 0},
+ {"combinerHFL1[1]", "lstmFL1[2]", 1},
+ {"lstmFL1[2]", "lstmFL1[3]", 1},
+ {"combinerXL1[2]", "lstmRL1[1]", 0},
+ {"lstmRL1[1]", "combinerHRL1[1]", 0},
+ {"combinerHRL1[1]", "lstmRL1[2]", -1},
+ {"lstmRL1[2]", "lstmRL1[3]", -1},
+ {"combinerHFL1[2]", "biAffineL1[1]", 0},
+ {"combinerHRL1[2]", "biAffineL1[2]", 0},
+ {"biAffineL1[1]", "biTanhL1[1]", 0},
+ {"biTanhL1[1]", "dropoutL1[1]", 0},
+
+ {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
+ {"outputL[1]", "softmaxL[1]", 0},
+ {"<input>[2]", "softmaxL[2]", 0},
+ {"softmaxL[1]", "<output>[1]", 0}
+ }
+
+ --[[
+ for l = 2, global_conf.layer_num do
+ table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "lstmL"..l.."[1]", 0})
+ table.insert(connections_t, {"lstmL"..l.."[2]", "lstmL"..l.."[3]", 1})
+ table.insert(connections_t, {"lstmL"..l.."[1]", "combinerL"..l.."[1]", 0})
+ table.insert(connections_t, {"combinerL"..l.."[1]", "lstmL"..l.."[2]", 1})
+ table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
+ end
+ ]]--
+
+ --[[
+ printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
+ for key, value in pairs(connections_t) do
+ printf("\t%s->%s\n", key, value)
+ end
+ ]]--
+
+ local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()},
+ ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t,
+ })
+
+ tnn:init(global_conf.batch_size, global_conf.chunk_size)
+
+ nerv.printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
+ return tnn
+end
+
+function load_net(global_conf, next_iter)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
+ local tnn = prepare_tnn(global_conf, layerRepo)
+ return tnn
+end
+
+local train_fn, valid_fn, test_fn
+global_conf = {}
+local set = arg[1] --"test"
+
+if (set == "ptb") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/ptb/DATA'
+train_fn = data_dir .. '/ptb.train.txt.adds'
+valid_fn = data_dir .. '/ptb.valid.txt.adds'
+test_fn = data_dir .. '/ptb.test.txt.adds'
+vocab_fn = data_dir .. '/vocab'
+
+qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.015, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 90,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/ptb/EXP-nerv/bilstmlm_v1.0'
+}
+
+elseif (set == "msr_sc") then
+
+data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2'
+train_fn = data_dir .. '/normed_all.sf.len60.adds.train'
+valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev'
+test_fn = data_dir .. '/answer_normed.adds'
+vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab'
+
+global_conf = {
+ lrate = 1, wcost = 1e-6, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 10,
+ max_iter = 30,
+ decay_iter = 10,
+ lr_decay = 1.003,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 400000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+}
+
+elseif (set == "twitter") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/twitter_new/DATA'
+train_fn = data_dir .. '/twitter.choose.adds'
+valid_fn = data_dir .. '/twitter.valid.adds'
+test_fn = data_dir .. '/comm.test.choose-ppl.adds'
+vocab_fn = data_dir .. '/twitter.choose.train.vocab'
+
+--qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = root_dir .. '/twitter_new/EXP-nerv/bilstmlm_v1.0'
+}
+
+else
+
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+
+global_conf = {
+ lrate = 0.01, wcost = 1e-5, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 20,
+ layer_num = 1,
+ chunk_size = 20,
+ batch_size = 10,
+ max_iter = 2,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ max_sen_len = 80,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 10, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_bilstmlm_test'
+}
+
+end
+
+lr_half = false --can not be local, to be set by loadstring
+start_iter = -1
+start_lr = global_conf.lrate
+ppl_last = 100000
+commands_str = "train:test"
+commands = {}
+test_iter = -1
+
+--for testout(question)
+q_file = "/home/slhome/txh18/workspace/ptb/questionGen/gen/ptb.test.txt.q10rs1_Msss.adds"
+
+if arg[2] ~= nil then
+ nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
+ loadstring(arg[2])()
+ nerv.LMUtil.wait(0.5)
+else
+ nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
+end
+
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
+global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str ..os.date("_TT%m_%d_%X",os.time())
+global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
+commands = nerv.SUtil.parse_commands_set(commands_str)
+
+global_conf.lrate = start_lr --starting lr can be set by user(arg[2])
+
+nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
+nerv.LMUtil.wait(2)
+os.execute("mkdir -p "..global_conf.work_dir)
+os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
+
+--redirecting log outputs!
+nerv.SUtil.log_redirect(global_conf.log_fn)
+nerv.LMUtil.wait(2)
+
+----------------printing options---------------------------------
+nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
+for id, value in pairs(global_conf) do
+ nerv.printf("%s:\t%s\n", id, tostring(value))
+end
+nerv.LMUtil.wait(2)
+
+nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
+nerv.printf("lr_half:\t%s\n", tostring(lr_half))
+nerv.printf("start_iter:\t%s\n", tostring(start_iter))
+nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
+nerv.printf("commands_str:\t%s\n", commands_str)
+nerv.printf("test_iter:\t%s\n", tostring(test_iter))
+nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
+nerv.LMUtil.wait(2)
+------------------printing options end------------------------------
+
+math.randomseed(1)
+
+local vocab = nerv.LMVocab()
+global_conf["vocab"] = vocab
+nerv.printf("%s building vocab...\n", global_conf.sche_log_pre)
+global_conf.vocab:build_file(global_conf.vocab_fn, false)
+ppl_rec = {}
+
+local final_iter = -1
+if commands["train"] == 1 then
+ if start_iter == -1 then
+ prepare_parameters(global_conf, -1) --write pre_generated params to param.0 file
+ end
+
+ if start_iter == -1 or start_iter == 0 then
+ nerv.printf("===INITIAL VALIDATION===\n")
+ local tnn = load_net(global_conf, 0)
+ global_conf.paramRepo = tnn:get_params() --get auto-generated params
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
+ global_conf.dropout_rate = 0
+ local result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ nerv.LMUtil.wait(1)
+ ppl_rec[0] = {}
+ ppl_rec[0].valid = result:ppl_all("birnn")
+ ppl_last = ppl_rec[0].valid
+ ppl_rec[0].train = 0
+ ppl_rec[0].test = 0
+ ppl_rec[0].lr = 0
+
+ start_iter = 1
+
+ nerv.printf("\n")
+ end
+
+ for iter = start_iter, global_conf.max_iter, 1 do
+ final_iter = iter --for final testing
+ global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
+ tnn = load_net(global_conf, iter - 1)
+ nerv.printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
+ global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ global_conf.dropout_rate = 0
+ ppl_rec[iter] = {}
+ ppl_rec[iter].train = result:ppl_all("birnn")
+ --shuffling training file
+ nerv.printf("%s shuffling training file\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
+ os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
+ nerv.printf("===PEEK ON TEST %d===\n", iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+ ppl_rec[iter].test = result:ppl_all("birnn")
+ nerv.printf("===VALIDATION %d===\n", iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ ppl_rec[iter].valid = result:ppl_all("birnn")
+ ppl_rec[iter].lr = global_conf.lrate
+ if ((ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true) and iter > global_conf.decay_iter) then
+ global_conf.lrate = (global_conf.lrate * 0.6)
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ nerv.printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ else
+ nerv.printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
+ end
+ if ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true then
+ lr_half = true
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ ppl_last = ppl_rec[iter].valid
+ end
+ nerv.printf("\n")
+ nerv.LMUtil.wait(2)
+ end
+ nerv.info("saving final nn to param.final")
+ os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(final_iter) .. ' ' .. global_conf.param_fn .. '.final')
+
+ nerv.printf("===VALIDATION PPL record===\n")
+ for i, _ in pairs(ppl_rec) do
+ nerv.printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
+ end
+ nerv.printf("\n")
+end --if commands["train"]
+
+if commands["test"] == 1 then
+ nerv.printf("===FINAL TEST===\n")
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+end --if commands["test"]
+
+if commands["testout"] == 1 then
+ nerv.printf("===TEST OUT===\n")
+ nerv.printf("q_file:\t%s\n", q_file)
+ local q_fn = q_file --qdata_dir .. '/' .. q_file
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_birnn(global_conf, q_fn, tnn, false,
+ {["one_sen_report"] = true}) --false update!
+end --if commands["testout"]
+
+
diff --git a/nerv/examples/lmptb/bilstmlm_v2_ptb_main.lua b/nerv/examples/lmptb/bilstmlm_v2_ptb_main.lua
new file mode 100644
index 0000000..4f52f29
--- /dev/null
+++ b/nerv/examples/lmptb/bilstmlm_v2_ptb_main.lua
@@ -0,0 +1,522 @@
+--[[
+The bilstm_v2 script slightly changes the TNN structure so that the current prediction has no information about the very word it is predicting, so we should not see an unrealistically low PPL
+]]--
+require 'lmptb.lmvocab'
+require 'lmptb.lmfeeder'
+require 'lmptb.lmutil'
+require 'lmptb.layer.init'
+--require 'tnn.init'
+require 'lmptb.lmseqreader'
+require 'lm_trainer'
+
+--[[global function rename]]--
+--local printf = nerv.printf
+local LMTrainer = nerv.LMTrainer
+--[[global function rename ends]]--
+
+--global_conf: table
+--iter: int, -1 for the first time, otherwise the index of the parameter file to load
+--Returns: nil (the loaded ParamRepo is stored in global_conf.paramRepo)
+function prepare_parameters(global_conf, iter)
+ nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
+ if iter == -1 then --first time
+ nerv.printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
+ --[[
+ ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
+ ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
+ ltp_ih.trans:generate(global_conf.param_random)
+
+ ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
+ ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
+ ltp_hh.trans:generate(global_conf.param_random)
+
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
+
+ bp_h = nerv.BiasParam("bp_h", global_conf)
+ bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
+ bp_h.trans:generate(global_conf.param_random)
+
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
+
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:write_chunk(ltp_ih)
+ f:write_chunk(ltp_hh)
+ --f:write_chunk(ltp_ho)
+ f:write_chunk(bp_h)
+ --f:write_chunk(bp_o)
+ f:close()
+ ]]--
+ return nil
+ end
+
+ nerv.printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
+ paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
+
+ nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
+
+ return nil
+end
+
+--global_conf: table
+--Returns: nerv.LayerRepo
+function prepare_layers(global_conf)
+ nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+ local pr = global_conf.paramRepo
+
+ local du = false
+
+ local layers = {
+ ["nerv.LSTMLayerT"] = {
+ ["lstmFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ ["lstmRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ },
+
+ ["nerv.DropoutLayerT"] = {
+ ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SelectLinearLayer"] = {
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
+ },
+
+ ["nerv.CombinerLayer"] = {
+ ["combinerXL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ ["combinerHFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ ["combinerHRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ },
+
+ ["nerv.AffineLayer"] = {
+ ["biAffineL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr, ["lambda"] = {1, 1}}},
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
+ },
+
+ ["nerv.TanhLayer"] = {
+ ["biTanhL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SoftmaxCELayerT"] = {
+ ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
+ },
+ }
+
+ if global_conf.layer_num > 1 then
+ nerv.error("this script currently do not support more than one layer")
+ end
+ --[[
+ for l = 2, global_conf.layer_num do
+ layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.LSTMLayerT"]["lstmL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}}
+ layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
+ end
+ ]]--
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+ nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
+ return layerRepo
+end
+
+--global_conf: table
+--layerRepo: nerv.LayerRepo
+--Returns: a nerv.TNN
+function prepare_tnn(global_conf, layerRepo)
+ nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)
+
+ --network inputs: <input>[1] carries the current word id, <input>[2] carries the one-hot label for the softmax
+ local connections_t = {
+ {"<input>[1]", "selectL1[1]", 0},
+
+ --{"selectL1[1]", "recurrentL1[1]", 0},
+ --{"recurrentL1[1]", "sigmoidL1[1]", 0},
+ --{"sigmoidL1[1]", "combinerL1[1]", 0},
+ --{"combinerL1[1]", "recurrentL1[2]", 1},
+
+ {"selectL1[1]", "combinerXL1[1]", 0},
+ {"combinerXL1[1]", "lstmFL1[1]", 0},
+ {"lstmFL1[1]", "combinerHFL1[1]", 0},
+ {"combinerHFL1[1]", "lstmFL1[2]", 1},
+ {"lstmFL1[2]", "lstmFL1[3]", 1},
+ {"combinerXL1[2]", "lstmRL1[1]", 0},
+ {"lstmRL1[1]", "combinerHRL1[1]", 0},
+ {"combinerHRL1[1]", "lstmRL1[2]", -1},
+ {"lstmRL1[2]", "lstmRL1[3]", -1},
+ {"combinerHFL1[2]", "biAffineL1[1]", 0},
+ {"combinerHRL1[2]", "biAffineL1[2]", -2},
+ {"biAffineL1[1]", "biTanhL1[1]", 0},
+ {"biTanhL1[1]", "dropoutL1[1]", 0},
+
+ {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
+ {"outputL[1]", "softmaxL[1]", 0},
+ {"<input>[2]", "softmaxL[2]", 0},
+ {"softmaxL[1]", "<output>[1]", 0}
+ }
+
+ --[[
+ for l = 2, global_conf.layer_num do
+ table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "lstmL"..l.."[1]", 0})
+ table.insert(connections_t, {"lstmL"..l.."[2]", "lstmL"..l.."[3]", 1})
+ table.insert(connections_t, {"lstmL"..l.."[1]", "combinerL"..l.."[1]", 0})
+ table.insert(connections_t, {"combinerL"..l.."[1]", "lstmL"..l.."[2]", 1})
+ table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
+ end
+ ]]--
+
+ --[[
+ printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
+ for key, value in pairs(connections_t) do
+ printf("\t%s->%s\n", key, value)
+ end
+ ]]--
+
+ local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()},
+ ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t,
+ })
+
+ tnn:init(global_conf.batch_size, global_conf.chunk_size)
+
+ nerv.printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
+ return tnn
+end
+
+function load_net(global_conf, next_iter)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
+ local tnn = prepare_tnn(global_conf, layerRepo)
+ return tnn
+end
+
+local train_fn, valid_fn, test_fn
+global_conf = {}
+local set = arg[1] --"test"
+
+if (set == "ptb") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/ptb/DATA'
+train_fn = data_dir .. '/ptb.train.txt.adds'
+valid_fn = data_dir .. '/ptb.valid.txt.adds'
+test_fn = data_dir .. '/ptb.test.txt.adds'
+vocab_fn = data_dir .. '/vocab'
+
+qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.015, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 90,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/ptb/EXP-nerv/bilstmlm_v2.0'
+}
+
+elseif (set == "msr_sc") then
+
+data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2'
+train_fn = data_dir .. '/normed_all.sf.len60.adds.train'
+valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev'
+test_fn = data_dir .. '/answer_normed.adds'
+vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab'
+
+global_conf = {
+ lrate = 1, wcost = 1e-6, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 10,
+ max_iter = 30,
+ decay_iter = 10,
+ lr_decay = 1.003,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 400000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+}
+
+elseif (set == "twitter") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/twitter_new/DATA'
+train_fn = data_dir .. '/twitter.choose.adds'
+valid_fn = data_dir .. '/twitter.valid.adds'
+test_fn = data_dir .. '/comm.test.choose-ppl.adds'
+vocab_fn = data_dir .. '/twitter.choose.train.vocab'
+
+--qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = root_dir .. '/twitter_new/EXP-nerv/bilstmlm_v1.0'
+}
+
+else
+
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+
+global_conf = {
+ lrate = 0.01, wcost = 1e-5, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 20,
+ layer_num = 1,
+ chunk_size = 20,
+ batch_size = 10,
+ max_iter = 2,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ max_sen_len = 80,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 10, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_bilstmlm_test'
+}
+
+end
+
+lr_half = false --can not be local, to be set by loadstring
+start_iter = -1
+ppl_last = 100000
+commands_str = "train:test"
+commands = {}
+test_iter = -1
+start_lr = nil
+
+--for testout(question)
+q_file = "/home/slhome/txh18/workspace/ptb/questionGen/gen/ptb.test.txt.q10rs1_Msss.adds"
+
+if arg[2] ~= nil then
+ nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
+ loadstring(arg[2])()
+ nerv.LMUtil.wait(0.5)
+else
+ nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
+end
+
+
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num --.. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
+global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str ..os.date("_TT%m_%d_%X",os.time())
+global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
+commands = nerv.SUtil.parse_commands_set(commands_str)
+if start_lr ~= nil then
+ global_conf.lrate = start_lr --starting lr can be set by user(arg[2])
+end
+
+nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
+nerv.LMUtil.wait(2)
+os.execute("mkdir -p "..global_conf.work_dir)
+os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
+
+--redirecting log outputs!
+nerv.SUtil.log_redirect(global_conf.log_fn)
+nerv.LMUtil.wait(2)
+
+----------------printing options---------------------------------
+nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
+for id, value in pairs(global_conf) do
+ nerv.printf("%s:\t%s\n", id, tostring(value))
+end
+nerv.LMUtil.wait(2)
+
+nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
+nerv.printf("lr_half:\t%s\n", tostring(lr_half))
+nerv.printf("start_iter:\t%s\n", tostring(start_iter))
+nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
+nerv.printf("commands_str:\t%s\n", commands_str)
+nerv.printf("test_iter:\t%s\n", tostring(test_iter))
+nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
+nerv.LMUtil.wait(2)
+------------------printing options end------------------------------
+
+math.randomseed(1)
+
+local vocab = nerv.LMVocab()
+global_conf["vocab"] = vocab
+nerv.printf("%s building vocab...\n", global_conf.sche_log_pre)
+global_conf.vocab:build_file(global_conf.vocab_fn, false)
+ppl_rec = {}
+
+local final_iter = -1
+if commands["train"] == 1 then
+ if start_iter == -1 then
+ prepare_parameters(global_conf, -1) --write pre_generated params to param.0 file
+ end
+
+ if start_iter == -1 or start_iter == 0 then
+ nerv.printf("===INITIAL VALIDATION===\n")
+ local tnn = load_net(global_conf, 0)
+ global_conf.paramRepo = tnn:get_params() --get auto-generated params
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
+ global_conf.dropout_rate = 0
+ local result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ nerv.LMUtil.wait(1)
+ ppl_rec[0] = {}
+ ppl_rec[0].valid = result:ppl_all("birnn")
+ ppl_last = ppl_rec[0].valid
+ ppl_rec[0].train = 0
+ ppl_rec[0].test = 0
+ ppl_rec[0].lr = 0
+
+ start_iter = 1
+
+ nerv.printf("\n")
+ end
+
+ for iter = start_iter, global_conf.max_iter, 1 do
+ final_iter = iter --for final testing
+ global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
+ tnn = load_net(global_conf, iter - 1)
+ nerv.printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
+ global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ global_conf.dropout_rate = 0
+ ppl_rec[iter] = {}
+ ppl_rec[iter].train = result:ppl_all("birnn")
+ --shuffling training file
+ nerv.printf("%s shuffling training file\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
+ os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
+ nerv.printf("===PEEK ON TEST %d===\n", iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+ ppl_rec[iter].test = result:ppl_all("birnn")
+ nerv.printf("===VALIDATION %d===\n", iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ ppl_rec[iter].valid = result:ppl_all("birnn")
+ ppl_rec[iter].lr = global_conf.lrate
+ if ((ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true) and iter > global_conf.decay_iter) then
+ global_conf.lrate = (global_conf.lrate * 0.6)
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ nerv.printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ else
+ nerv.printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
+ end
+ if ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true then
+ lr_half = true
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ ppl_last = ppl_rec[iter].valid
+ end
+ nerv.printf("\n")
+ nerv.LMUtil.wait(2)
+ end
+ nerv.info("saving final nn to param.final")
+ os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(final_iter) .. ' ' .. global_conf.param_fn .. '.final')
+
+ nerv.printf("===VALIDATION PPL record===\n")
+ for i, _ in pairs(ppl_rec) do
+ nerv.printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
+ end
+ nerv.printf("\n")
+end --if commands["train"]
+
+if commands["test"] == 1 then
+ nerv.printf("===FINAL TEST===\n")
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+end --if commands["test"]
+
+if commands["testout"] == 1 then
+ nerv.printf("===TEST OUT===\n")
+ nerv.printf("q_file:\t%s\n", q_file)
+ local q_fn = q_file --qdata_dir .. '/' .. q_file
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_birnn(global_conf, q_fn, tnn, false,
+ {["one_sen_report"] = true}) --false update!
+end --if commands["testout"]
+
+
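Read side by side with bilstmlm_ptb_main.lua above, the only structural difference in the v2 script is the time delay on the backward hidden state feeding biAffineL1. A minimal Lua sketch of that difference, assuming (as the recurrent connections above suggest) that the third field of a connection {src, dst, delay} means the destination at time t reads the source from time t - delay:

-- bilstmlm_ptb_main.lua: the backward hidden state of the same time step feeds
-- the prediction; since the backward LSTM has already read words t, t+1, ...,
-- the target word t+1 leaks into the prediction at time t.
local v1_conn = {"combinerHRL1[2]", "biAffineL1[2]", 0}

-- bilstmlm_v2_ptb_main.lua: the backward hidden state is taken from time t + 2,
-- which appears to cover only words t+2 onwards, so the target word is excluded
-- and the reported PPL is no longer unrealistically low.
local v2_conn = {"combinerHRL1[2]", "biAffineL1[2]", -2}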
diff --git a/nerv/examples/lmptb/lm_trainer.lua b/nerv/examples/lmptb/lm_trainer.lua
index 62d8b50..eab6e2d 100644
--- a/nerv/examples/lmptb/lm_trainer.lua
+++ b/nerv/examples/lmptb/lm_trainer.lua
@@ -2,26 +2,59 @@ require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
require 'lmptb.layer.init'
-require 'rnn.init'
+--require 'tnn.init'
require 'lmptb.lmseqreader'
local LMTrainer = nerv.class('nerv.LMTrainer')
-local printf = nerv.printf
+--local printf = nerv.printf
+
+--The bias param update in nerv does not have wcost added
+function nerv.BiasParam:update_by_gradient(gradient)
+ local gconf = self.gconf
+ local l2 = 1 - gconf.lrate * gconf.wcost
+ self:_update_by_gradient(gradient, l2, l2)
+end
--Returns: LMResult
-function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
- local reader = nerv.LMSeqReader(global_conf, global_conf.batch_size, global_conf.chunk_size, global_conf.vocab)
+function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train, p_conf)
+ if p_conf == nil then
+ p_conf = {}
+ end
+ local reader
+ local r_conf = {}
+ local chunk_size, batch_size
+ if p_conf.one_sen_report == true then --report log prob sentence by sentence
+ if do_train == true then
+ nerv.warning("LMTrainer.lm_process_file_rnn: warning, one_sen_report is true while do_train is also true, strange")
+ end
+ nerv.printf("lm_process_file_rnn: one_sen report mode, set batch_size to 1 and chunk_size to max_sen_len(%d)\n",
+ global_conf.max_sen_len)
+ batch_size = 1
+ chunk_size = global_conf.max_sen_len
+ r_conf["se_mode"] = true
+ else
+ batch_size = global_conf.batch_size
+ chunk_size = global_conf.chunk_size
+ end
+
+ reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab, r_conf)
reader:open_file(fn)
+
local result = nerv.LMResult(global_conf, global_conf.vocab)
result:init("rnn")
+ if global_conf.dropout_rate ~= nil then
+ nerv.info("LMTrainer.lm_process_file_rnn: dropout_rate is %f", global_conf.dropout_rate)
+ end
global_conf.timer:flush()
+ tnn:init(batch_size, chunk_size)
tnn:flush_all() --caution: will also flush the inputs from the reader!
local next_log_wcn = global_conf.log_w_num
- local neto_bakm = global_conf.mmat_type(global_conf.batch_size, 1) --space backup matrix for network output
-
+ local neto_bakm = global_conf.mmat_type(batch_size, 1) --space backup matrix for network output
+
+ nerv.info("LMTrainer.lm_process_file_rnn: begin processing...")
while (1) do
global_conf.timer:tic('most_out_loop_lmprocessfile')
@@ -32,9 +65,9 @@ function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
break
end
- for t = 1, global_conf.chunk_size do
+ for t = 1, chunk_size do
tnn.err_inputs_m[t][1]:fill(1)
- for i = 1, global_conf.batch_size do
+ for i = 1, batch_size do
if bit.band(feeds.flags_now[t][i], nerv.TNN.FC.HAS_LABEL) == 0 then
tnn.err_inputs_m[t][1][i - 1][0] = 0
end
@@ -60,15 +93,26 @@ function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
end
global_conf.timer:tic('tnn_afterprocess')
- for t = 1, global_conf.chunk_size, 1 do
+ local sen_logp = {}
+ for t = 1, chunk_size, 1 do
tnn.outputs_m[t][1]:copy_toh(neto_bakm)
- for i = 1, global_conf.batch_size, 1 do
+ for i = 1, batch_size, 1 do
if (feeds.labels_s[t][i] ~= global_conf.vocab.null_token) then
--result:add("rnn", feeds.labels_s[t][i], math.exp(tnn.outputs_m[t][1][i - 1][0]))
result:add("rnn", feeds.labels_s[t][i], math.exp(neto_bakm[i - 1][0]))
+ if sen_logp[i] == nil then
+ sen_logp[i] = 0
+ end
+ sen_logp[i] = sen_logp[i] + neto_bakm[i - 1][0]
end
end
end
+ if p_conf.one_sen_report == true then
+ for i = 1, batch_size do
+ nerv.printf("LMTrainer.lm_process_file_rnn: one_sen_report_output, %f\n", sen_logp[i])
+ end
+ end
+
tnn:move_right_to_nextmb({0}) --only copy for time 0
global_conf.timer:toc('tnn_afterprocess')
@@ -77,10 +121,10 @@ function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
--print log
if result["rnn"].cn_w > next_log_wcn then
next_log_wcn = next_log_wcn + global_conf.log_w_num
- printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["rnn"].cn_w, os.date())
- printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("rnn"))
+ nerv.printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["rnn"].cn_w, os.date())
+ nerv.printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("rnn"))
for key, value in pairs(global_conf.timer.rec) do
- printf("\t [global_conf.timer]: time spent on %s:%.5f clock time\n", key, value)
+ nerv.printf("\t [global_conf.timer]: time spent on %s:%.5f clock time\n", key, value)
end
global_conf.timer:flush()
nerv.LMUtil.wait(0.1)
@@ -92,15 +136,146 @@ function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
end
]]--
+ collectgarbage("collect")
+
+ --break --debug
+ end
+
+ nerv.printf("%s Displaying result:\n", global_conf.sche_log_pre)
+ nerv.printf("%s %s\n", global_conf.sche_log_pre, result:status("rnn"))
+ nerv.printf("%s Doing on %s end.\n", global_conf.sche_log_pre, fn)
+
+ return result
+end
+
+--Returns: LMResult
+function LMTrainer.lm_process_file_birnn(global_conf, fn, tnn, do_train, p_conf)
+ if p_conf == nil then
+ p_conf = {}
+ end
+ local reader
+ local chunk_size, batch_size
+ local r_conf = {["se_mode"] = true}
+ if p_conf.one_sen_report == true then --report log prob sentence by sentence
+ if do_train == true then
+ nerv.warning("LMTrainer.lm_process_file_birnn: warning, one_sen_report is true while do_train is also true, strange")
+ end
+ nerv.printf("lm_process_file_birnn: one_sen report mode, set batch_size to 1 and chunk_size to max_sen_len(%d)\n",
+ global_conf.max_sen_len)
+ batch_size = 1
+ chunk_size = global_conf.max_sen_len
+ else
+ batch_size = global_conf.batch_size
+ chunk_size = global_conf.chunk_size
+ end
+
+ reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab, r_conf)
+ reader:open_file(fn)
+
+ local result = nerv.LMResult(global_conf, global_conf.vocab)
+ result:init("birnn")
+ if global_conf.dropout_rate ~= nil then
+ nerv.info("LMTrainer.lm_process_file_birnn: dropout_rate is %f", global_conf.dropout_rate)
+ end
+
+ global_conf.timer:flush()
+ tnn:init(batch_size, chunk_size)
+ tnn:flush_all() --caution: will also flush the inputs from the reader!
+
+ local next_log_wcn = global_conf.log_w_num
+ local neto_bakm = global_conf.mmat_type(batch_size, 1) --space backup matrix for network output
+
+ nerv.info("LMTrainer.lm_process_file_birnn: begin processing...")
+
+ while (1) do
+ global_conf.timer:tic('most_out_loop_lmprocessfile')
+
+ local r, feeds
+ global_conf.timer:tic('tnn_beforeprocess')
+ r, feeds = tnn:getfeed_from_reader(reader)
+ if r == false then
+ break
+ end
+ for t = 1, chunk_size do
+ tnn.err_inputs_m[t][1]:fill(1)
+ for i = 1, batch_size do
+ if bit.band(feeds.flags_now[t][i], nerv.TNN.FC.HAS_LABEL) == 0 then
+ tnn.err_inputs_m[t][1][i - 1][0] = 0
+ end
+ end
+ end
+ global_conf.timer:toc('tnn_beforeprocess')
+
+ --[[
+ for j = 1, global_conf.chunk_size, 1 do
+ for i = 1, global_conf.batch_size, 1 do
+ printf("%s[L(%s)] ", feeds.inputs_s[j][i], feeds.labels_s[j][i]) --vocab:get_word_str(input[i][j]).id
+ end
+ printf("\n")
+ end
+ printf("\n")
+ ]]--
+
+ tnn:net_propagate()
+
+ if do_train == true then
+ tnn:net_backpropagate(false)
+ tnn:net_backpropagate(true)
+ end
+
+ global_conf.timer:tic('tnn_afterprocess')
+ local sen_logp = {}
+ for t = 1, chunk_size, 1 do
+ tnn.outputs_m[t][1]:copy_toh(neto_bakm)
+ for i = 1, batch_size, 1 do
+ if (feeds.labels_s[t][i] ~= global_conf.vocab.null_token) then
+ result:add("birnn", feeds.labels_s[t][i], math.exp(neto_bakm[i - 1][0]))
+ if sen_logp[i] == nil then
+ sen_logp[i] = 0
+ end
+ sen_logp[i] = sen_logp[i] + neto_bakm[i - 1][0]
+ end
+ end
+ end
+ if p_conf.one_sen_report == true then
+ for i = 1, batch_size do
+ nerv.printf("LMTrainer.lm_process_file_birnn: one_sen_report_output, %f\n", sen_logp[i])
+ end
+ end
+
+ --tnn:move_right_to_nextmb({0}) --do not need history for bi directional model
+ global_conf.timer:toc('tnn_afterprocess')
+
+ --tnn:flush_all() --you need this for bilstmlm_ptb_v2, because it has connection across 2 time steps
+
+ global_conf.timer:toc('most_out_loop_lmprocessfile')
+
+ --print log
+ if result["birnn"].cn_w > next_log_wcn then
+ next_log_wcn = next_log_wcn + global_conf.log_w_num
+ nerv.printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["birnn"].cn_w, os.date())
+ nerv.printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("birnn"))
+ for key, value in pairs(global_conf.timer.rec) do
+ nerv.printf("\t [global_conf.timer]: time spent on %s:%.5f clock time\n", key, value)
+ end
+ global_conf.timer:flush()
+ nerv.LMUtil.wait(0.1)
+ end
+
+ --[[
+ for t = 1, global_conf.chunk_size do
+ print(tnn.outputs_m[t][1])
+ end
+ ]]--
collectgarbage("collect")
--break --debug
end
- printf("%s Displaying result:\n", global_conf.sche_log_pre)
- printf("%s %s\n", global_conf.sche_log_pre, result:status("rnn"))
- printf("%s Doing on %s end.\n", global_conf.sche_log_pre, fn)
+ nerv.printf("%s Displaying result:\n", global_conf.sche_log_pre)
+ nerv.printf("%s %s\n", global_conf.sche_log_pre, result:status("birnn"))
+ nerv.printf("%s Doing on %s end.\n", global_conf.sche_log_pre, fn)
return result
end
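For reference, a minimal sketch of how the new one_sen_report mode is driven; it mirrors the "testout" command in the main scripts above, and lm_process_file_birnn then forces batch_size = 1, sets chunk_size to global_conf.max_sen_len and opens the reader in se_mode:

-- Score each sentence of a file separately and print one log-probability line
-- per sentence; training should be off (do_train = false) in this mode.
global_conf.dropout_rate = 0
local result = LMTrainer.lm_process_file_birnn(global_conf, q_fn, tnn, false,
                                               {["one_sen_report"] = true})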
diff --git a/nerv/examples/lmptb/lmptb/layer/lm_affine_recurrent.lua b/nerv/examples/lmptb/lmptb/layer/lm_affine_recurrent.lua
index a5ecce1..c43e567 100644
--- a/nerv/examples/lmptb/lmptb/layer/lm_affine_recurrent.lua
+++ b/nerv/examples/lmptb/lmptb/layer/lm_affine_recurrent.lua
@@ -14,7 +14,7 @@ function LMRecurrent:propagate(input, output)
output[1]:copy_fromd(input[1])
if (self.independent == true) then
for i = 1, input[1]:nrow() do
- if (self.gconf.input_word_id[self.id][0][i - 1] == self.break_id) then --here is sentence break
+ if (self.gconf.input_word_id[self.id][i - 1][0] == self.break_id) then --here is sentence break
input[2][i - 1]:fill(0)
end
end
diff --git a/nerv/examples/lmptb/lmptb/layer/select_linear.lua b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
index e96296f..431ef3a 100644
--- a/nerv/examples/lmptb/lmptb/layer/select_linear.lua
+++ b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
@@ -10,9 +10,9 @@ function SL:__init(id, global_conf, layer_conf)
self.dim_out = layer_conf.dim_out
self.gconf = global_conf
- self.ltp = layer_conf.ltp
self.vocab = layer_conf.vocab
-
+ self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.vocab:size(), self.dim_out[1]}) --layer_conf.ltp
+
self:check_dim_len(1, 1)
end
@@ -30,7 +30,7 @@ function SL:init(batch_size)
end
function SL:update(bp_err, input, output)
- --use this to produce reproducable result
+ --use this to produce a reproducible result, don't forget to set the dropout to zero!
--for i = 1, input[1]:nrow(), 1 do
-- local word_vec = self.ltp.trans[input[1][i - 1][0]]
-- word_vec:add(word_vec, bp_err[1][i - 1], 1, - self.gconf.lrate / self.gconf.batch_size)
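The change above replaces the mandatory layer_conf.ltp with a find_param lookup. Judging from the "auto-generation" comments in the main scripts, find_param returns the parameter from the repo when it exists and otherwise creates one of the given type and shape; the bias lookup below is a hypothetical sketch of the same pattern, not code from this commit:

-- Assumption: find_param(name, layer_conf, global_conf, param_type, shape)
-- looks the parameter up in the ParamRepo (layer_conf.pr) and auto-generates
-- one of the given shape if it is missing, as the ltp lookup above does.
self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam,
                          {1, self.dim_out[1]})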
diff --git a/nerv/examples/lmptb/lmptb/lmseqreader.lua b/nerv/examples/lmptb/lmptb/lmseqreader.lua
index cc805a4..ed791d2 100644
--- a/nerv/examples/lmptb/lmptb/lmseqreader.lua
+++ b/nerv/examples/lmptb/lmptb/lmseqreader.lua
@@ -1,4 +1,5 @@
require 'lmptb.lmvocab'
+--require 'tnn.init'
local LMReader = nerv.class("nerv.LMSeqReader")
@@ -7,7 +8,7 @@ local printf = nerv.printf
--global_conf: table
--batch_size: int
--vocab: nerv.LMVocab
-function LMReader:__init(global_conf, batch_size, chunk_size, vocab)
+function LMReader:__init(global_conf, batch_size, chunk_size, vocab, r_conf)
self.gconf = global_conf
self.fh = nil --file handle to read, nil means currently no file
self.batch_size = batch_size
@@ -15,6 +16,13 @@ function LMReader:__init(global_conf, batch_size, chunk_size, vocab)
self.log_pre = "[LOG]LMSeqReader:"
self.vocab = vocab
self.streams = nil
+ if r_conf == nil then
+ r_conf = {}
+ end
+ self.se_mode = false --sentence-end mode: when a sentence end is met, the rest of the stream will be null
+ if r_conf.se_mode == true then
+ self.se_mode = true
+ end
end
--fn: string
@@ -24,18 +32,20 @@ function LMReader:open_file(fn)
nerv.error("%s error: in open_file(fn is %s), file handle not nil.", self.log_pre, fn)
end
printf("%s opening file %s...\n", self.log_pre, fn)
- print("batch_size:", self.batch_size, "chunk_size", self.chunk_size)
+ print(self.log_pre, "batch_size:", self.batch_size, "chunk_size", self.chunk_size)
+ print(self.log_pre, "se_mode:", self.se_mode)
self.fh = io.open(fn, "r")
self.streams = {}
for i = 1, self.batch_size, 1 do
self.streams[i] = {["store"] = {}, ["head"] = 1, ["tail"] = 0}
end
-
+ self.stat = {} --stat collected during file reading
+ self.stat.al_sen_start = true --check whether it's always sentence_start at the beginning of a minibatch
self.bak_inputs_m = {} --backup MMatrix for temporary storage (then copied to TNN CuMatrix)
for j = 1, self.chunk_size, 1 do
self.bak_inputs_m[j] = {}
self.bak_inputs_m[j][1] = self.gconf.mmat_type(self.batch_size, 1)
- self.bak_inputs_m[j][2] = self.gconf.mmat_type(self.batch_size, self.vocab:size()) --since MMatrix does not yet have fill, this m[j][2] is not used
+ --self.bak_inputs_m[j][2] = self.gconf.mmat_type(self.batch_size, self.vocab:size()) --since MMatrix does not yet have fill, this m[j][2] is not used
end
end
@@ -98,44 +108,57 @@ function LMReader:get_batch(feeds)
end
for i = 1, self.batch_size, 1 do
local st = self.streams[i]
+ local end_stream = false --used for se_mode, indicating that this stream is ended
for j = 1, self.chunk_size, 1 do
flags[j][i] = 0
- self:refresh_stream(i)
- if st.store[st.head] ~= nil then
- inputs_s[j][i] = st.store[st.head]
- --inputs_m[j][1][i - 1][0] = self.vocab:get_word_str(st.store[st.head]).id - 1
- self.bak_inputs_m[j][1][i - 1][0] = self.vocab:get_word_str(st.store[st.head]).id - 1
- else
+ if end_stream == true then
+ if self.se_mode == false then
+ nerv.error("lmseqreader:getbatch: error, end_stream is true while se_mode is false")
+ end
inputs_s[j][i] = self.vocab.null_token
- --inputs_m[j][1][i - 1][0] = 0
self.bak_inputs_m[j][1][i - 1][0] = 0
- end
- if st.store[st.head + 1] ~= nil then
- labels_s[j][i] = st.store[st.head + 1]
- inputs_m[j][2][i - 1][self.vocab:get_word_str(st.store[st.head + 1]).id - 1] = 1
+ labels_s[j][i] = self.vocab.null_token
else
- if (inputs_s[j][i] ~= self.vocab.null_token) then
- nerv.error("reader error : input not null but label is null_token")
+ self:refresh_stream(i)
+ if st.store[st.head] ~= nil then
+ inputs_s[j][i] = st.store[st.head]
+ --inputs_m[j][1][i - 1][0] = self.vocab:get_word_str(st.store[st.head]).id - 1
+ self.bak_inputs_m[j][1][i - 1][0] = self.vocab:get_word_str(st.store[st.head]).id - 1
+ else
+ inputs_s[j][i] = self.vocab.null_token
+ --inputs_m[j][1][i - 1][0] = 0
+ self.bak_inputs_m[j][1][i - 1][0] = 0
end
- labels_s[j][i] = self.vocab.null_token
- end
- if (inputs_s[j][i] ~= self.vocab.null_token) then
- if (labels_s[j][i] == self.vocab.null_token) then
- nerv.error("reader error : label is null while input is not null")
+ if st.store[st.head + 1] ~= nil then
+ labels_s[j][i] = st.store[st.head + 1]
+ inputs_m[j][2][i - 1][self.vocab:get_word_str(st.store[st.head + 1]).id - 1] = 1
+ else
+ if (inputs_s[j][i] ~= self.vocab.null_token) then
+ nerv.error("reader error : input not null but label is null_token")
+ end
+ labels_s[j][i] = self.vocab.null_token
end
- flags[j][i] = bit.bor(flags[j][i], nerv.TNN.FC.SEQ_NORM)
- got_new = true
- st.store[st.head] = nil
- st.head = st.head + 1
- if labels_s[j][i] == self.vocab.sen_end_token then
- flags[j][i] = bit.bor(flags[j][i], nerv.TNN.FC.SEQ_END)
- st.store[st.head] = nil --sentence end is passed
+ if inputs_s[j][i] ~= self.vocab.null_token then
+ if labels_s[j][i] == self.vocab.null_token then
+ nerv.error("reader error : label is null while input is not null")
+ end
+ flags[j][i] = bit.bor(flags[j][i], nerv.TNN.FC.SEQ_NORM) --has both input and label
+ got_new = true
+ st.store[st.head] = nil
st.head = st.head + 1
- end
- if inputs_s[j][i] == self.vocab.sen_end_token then
- flags[j][i] = bit.bor(flags[j][i], nerv.TNN.FC.SEQ_START)
- end
- end
+ if labels_s[j][i] == self.vocab.sen_end_token then
+ flags[j][i] = bit.bor(flags[j][i], nerv.TNN.FC.SEQ_END)
+ st.store[st.head] = nil --sentence end is passed
+ st.head = st.head + 1
+ if self.se_mode == true then
+ end_stream = true --meet sentence end, this stream ends now
+ end
+ end
+ if inputs_s[j][i] == self.vocab.sen_end_token then
+ flags[j][i] = bit.bor(flags[j][i], nerv.TNN.FC.SEQ_START)
+ end
+ end
+ end
end
end
@@ -147,7 +170,17 @@ function LMReader:get_batch(feeds)
inputs_m[j][1]:copy_fromh(self.bak_inputs_m[j][1])
end
- if (got_new == false) then
+ --check for self.al_sen_start
+ for i = 1, self.batch_size do
+ if inputs_s[1][i] ~= self.vocab.sen_end_token and inputs_s[1][i] ~= self.vocab.null_token then
+ self.stat.al_sen_start = false
+ end
+ end
+
+ if got_new == false then
+ nerv.info("lmseqreader file ends, printing stats...")
+ nerv.printf("al_sen_start:%s\n", tostring(self.stat.al_sen_start))
+
return false
else
return true
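
The new se_mode option makes a stream stop at its first sentence end: the remaining steps of that stream in the chunk are filled with the null token, and stat.al_sen_start records whether every minibatch indeed started at a sentence boundary. A minimal driving loop, modeled on m-tests/lmseqreader_test.lua further down in this diff; the corpus path and the exact global_conf fields are illustrative assumptions:

    require 'lmptb.lmvocab'
    require 'lmptb.lmseqreader'

    local test_fn = "some-text"            -- hypothetical corpus file, one </s>-delimited sentence per line
    local vocab = nerv.LMVocab()
    vocab:build_file(test_fn)

    local batch_size, chunk_size = 3, 20
    local gconf = {cumat_type = nerv.CuMatrixFloat, mmat_type = nerv.MMatrixFloat, vocab = vocab}

    -- se_mode = true: after a stream meets </s>, the rest of its chunk is null-padded
    local reader = nerv.LMSeqReader(gconf, batch_size, chunk_size, vocab, {["se_mode"] = true})
    reader:open_file(test_fn)

    local feeds = {flags_now = {}, inputs_m = {}, flagsPack_now = {}}
    for j = 1, chunk_size do
        feeds.inputs_m[j] = {gconf.cumat_type(batch_size, 1),
                             gconf.cumat_type(batch_size, vocab:size())}
        feeds.flags_now[j] = {}
    end
    while reader:get_batch(feeds) do
        -- feeds.flags_now[j][i] carries the SEQ_START/SEQ_NORM/SEQ_END bits for step j of stream i
    end
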
diff --git a/nerv/examples/lmptb/lmptb/lmutil.lua b/nerv/examples/lmptb/lmptb/lmutil.lua
index 821aa94..71e8e17 100644
--- a/nerv/examples/lmptb/lmptb/lmutil.lua
+++ b/nerv/examples/lmptb/lmptb/lmutil.lua
@@ -48,15 +48,15 @@ end
--Returns: nerv.MMatrixInt
--Set the matrix to be ids of the words, id starting at 1, not 0
function Util.set_id(m, list, vocab)
- if (m:ncol() ~= #list or m:nrow() ~= 1) then
+ if (m:nrow() ~= #list or m:ncol() ~= 1) then
nerv.error("nrow of matrix mismatch with list or its col not one")
end
for i = 1, #list, 1 do
--index in matrix starts at 0
if (list[i] ~= vocab.null_token) then
- m[0][i - 1] = vocab:get_word_str(list[i]).id
+ m[i - 1][0] = vocab:get_word_str(list[i]).id
else
- m[0][i - 1] = 0
+ m[i - 1][0] = 0
end
end
return m
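
With this fix, set_id expects a column layout: one row per word and a single column, matching the batch_size x 1 input matrices used elsewhere. A small sketch of the intended call; the token list is hypothetical, `vocab` is assumed to be a built nerv.LMVocab, and Util is assumed to be registered as nerv.LMUtil, as its wait() helper elsewhere suggests:

    local words = {"</s>", "hello", "world", "</s>"}   -- hypothetical token list
    local m = nerv.MMatrixInt(#words, 1)               -- nrow == #list, ncol == 1
    nerv.LMUtil.set_id(m, words, vocab)
    -- m[i - 1][0] now holds the vocab id of words[i]; null tokens are stored as 0
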
diff --git a/nerv/examples/lmptb/lmptb/lstm_t_v2.lua b/nerv/examples/lmptb/lmptb/lstm_t_v2.lua
new file mode 100644
index 0000000..dc2fe45
--- /dev/null
+++ b/nerv/examples/lmptb/lmptb/lstm_t_v2.lua
@@ -0,0 +1,123 @@
+local LSTMLayerT = nerv.class('nerv.LSTMLayerTv2', 'nerv.LayerT')
+--a version of LSTM that only feeds h into the gates
+
+function LSTMLayerT:__init(id, global_conf, layer_conf)
+ --input1:x input2:h input3:c
+ self.id = id
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+
+ --prepare a DAGLayerT to hold the lstm structure
+ local pr = layer_conf.pr
+ if pr == nil then
+ pr = nerv.ParamRepo()
+ end
+
+ local function ap(str)
+ return self.id .. '.' .. str
+ end
+
+ local layers = {
+ ["nerv.CombinerLayer"] = {
+ [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
+ ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
+ [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
+ [ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3]}, ["lambda"] = {1}}},
+ [ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1, 1}}},
+ },
+ ["nerv.AffineLayer"] = {
+ [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
+ },
+ ["nerv.TanhLayer"] = {
+ [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+ [ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+ },
+ ["nerv.GateFLayer"] = {
+ [ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+ [ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+ [ap("outputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+
+ },
+ ["nerv.ElemMulLayer"] = {
+ [ap("inputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ [ap("forgetGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ [ap("outputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ },
+ }
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+
+ local connections_t = {
+ ["<input>[1]"] = ap("inputXDup[1]"),
+ ["<input>[2]"] = ap("inputHDup[1]"),
+ ["<input>[3]"] = ap("inputCDup[1]"),
+
+ [ap("inputXDup[1]")] = ap("mainAffineL[1]"),
+ [ap("inputHDup[1]")] = ap("mainAffineL[2]"),
+
+ [ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
+
+ [ap("inputXDup[2]")] = ap("inputGateL[1]"),
+ [ap("inputHDup[2]")] = ap("inputGateL[2]"),
+
+ [ap("inputXDup[3]")] = ap("forgetGateL[1]"),
+ [ap("inputHDup[3]")] = ap("forgetGateL[2]"),
+
+ [ap("mainTanhL[1]")] = ap("inputGMulL[1]"),
+ [ap("inputGateL[1]")] = ap("inputGMulL[2]"),
+
+ [ap("inputCDup[1]")] = ap("forgetGMulL[1]"),
+ [ap("forgetGateL[1]")] = ap("forgetGMulL[2]"),
+
+ [ap("inputGMulL[1]")] = ap("mainCDup[1]"),
+ [ap("forgetGMulL[1]")] = ap("mainCDup[2]"),
+
+ [ap("inputXDup[4]")] = ap("outputGateL[1]"),
+ [ap("inputHDup[4]")] = ap("outputGateL[2]"),
+
+ [ap("mainCDup[2]")] = "<output>[2]",
+ [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
+
+ [ap("outputTanhL[1]")] = ap("outputGMulL[1]"),
+ [ap("outputGateL[1]")] = ap("outputGMulL[2]"),
+
+ [ap("outputGMulL[1]")] = "<output>[1]",
+ }
+ self.dagL = nerv.DAGLayerT(self.id, global_conf,
+ {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t})
+
+ self:check_dim_len(3, 2) -- x, h, c and h, c
+end
+
+function LSTMLayerT:init(batch_size, chunk_size)
+ self.dagL:init(batch_size, chunk_size)
+end
+
+function LSTMLayerT:batch_resize(batch_size, chunk_size)
+ self.dagL:batch_resize(batch_size, chunk_size)
+end
+
+function LSTMLayerT:update(bp_err, input, output, t)
+ self.dagL:update(bp_err, input, output, t)
+end
+
+function LSTMLayerT:propagate(input, output, t)
+ self.dagL:propagate(input, output, t)
+end
+
+function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
+ self.dagL:back_propagate(bp_err, next_bp_err, input, output, t)
+end
+
+function LSTMLayerT:get_params()
+ return self.dagL:get_params()
+end
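
From the outside, LSTMLayerTv2 behaves like nerv.LSTMLayerT: three inputs (x_t, h_{t-1}, c_{t-1}), two outputs (h_t, c_t), with the one-step recurrence closed at the TNN level rather than inside the cell. A hedged sketch of a layer-repo entry and the time-delayed connections, mirroring the lstmL1 wiring used in lstmlm_ptb_main.lua below; gconf, pr and the other layer ids are assumed to be set up as in that script:

    local layers = {
        ["nerv.LSTMLayerTv2"] = {
            ["lstmL1"] = {{}, {["dim_in"] = {gconf.hidden_size, gconf.hidden_size, gconf.hidden_size},
                               ["dim_out"] = {gconf.hidden_size, gconf.hidden_size}, ["pr"] = pr}},
        },
    }
    -- the recurrence is wired with a delay of 1 outside the cell:
    local connections_t = {
        {"selectL1[1]",   "lstmL1[1]", 0},      -- x_t from the word embedding
        {"lstmL1[2]",     "lstmL1[3]", 1},      -- c_{t-1} fed back into input 3
        {"lstmL1[1]",     "combinerL1[1]", 0},
        {"combinerL1[1]", "lstmL1[2]", 1},      -- h_{t-1} fed back into input 2
    }
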
diff --git a/nerv/examples/lmptb/lstmlm_ptb_main.lua b/nerv/examples/lmptb/lstmlm_ptb_main.lua
new file mode 100644
index 0000000..6e3fab9
--- /dev/null
+++ b/nerv/examples/lmptb/lstmlm_ptb_main.lua
@@ -0,0 +1,504 @@
+require 'lmptb.lmvocab'
+require 'lmptb.lmfeeder'
+require 'lmptb.lmutil'
+require 'lmptb.layer.init'
+--require 'tnn.init'
+require 'lmptb.lmseqreader'
+require 'lm_trainer'
+
+--[[global function rename]]--
+--local printf = nerv.printf
+local LMTrainer = nerv.LMTrainer
+--[[global function rename ends]]--
+
+--global_conf: table
+--first_time: bool
+--Returns: a ParamRepo
+function prepare_parameters(global_conf, iter)
+ nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
+ if iter == -1 then --first time
+ nerv.printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
+ --[[
+ ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
+ ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
+ ltp_ih.trans:generate(global_conf.param_random)
+
+ ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
+ ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
+ ltp_hh.trans:generate(global_conf.param_random)
+
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
+
+ bp_h = nerv.BiasParam("bp_h", global_conf)
+ bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
+ bp_h.trans:generate(global_conf.param_random)
+
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
+
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:write_chunk(ltp_ih)
+ f:write_chunk(ltp_hh)
+ --f:write_chunk(ltp_ho)
+ f:write_chunk(bp_h)
+ --f:write_chunk(bp_o)
+ f:close()
+ ]]--
+ return nil
+ end
+
+ nerv.printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
+ paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
+
+ nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
+
+ return nil
+end
+
+--global_conf: table
+--Returns: nerv.LayerRepo
+function prepare_layers(global_conf)
+ nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+ local pr = global_conf.paramRepo
+
+ local du = false
+
+ --local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
+ --local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
+
+ local layers = {
+ ["nerv.LSTMLayerT"] = {
+ ["lstmL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ },
+
+ ["nerv.DropoutLayerT"] = {
+ ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SelectLinearLayer"] = {
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
+ },
+
+ ["nerv.CombinerLayer"] = {
+ ["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ },
+
+ ["nerv.AffineLayer"] = {
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
+ },
+
+ ["nerv.SoftmaxCELayerT"] = {
+ ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
+ },
+ }
+
+ for l = 2, global_conf.layer_num do
+ layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.LSTMLayerT"]["lstmL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}}
+ layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
+ end
+ --[[ --we do not need those in the new tnn framework
+ printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt)
+ for i = 1, global_conf.bptt do
+ layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig
+ layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
+ end
+ --]]
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+ nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
+ return layerRepo
+end
+
+--global_conf: table
+--layerRepo: nerv.LayerRepo
+--Returns: a nerv.TNN
+function prepare_tnn(global_conf, layerRepo)
+ nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)
+
+ --input: input_w, input_w, ... input_w_now, last_activation
+ local connections_t = {
+ {"<input>[1]", "selectL1[1]", 0},
+
+ --{"selectL1[1]", "recurrentL1[1]", 0},
+ --{"recurrentL1[1]", "sigmoidL1[1]", 0},
+ --{"sigmoidL1[1]", "combinerL1[1]", 0},
+ --{"combinerL1[1]", "recurrentL1[2]", 1},
+
+ {"selectL1[1]", "lstmL1[1]", 0},
+ {"lstmL1[2]", "lstmL1[3]", 1},
+ {"lstmL1[1]", "combinerL1[1]", 0},
+ {"combinerL1[1]", "lstmL1[2]", 1},
+ {"combinerL1[2]", "dropoutL1[1]", 0},
+
+ {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
+ {"outputL[1]", "softmaxL[1]", 0},
+ {"<input>[2]", "softmaxL[2]", 0},
+ {"softmaxL[1]", "<output>[1]", 0}
+ }
+
+ for l = 2, global_conf.layer_num do
+ table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "lstmL"..l.."[1]", 0})
+ table.insert(connections_t, {"lstmL"..l.."[2]", "lstmL"..l.."[3]", 1})
+ table.insert(connections_t, {"lstmL"..l.."[1]", "combinerL"..l.."[1]", 0})
+ table.insert(connections_t, {"combinerL"..l.."[1]", "lstmL"..l.."[2]", 1})
+ table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
+ end
+
+ --[[
+ printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
+ for key, value in pairs(connections_t) do
+ printf("\t%s->%s\n", key, value)
+ end
+ ]]--
+
+ local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()},
+ ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t,
+ })
+
+ tnn:init(global_conf.batch_size, global_conf.chunk_size)
+
+ nerv.printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
+ return tnn
+end
+
+function load_net(global_conf, next_iter)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
+ local tnn = prepare_tnn(global_conf, layerRepo)
+ return tnn
+end
+
+local train_fn, valid_fn, test_fn
+global_conf = {}
+local set = arg[1] --"test"
+
+if (set == "ptb") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/ptb/DATA'
+train_fn = data_dir .. '/ptb.train.txt.adds'
+valid_fn = data_dir .. '/ptb.valid.txt.adds'
+test_fn = data_dir .. '/ptb.test.txt.adds'
+vocab_fn = data_dir .. '/vocab'
+
+qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0.5",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/ptb/EXP-nerv/lstmlm_v1.0'
+}
+
+elseif (set == "msr_sc") then
+
+data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2'
+train_fn = data_dir .. '/normed_all.sf.len60.adds.train'
+valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev'
+test_fn = data_dir .. '/answer_normed.adds'
+vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab'
+
+global_conf = {
+ lrate = 1, wcost = 1e-6, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 10,
+ max_iter = 30,
+ decay_iter = 10,
+ lr_decay = 1.003,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 400000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+}
+
+elseif (set == "twitter") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/twitter_new/DATA'
+train_fn = data_dir .. '/twitter.choose.adds'
+valid_fn = data_dir .. '/twitter.valid.adds'
+test_fn = data_dir .. '/comm.test.choose-ppl.adds'
+vocab_fn = data_dir .. '/twitter.choose.train.vocab'
+
+--qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = root_dir .. '/twitter_new/EXP-nerv/lstmlm_v1.0'
+}
+
+else
+
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+
+global_conf = {
+ lrate = 0.01, wcost = 1e-5, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 20,
+ layer_num = 1,
+ chunk_size = 2,
+ batch_size = 10,
+ max_iter = 3,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ max_sen_len = 80,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 10, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_lstmlm_test'
+}
+
+end
+
+lr_half = false --can not be local, to be set by loadstring
+start_iter = -1
+start_lr = global_conf.lrate
+ppl_last = 100000
+commands_str = "train:test"
+commands = {}
+test_iter = -1
+--for testout(question)
+q_file = "/home/slhome/txh18/workspace/ptb/questionGen/gen/ptb.test.txt.q10rs1_Msss.adds"
+
+if arg[2] ~= nil then
+ nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
+ loadstring(arg[2])()
+ nerv.LMUtil.wait(0.5)
+else
+ nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
+end
+
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
+global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str ..os.date("_TT%m_%d_%X",os.time())
+global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
+commands = nerv.SUtil.parse_commands_set(commands_str)
+
+global_conf.lrate = start_lr
+
+nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
+nerv.LMUtil.wait(2)
+os.execute("mkdir -p "..global_conf.work_dir)
+os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
+
+--redirecting log outputs!
+nerv.SUtil.log_redirect(global_conf.log_fn)
+nerv.LMUtil.wait(2)
+
+----------------printing options---------------------------------
+nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
+for id, value in pairs(global_conf) do
+ nerv.printf("%s:\t%s\n", id, tostring(value))
+end
+nerv.LMUtil.wait(2)
+
+nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
+nerv.printf("lr_half:\t%s\n", tostring(lr_half))
+nerv.printf("start_iter:\t%s\n", tostring(start_iter))
+nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
+nerv.printf("commands_str:\t%s\n", commands_str)
+nerv.printf("test_iter:\t%s\n", tostring(test_iter))
+nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
+nerv.LMUtil.wait(2)
+------------------printing options end------------------------------
+
+math.randomseed(1)
+
+local vocab = nerv.LMVocab()
+global_conf["vocab"] = vocab
+nerv.printf("%s building vocab...\n", global_conf.sche_log_pre)
+global_conf.vocab:build_file(global_conf.vocab_fn, false)
+ppl_rec = {}
+
+local final_iter = -1
+if commands["train"] == 1 then
+ if start_iter == -1 then
+ prepare_parameters(global_conf, -1) --write pre_generated params to param.0 file
+ end
+
+ if start_iter == -1 or start_iter == 0 then
+ nerv.printf("===INITIAL VALIDATION===\n")
+ local tnn = load_net(global_conf, 0)
+ global_conf.paramRepo = tnn:get_params() --get auto-generated params
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
+ global_conf.dropout_rate = 0
+ local result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ nerv.LMUtil.wait(1)
+ ppl_rec[0] = {}
+ ppl_rec[0].valid = result:ppl_all("rnn")
+ ppl_last = ppl_rec[0].valid
+ ppl_rec[0].train = 0
+ ppl_rec[0].test = 0
+ ppl_rec[0].lr = 0
+
+ start_iter = 1
+
+ nerv.printf("\n")
+ end
+
+ for iter = start_iter, global_conf.max_iter, 1 do
+ final_iter = iter --for final testing
+ global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
+ tnn = load_net(global_conf, iter - 1)
+ nerv.printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
+ global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ global_conf.dropout_rate = 0
+ ppl_rec[iter] = {}
+ ppl_rec[iter].train = result:ppl_all("rnn")
+ --shuffling training file
+ nerv.printf("%s shuffling training file\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
+ os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
+ nerv.printf("===PEEK ON TEST %d===\n", iter)
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+ ppl_rec[iter].test = result:ppl_all("rnn")
+ nerv.printf("===VALIDATION %d===\n", iter)
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ ppl_rec[iter].valid = result:ppl_all("rnn")
+ ppl_rec[iter].lr = global_conf.lrate
+ if ((ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true) and iter > global_conf.decay_iter) then
+ global_conf.lrate = (global_conf.lrate * 0.6)
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ nerv.printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ else
+ nerv.printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
+ end
+ if ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true then
+ lr_half = true
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ ppl_last = ppl_rec[iter].valid
+ end
+ nerv.printf("\n")
+ nerv.LMUtil.wait(2)
+ end
+ nerv.info("saving final nn to param.final")
+ os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(final_iter) .. ' ' .. global_conf.param_fn .. '.final')
+
+ nerv.printf("===VALIDATION PPL record===\n")
+ for i, _ in pairs(ppl_rec) do
+ nerv.printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
+ end
+ nerv.printf("\n")
+end --if commands["train"]
+
+if commands["test"] == 1 then
+ nerv.printf("===FINAL TEST===\n")
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+end --if commands["test"]
+
+if commands["testout"] == 1 then
+ nerv.printf("===TEST OUT===\n")
+ nerv.printf("q_file:\t%s\n", q_file)
+ local q_fn = q_file --qdata_dir .. '/' .. q_file
+ global_conf.sche_log_pre = "[SCHEDULER TESTOUT]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_rnn(global_conf, q_fn, tnn, false,
+ {["one_sen_report"] = true}) --false update!
+end --if commands["testout"]
+
+
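
Since arg[2] is executed with loadstring() before the work dir, log file and command set are assembled, the scheduling globals above can be overridden per run from the command line. A hypothetical second argument; all values are purely illustrative:

    -- hypothetical contents of arg[2]; the whole string is executed by loadstring(arg[2])()
    start_iter = 11              -- resume training from params.10
    lr_half = true               -- keep the learning-rate halving engaged from the start
    commands_str = "test"        -- skip training, only run the final test
    test_iter = -1               -- -1 falls back to the last trained iteration (or params.final)
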
diff --git a/nerv/examples/lmptb/m-tests/lmseqreader_test.lua b/nerv/examples/lmptb/m-tests/lmseqreader_test.lua
index cbcdcbe..9127559 100644
--- a/nerv/examples/lmptb/m-tests/lmseqreader_test.lua
+++ b/nerv/examples/lmptb/m-tests/lmseqreader_test.lua
@@ -7,12 +7,12 @@ local test_fn = "/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-te
--local test_fn = "/home/slhome/txh18/workspace/nerv-project/nerv/examples/lmptb/PTBdata/ptb.train.txt"
local vocab = nerv.LMVocab()
vocab:build_file(test_fn)
-local chunk_size = 5
+local chunk_size = 20
local batch_size = 3
local global_conf = {
lrate = 1, wcost = 1e-6, momentum = 0,
cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
hidden_size = 20,
chunk_size = chunk_size,
@@ -30,11 +30,12 @@ local global_conf = {
vocab = vocab
}
-local reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, vocab)
+local reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, vocab, {["se_mode"] = true})
reader:open_file(test_fn)
local feeds = {}
feeds.flags_now = {}
feeds.inputs_m = {}
+feeds.flagsPack_now = {}
for j = 1, chunk_size do
feeds.inputs_m[j] = {global_conf.cumat_type(batch_size, 1), global_conf.cumat_type(batch_size, global_conf.vocab:size())}
feeds.flags_now[j] = {}
diff --git a/nerv/examples/lmptb/m-tests/some-text-chn b/nerv/examples/lmptb/m-tests/some-text-chn
new file mode 100644
index 0000000..da474ce
--- /dev/null
+++ b/nerv/examples/lmptb/m-tests/some-text-chn
@@ -0,0 +1,5 @@
+</s> 你好 我 是 一个 人 </s>
+</s> 想 一起 玩 吗 </s>
+</s> 一个 人 很 好 玩 </s>
+</s> 不 想 一个 人 玩 </s>
+</s> 不 想 一个 人 玩 </s>
diff --git a/nerv/examples/lmptb/m-tests/sutil_test.lua b/nerv/examples/lmptb/m-tests/sutil_test.lua
new file mode 100644
index 0000000..08a812c
--- /dev/null
+++ b/nerv/examples/lmptb/m-tests/sutil_test.lua
@@ -0,0 +1,15 @@
+--require "tnn.init"
+
+local ss = "0.1*1:2"
+nerv.SUtil.parse_schedule(ss)
+ss = "train:test"
+local coms = nerv.SUtil.parse_commands_set(ss)
+print("!!!")
+for p, v in pairs(coms) do
+ print(p,v)
+end
+nerv.sss = "sss"
+print(nerv.sss)
+
+fh = assert(io.open("/home/slhome/txh18/workspace/nerv/play/try", "w"))
+fh:write("!!!2")
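
Judging from how the trainer scripts test commands["train"] == 1, parse_commands_set presumably turns a colon-separated string into a set-like table; the return shape sketched below is an assumption based on that usage:

    local coms = nerv.SUtil.parse_commands_set("train:test")
    -- expected by the callers to behave like { ["train"] = 1, ["test"] = 1 }
    if coms["train"] == 1 then
        print("training phase enabled")
    end
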
diff --git a/nerv/examples/lmptb/tnn_ptb_main.lua b/nerv/examples/lmptb/rnnlm_ptb_main.lua
index 50286c9..ca62023 100644
--- a/nerv/examples/lmptb/tnn_ptb_main.lua
+++ b/nerv/examples/lmptb/rnnlm_ptb_main.lua
@@ -2,7 +2,7 @@ require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
require 'lmptb.layer.init'
-require 'rnn.init'
+--require 'tnn.init'
require 'lmptb.lmseqreader'
require 'lm_trainer'
@@ -17,8 +17,14 @@ local LMTrainer = nerv.LMTrainer
function prepare_parameters(global_conf, iter)
printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
if iter == -1 then --first time
- printf("%s first time, generating parameters...\n", global_conf.sche_log_pre)
+ printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
+ --[[
ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
ltp_ih.trans:generate(global_conf.param_random)
@@ -27,47 +33,48 @@ function prepare_parameters(global_conf, iter)
ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
ltp_hh.trans:generate(global_conf.param_random)
- ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
- ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
- ltp_ho.trans:generate(global_conf.param_random)
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
bp_h = nerv.BiasParam("bp_h", global_conf)
bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
bp_h.trans:generate(global_conf.param_random)
- bp_o = nerv.BiasParam("bp_o", global_conf)
- bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
- bp_o.trans:generate(global_conf.param_random)
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
f:write_chunk(ltp_ih)
f:write_chunk(ltp_hh)
- f:write_chunk(ltp_ho)
+ --f:write_chunk(ltp_ho)
f:write_chunk(bp_h)
- f:write_chunk(bp_o)
+ --f:write_chunk(bp_o)
f:close()
-
+ ]]--
return nil
end
printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
- local paramRepo = nerv.ParamRepo()
paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
- return paramRepo
+ return nil
end
--global_conf: table
--Returns: nerv.LayerRepo
-function prepare_layers(global_conf, paramRepo)
+function prepare_layers(global_conf)
printf("%s preparing layers...\n", global_conf.sche_log_pre)
+ local pr = global_conf.paramRepo
+
local du = false
--local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
- local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du}}
+ local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
local layers = {
["nerv.AffineRecurrentLayer"] = {
@@ -75,7 +82,7 @@ function prepare_layers(global_conf, paramRepo)
},
["nerv.SelectLinearLayer"] = {
- ["selectL1"] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}},
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
},
["nerv.SigmoidLayer"] = {
@@ -87,7 +94,7 @@ function prepare_layers(global_conf, paramRepo)
},
["nerv.AffineLayer"] = {
- ["outputL"] = {{["ltp"] = "ltp_ho", ["bp"] = "bp_o"}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du}},
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
},
["nerv.SoftmaxCELayerT"] = {
@@ -104,7 +111,7 @@ function prepare_layers(global_conf, paramRepo)
end
--]]
- local layerRepo = nerv.LayerRepo(layers, paramRepo, global_conf)
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
printf("%s preparing layers end.\n", global_conf.sche_log_pre)
return layerRepo
end
@@ -146,10 +153,10 @@ function prepare_tnn(global_conf, layerRepo)
end
function load_net(global_conf, next_iter)
- local paramRepo = prepare_parameters(global_conf, next_iter)
- local layerRepo = prepare_layers(global_conf, paramRepo)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
local tnn = prepare_tnn(global_conf, layerRepo)
- return tnn, paramRepo
+ return tnn
end
local train_fn, valid_fn, test_fn
@@ -165,7 +172,7 @@ test_fn = data_dir .. '/ptb.test.txt.adds'
vocab_fn = data_dir .. '/vocab'
global_conf = {
- lrate = 1, wcost = 1e-5, momentum = 0,
+ lrate = 1, wcost = 1e-6, momentum = 0,
cumat_type = nerv.CuMatrixFloat,
mmat_type = nerv.MMatrixFloat,
nn_act_default = 0,
@@ -174,7 +181,7 @@ global_conf = {
chunk_size = 15,
batch_size = 10,
max_iter = 35,
- decay_iter = 16,
+ decay_iter = 15,
param_random = function() return (math.random() / 5 - 0.1) end,
train_fn = train_fn,
@@ -184,7 +191,7 @@ global_conf = {
sche_log_pre = "[SCHEDULER]:",
log_w_num = 40000, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir = '/home/slhome/txh18/workspace/nerv/play/dagL_test'
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/ptbEXP/tnn_test'
}
elseif (set == "msr_sc") then
@@ -213,9 +220,9 @@ global_conf = {
test_fn = test_fn,
vocab_fn = vocab_fn,
sche_log_pre = "[SCHEDULER]:",
- log_w_num = 40000, --give a message when log_w_num words have been processed
+ log_w_num = 400000, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
}
else
@@ -233,7 +240,7 @@ global_conf = {
hidden_size = 20,
chunk_size = 2,
- batch_size = 3,
+ batch_size = 10,
max_iter = 3,
param_random = function() return (math.random() / 5 - 0.1) end,
@@ -244,15 +251,11 @@ global_conf = {
sche_log_pre = "[SCHEDULER]:",
log_w_num = 10, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir = '/home/slhome/txh18/workspace/nerv/play/dagL_test'
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_test'
}
end
-global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
-global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
-global_conf.param_fn = global_conf.work_dir .. "/params"
-
lr_half = false --can not be local, to be set by loadstring
start_iter = -1
ppl_last = 100000
@@ -264,6 +267,11 @@ else
printf("%s not user setting, all default...\n", global_conf.sche_log_pre)
end
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+
----------------printing options---------------------------------
printf("%s printing global_conf...\n", global_conf.sche_log_pre)
for id, value in pairs(global_conf) do
@@ -291,13 +299,15 @@ global_conf.vocab:build_file(global_conf.vocab_fn, false)
ppl_rec = {}
if start_iter == -1 then
- prepare_parameters(global_conf, -1) --randomly generate parameters
+ prepare_parameters(global_conf, -1) --write pre_generated params to param.0 file
end
if start_iter == -1 or start_iter == 0 then
print("===INITIAL VALIDATION===")
- local tnn, paramRepo = load_net(global_conf, 0)
- local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ local tnn = load_net(global_conf, 0)
+ global_conf.paramRepo = tnn:get_params() --get auto-generated params
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
+ local result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
nerv.LMUtil.wait(1)
ppl_rec[0] = {}
ppl_rec[0].valid = result:ppl_all("rnn")
@@ -315,9 +325,9 @@ local final_iter
for iter = start_iter, global_conf.max_iter, 1 do
final_iter = iter --for final testing
global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
- tnn, paramRepo = load_net(global_conf, iter - 1)
+ tnn = load_net(global_conf, iter - 1)
printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
- result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
ppl_rec[iter] = {}
ppl_rec[iter].train = result:ppl_all("rnn")
--shuffling training file
@@ -325,10 +335,10 @@ for iter = start_iter, global_conf.max_iter, 1 do
os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
printf("===PEEK ON TEST %d===\n", iter)
- result = LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
ppl_rec[iter].test = result:ppl_all("rnn")
printf("===VALIDATION %d===\n", iter)
- result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
ppl_rec[iter].valid = result:ppl_all("rnn")
ppl_rec[iter].lr = global_conf.lrate
if ((ppl_last / ppl_rec[iter].valid < 1.0003 or lr_half == true) and iter > global_conf.decay_iter) then
@@ -336,7 +346,7 @@ for iter = start_iter, global_conf.max_iter, 1 do
end
if ppl_rec[iter].valid < ppl_last then
printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
- paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
else
printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
@@ -357,6 +367,6 @@ end
printf("\n")
printf("===FINAL TEST===\n")
global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
-tnn, paramRepo = load_net(global_conf, final_iter)
-LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+tnn = load_net(global_conf, final_iter)
+LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
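
The paramRepo handling above follows a bootstrap pattern: write an (almost) empty params.0, let find_param auto-generate whatever is missing while the net is built, then re-export the collected repo so every later iteration can import a complete file. Compressed into one sketch, using the paths and fields as configured in this script:

    local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')      -- start from an empty chunk file
    f:close()

    local tnn = load_net(global_conf, 0)                             -- layers auto-generate missing params
    global_conf.paramRepo = tnn:get_params()                         -- collect them from the net
    global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil)  -- overwrite params.0 with the full set

    -- later iterations simply import the previous file inside prepare_parameters:
    global_conf.paramRepo:import({global_conf.param_fn .. '.0'}, nil, global_conf)
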
diff --git a/nerv/examples/lmptb/unfold_ptb_main.lua b/nerv/examples/lmptb/unfold_ptb_main.lua
index 6c4ead3..eebab2b 100644
--- a/nerv/examples/lmptb/unfold_ptb_main.lua
+++ b/nerv/examples/lmptb/unfold_ptb_main.lua
@@ -8,6 +8,7 @@
require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
+require 'tnn.init'
nerv.include('lmptb/layer/init.lua')
--[[global function rename]]--
@@ -70,7 +71,7 @@ function prepare_layers(global_conf, paramRepo)
},
["nerv.SelectLinearLayer"] = {
- ["selectL1"] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}},
+ ["selectL1"] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab}},
},
["nerv.SigmoidLayer"] = {
@@ -90,7 +91,7 @@ function prepare_layers(global_conf, paramRepo)
for i = 1, global_conf.bptt do
layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig
layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
- layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab}}
end
local layerRepo = nerv.LayerRepo(layers, paramRepo, global_conf)
printf("%s preparing layers end.\n", global_conf.sche_log_pre)
@@ -138,7 +139,7 @@ function prepare_dagLayer(global_conf, layerRepo)
printf("\t%s->%s\n", key, value)
end
- local dagL = nerv.DAGLayer("dagL", global_conf, {["dim_in"] = dim_in_t, ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
+ local dagL = nerv.DAGLayerT("dagL", global_conf, {["dim_in"] = dim_in_t, ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
["connections"] = connections_t,
})
dagL:init(global_conf.batch_size)
@@ -173,7 +174,7 @@ function propagateFile(global_conf, dagL, fn, config)
local dagL_input = {}
for i = 1, global_conf.bptt + 1 do
- dagL_input[i] = global_conf.cumat_type(1, global_conf.batch_size) --changed to row vector, debughtx
+ dagL_input[i] = global_conf.cumat_type(global_conf.batch_size, 1) --changed to a column vector (one row per stream), debughtx
end
dagL_input[global_conf.bptt + 2] = global_conf.cumat_type(global_conf.batch_size, global_conf.hidden_size)
dagL_input[global_conf.bptt + 3] = global_conf.cumat_type(global_conf.batch_size, global_conf.vocab:size())
@@ -209,7 +210,7 @@ function propagateFile(global_conf, dagL, fn, config)
global_conf.timer:toc("dagL-propagate")
hidden_store[tnow] = global_conf.cumat_type(global_conf.batch_size, global_conf.hidden_size)
- hidden_store[tnow]:copy_fromd(sigmoidL_ref.outputs[1])
+ hidden_store[tnow]:copy_fromd(sigmoidL_ref.outputs[1][1])
if (config.do_train == true) then
global_conf.timer:tic("dagL-back_propagate")
@@ -277,9 +278,9 @@ if (set == "ptb") then
global_conf = {
lrate = 1, wcost = 1e-6, momentum = 0,
cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
- hidden_size = 200,
+ hidden_size = 50,
batch_size = 10,
bptt = 6, --train bptt_block's words. could be set to zero
max_iter = 18,
@@ -290,7 +291,7 @@ if (set == "ptb") then
valid_fn = valid_fn,
test_fn = test_fn,
sche_log_pre = "[SCHEDULER]:",
- log_w_num = 100000, --give a message when log_w_num words have been processed
+ log_w_num = 1000, --give a message when log_w_num words have been processed
timer = nerv.Timer()
}
global_conf.work_dir = work_dir_base.."/h"..global_conf.hidden_size.."bp"..global_conf.bptt.."slr"..global_conf.lrate --..os.date("_%bD%dH%H") --comment this for testing
@@ -303,7 +304,7 @@ elseif (set == "test") then
global_conf = {
lrate = 0.1, wcost = 1e-6, momentum = 0,
cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
hidden_size = 5,
batch_size = 1,
diff --git a/nerv/init.lua b/nerv/init.lua
index 9c1a5c8..b5d20a2 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -130,3 +130,4 @@ nerv.include('matrix/init.lua')
nerv.include('io/init.lua')
nerv.include('layer/init.lua')
nerv.include('nn/init.lua')
+nerv.include('tnn/init.lua')
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 6903c51..566e9bc 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -61,28 +61,38 @@ end
function AffineLayer:__init(id, global_conf, layer_conf)
self.id = id
- self.ltp = layer_conf.ltp
- self.bp = layer_conf.bp
self.dim_in = layer_conf.dim_in
self.dim_out = layer_conf.dim_out
+ self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
+ for i = 2, #self.dim_in do
+ self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]})
+ end
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
self.gconf = global_conf
- self:check_dim_len(1, 1) -- exactly one input and one output
- -- self.direct_update = layer_conf.direct_update or global_conf.direct_update
+ self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
end
function AffineLayer:init(batch_size)
if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
nerv.error("mismatching dimensions of linear transform and bias paramter")
end
+ self.bp:train_init()
if self.dim_in[1] ~= self.ltp.trans:nrow() then
nerv.error("mismatching dimensions of linear transform parameter and input")
end
if self.dim_out[1] ~= self.ltp.trans:ncol() then
nerv.error("mismatching dimensions of linear transform parameter and output")
end
- self.ltp_grad = self.ltp.trans:create()
self.ltp:train_init()
- self.bp:train_init()
+ for i = 2, #self.dim_in do
+ if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
+ nerv.error("mismatching dimensions of linear transform parameter and input")
+ end
+ if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform parameter and output")
+ end
+ self["ltp" .. i]:train_init()
+ end
end
function AffineLayer:batch_resize(batch_size)
@@ -91,20 +101,31 @@ end
function AffineLayer:update(bp_err, input, output)
self.ltp:update_by_err_input(bp_err[1], input[1])
+ for i = 2, #self.dim_in do
+ self["ltp" .. i]:update_by_err_input(bp_err[1], input[i])
+ end
self.bp:update_by_gradient(bp_err[1]:colsum())
end
function AffineLayer:propagate(input, output)
- -- apply linear transform
output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
- -- add bias
+ for i = 2, #self.dim_in do
+ output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+ end
output[1]:add_row(self.bp.trans, 1.0)
end
function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+ for i = 2, #self.dim_in do
+ next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+ end
end
function AffineLayer:get_params()
- return nerv.ParamRepo({self.ltp, self.bp})
+ local pr = nerv.ParamRepo({self.ltp, self.bp})
+ for i = 2, #self.dim_in do
+ pr:add(self["ltp" .. i].id, self["ltp" .. i])
+ end
+ return pr
end
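
AffineLayer now accepts any number of inputs: ltp transforms input 1, ltp2, ltp3, ... transform the rest, their products are summed into the single output, and the shared bias is added on top. A minimal sketch of a two-input configuration with auto-generated parameters; the layer id and dimensions are illustrative:

    local gconf = {cumat_type = nerv.CuMatrixFloat,
                   param_random = function() return (math.random() / 5 - 0.1) end}
    -- no explicit params and no ["pr"], so find_param generates ltp (400x300), ltp2 (300x300) and bp (1x300)
    local affine = nerv.AffineLayer("affineXH", gconf,
        {["dim_in"] = {400, 300}, ["dim_out"] = {300}})
    -- propagate computes: output[1] = input[1] * ltp.trans + input[2] * ltp2.trans + bp.trans (row-broadcast)
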
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index da189e0..d537f4a 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -10,8 +10,8 @@ function Recurrent:__init(id, global_conf, layer_conf)
self.dim_out = layer_conf.dim_out
self.gconf = global_conf
- self.bp = layer_conf.bp
- self.ltp_hh = layer_conf.ltp_hh --from hidden to hidden
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
+ self.ltp_hh = self:find_param("ltp_hh", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp_hh --from hidden to hidden
self:check_dim_len(2, 1)
self.direct_update = layer_conf.direct_update
diff --git a/nerv/layer/elem_mul.lua b/nerv/layer/elem_mul.lua
new file mode 100644
index 0000000..c809d3e
--- /dev/null
+++ b/nerv/layer/elem_mul.lua
@@ -0,0 +1,38 @@
+local ElemMulLayer = nerv.class('nerv.ElemMulLayer', 'nerv.Layer')
+
+function ElemMulLayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+
+ self:check_dim_len(2, 1) -- Element-multiply input[1] and input[2]
+end
+
+function ElemMulLayer:init(batch_size)
+ if self.dim_in[1] ~= self.dim_in[2] or
+ self.dim_in[1] ~= self.dim_out[1] then
+ nerv.error("dim_in and dim_out mismatch for ElemMulLayer")
+ end
+end
+
+function ElemMulLayer:batch_resize(batch_size)
+ --do nothing
+end
+
+function ElemMulLayer:propagate(input, output)
+ output[1]:mul_elem(input[1], input[2])
+end
+
+function ElemMulLayer:back_propagate(bp_err, next_bp_err, input, output)
+ next_bp_err[1]:mul_elem(bp_err[1], input[2])
+ next_bp_err[2]:mul_elem(bp_err[1], input[1])
+end
+
+function ElemMulLayer:update(bp_err, input, output)
+ --do nothing
+end
+
+function ElemMulLayer:get_params()
+ return nerv.ParamRepo({})
+end
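
For c = a .* b the gradient w.r.t. each input is the incoming error scaled element-wise by the other input, which is exactly what back_propagate does; there are no parameters to update. A minimal layer-repo entry, with the id and width chosen for illustration:

    local layers = {
        ["nerv.ElemMulLayer"] = {
            ["gateMulL"] = {{}, {["dim_in"] = {300, 300}, ["dim_out"] = {300}}},  -- all three dims must match
        },
    }
    -- forward:  output[1] = input[1] .* input[2]
    -- backward: next_bp_err[1] = bp_err[1] .* input[2],  next_bp_err[2] = bp_err[1] .* input[1]
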
diff --git a/nerv/layer/gate_fff.lua b/nerv/layer/gate_fff.lua
new file mode 100644
index 0000000..6082e27
--- /dev/null
+++ b/nerv/layer/gate_fff.lua
@@ -0,0 +1,73 @@
+local GateFFFLayer = nerv.class('nerv.GateFLayer', 'nerv.Layer') --Full matrix gate
+
+function GateFFFLayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+
+ for i = 1, #self.dim_in do
+ self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]}) --layer_conf.ltp
+ end
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
+
+ self:check_dim_len(-1, 1) --accept multiple inputs
+end
+
+function GateFFFLayer:init(batch_size)
+ for i = 1, #self.dim_in do
+ if self["ltp" .. i].trans:ncol() ~= self.bp.trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform and bias paramter")
+ end
+ if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
+ nerv.error("mismatching dimensions of linear transform parameter and input")
+ end
+ self["ltp"..i]:train_init()
+ end
+
+ if self.dim_out[1] ~= self.ltp1.trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform parameter and output")
+ end
+ self.bp:train_init()
+ self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
+end
+
+function GateFFFLayer:batch_resize(batch_size)
+ if self.err_bakm:nrow() ~= batch_size then
+ self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
+ end
+end
+
+function GateFFFLayer:propagate(input, output)
+ -- apply linear transform
+ output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+ for i = 2, #self.dim_in do
+ output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+ end
+ -- add bias
+ output[1]:add_row(self.bp.trans, 1.0)
+ output[1]:sigmoid(output[1])
+end
+
+function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
+ self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+ for i = 1, #self.dim_in do
+ next_bp_err[i]:mul(self.err_bakm, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+ end
+end
+
+function GateFFFLayer:update(bp_err, input, output)
+ self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+ for i = 1, #self.dim_in do
+ self["ltp" .. i]:update_by_err_input(self.err_bakm, input[i])
+ end
+ self.bp:update_by_gradient(self.err_bakm:colsum())
+end
+
+function GateFFFLayer:get_params()
+ local pr = nerv.ParamRepo({self.bp})
+ for i = 1, #self.dim_in do
+ pr:add(self["ltp" .. i].id, self["ltp" .. i])
+ end
+ return pr
+end
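
GateFLayer is a sigmoid gate over an arbitrary number of inputs: each input i gets its own ltp_i, the products are summed with a shared bias and squashed by a sigmoid; the LSTM cells in this diff use it for the input, forget and output gates. A hedged repo entry mirroring that use, with the id, widths and the pr variable as illustrative assumptions:

    local layers = {
        ["nerv.GateFLayer"] = {
            ["forgetGateL"] = {{}, {["dim_in"] = {300, 300},   -- x and h widths
                                    ["dim_out"] = {300},       -- gate width (= cell width)
                                    ["pr"] = pr}},             -- shared ParamRepo for ltp1/ltp2/bp
        },
    }
    -- propagate: g = sigmoid(x * ltp1.trans + h * ltp2.trans + bp.trans)
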
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 6861b0e..23606e1 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -70,8 +70,29 @@ function Layer:get_dim()
return self.dim_in, self.dim_out
end
+function Layer:find_param(pid, l_conf, gconf, p_type, p_dim)
+ if l_conf[pid] ~= nil then
+ nerv.info("Param [%s] of layer [%s] found in layer_conf.", pid, self.id)
+ return l_conf[pid]
+ end
+ local pid_g = self.id .. '_' .. pid --global identifier
+ local pr = l_conf.pr
+ local p
+ if pr ~= nil and pr:has_param(pid_g) == true then
+ nerv.info("Param [%s] of layer [%s] found in layer_conf.paramRepo.", pid, self.id)
+ p = pr:get_param(pid_g)
+ return p
+ end
+ nerv.info("Param [%s] of layer [%s] is not found in layer_conf or layer_conf.paramRepo, switch to auto-generate.", pid, self.id)
+ p = p_type(pid_g, gconf)
+ p.trans = gconf.cumat_type(unpack(p_dim))
+ p.trans:generate(gconf.param_random)
+ return p
+end
+
nerv.include('affine.lua')
nerv.include('sigmoid.lua')
+nerv.include('tanh.lua')
nerv.include('softmax_ce.lua')
nerv.include('bias.lua')
nerv.include('window.lua')
@@ -79,3 +100,5 @@ nerv.include('mse.lua')
nerv.include('combiner.lua')
nerv.include('affine_recurrent.lua')
nerv.include('softmax.lua')
+nerv.include('elem_mul.lua')
+nerv.include('gate_fff.lua')
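
find_param gives every layer a uniform three-step lookup: an explicitly supplied param wins, then the id-scoped name in layer_conf.pr, and only then a freshly generated one. A sketch of the call as select_linear.lua and affine.lua now make it; vocab_size and hidden_size are illustrative stand-ins:

    -- inside a layer whose id is "selectL1", asking for a param called "ltp":
    local ltp = self:find_param("ltp", layer_conf, global_conf,
                                nerv.LinearTransParam, {vocab_size, hidden_size})
    -- resolution order implemented above:
    --   1) layer_conf.ltp, if explicitly supplied
    --   2) layer_conf.pr:get_param("selectL1_ltp"), if that ParamRepo holds the id-scoped name
    --   3) otherwise a new nerv.LinearTransParam("selectL1_ltp", gconf) whose trans is a
    --      gconf.cumat_type(vocab_size, hidden_size) filled by gconf.param_random
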
diff --git a/nerv/layer/tanh.lua b/nerv/layer/tanh.lua
new file mode 100644
index 0000000..e1c32f2
--- /dev/null
+++ b/nerv/layer/tanh.lua
@@ -0,0 +1,35 @@
+local TanhLayer = nerv.class("nerv.TanhLayer", "nerv.Layer")
+
+function TanhLayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.gconf = global_conf
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self:check_dim_len(1, 1)
+end
+
+function TanhLayer:init()
+ if self.dim_in[1] ~= self.dim_out[1] then
+ nerv.error("mismatching dimensions of input and output")
+ end
+end
+
+function TanhLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
+function TanhLayer:update(bp_err, input, output)
+ -- no params, therefore do nothing
+end
+
+function TanhLayer:propagate(input, output)
+ output[1]:tanh(input[1])
+end
+
+function TanhLayer:back_propagate(bp_err, next_bp_err, input, output)
+ next_bp_err[1]:tanh_grad(bp_err[1], output[1])
+end
+
+function TanhLayer:get_params()
+ return nerv.ParamRepo({})
+end
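
back_propagate relies on the identity tanh'(x) = 1 - tanh(x)^2, so only the layer's own output is needed to form the gradient; the tanh_grad kernel added to cukernel.cu below evaluates the same expression on the GPU. A minimal parameter-free repo entry, with id and width chosen for illustration:

    local layers = {
        ["nerv.TanhLayer"] = {
            ["mainTanhL"] = {{}, {["dim_in"] = {300}, ["dim_out"] = {300}}},
        },
    }
    -- forward:  y = tanh(x)
    -- backward: dE/dx = (1 - y .* y) .* dE/dy
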
diff --git a/nerv/lib/io/chunk_file.c b/nerv/lib/io/chunk_file.c
index 4e00b0b..71db820 100644
--- a/nerv/lib/io/chunk_file.c
+++ b/nerv/lib/io/chunk_file.c
@@ -112,7 +112,7 @@ static ChunkFile *open_read(const char *fn, Status *status) {
for (i = 0;; offset += chunk_len, i++)
{
ChunkInfo *cip;
- fprintf(stderr, "reading chunk %d from %d\n", i, (int)offset);
+ fprintf(stdout, "reading chunk %d from %d\n", i, (int)offset);
/* skip to the begining of chunk i */
if (fseeko(fp, offset, SEEK_SET) != 0)
{
diff --git a/nerv/lib/matrix/cukernel.h b/nerv/lib/matrix/cukernel.h
index fffe0bc..fe682d3 100644
--- a/nerv/lib/matrix/cukernel.h
+++ b/nerv/lib/matrix/cukernel.h
@@ -3,6 +3,10 @@ void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, Matrix *c);
void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b);
void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
+void cudak_(cuda_rand_uniform)(const Matrix *a); //a's curand_gen may be modified
+void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high);
+void cudak_(cuda_tanh)(const Matrix *a, Matrix *b);
+void cudak_(cuda_tanh_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b);
void cudak_(cuda_rowmax)(const Matrix *a, Matrix *b);
void cudak_(cuda_rowmax_idx)(const Matrix *a, Matrix *b, Matrix *idx);
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index e58c488..aa830b5 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -20,6 +20,19 @@ __global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
b[idx] = log(tmp);
}
+__global__ void cudak_(thres_mask)(MATRIX_ELEM *a, MATRIX_ELEM *b, double thres, double low, double high,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ if (b[idx] < thres)
+ a[idx] = low;
+ else
+ a[idx] = high;
+}
+
__global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
MATRIX_ELEM *c,
int nrow, int ncol, int stride) {
@@ -53,6 +66,29 @@ __global__ void cudak_(sigmoid_grad)(const MATRIX_ELEM *output,
nerr[idx] = output[idx] * (1.0 - output[idx]) * err[idx];
}
+__global__ void cudak_(tanh)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ //b[idx] = (exp(a[idx]) - exp(-a[idx])) / (exp(a[idx]) + exp(-a[idx])); //could cause nan
+ b[idx] = tanh(a[idx]);
+}
+
+__global__ void cudak_(tanh_grad)(const MATRIX_ELEM *output,
+ const MATRIX_ELEM *err,
+ MATRIX_ELEM *nerr,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ nerr[idx] = (1.0 - output[idx] * output[idx]) * err[idx];
+}
+
__global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
const MATRIX_ELEM *max, const MATRIX_ELEM *deno,
int nrow, int ncol, int stride, int mstride) {
@@ -353,6 +389,48 @@ extern "C" {
cudaStreamSynchronize(0);
}
+ void cudak_(cuda_rand_uniform)(const Matrix *a) {
+ #ifdef MATRIX_USE_FLOAT
+ curandGenerateUniform(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+ #endif
+ #ifdef MATRIX_USE_DOUBLE
+ curandGenerateUniformDouble(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+ #endif
+ }
+
+ void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+ CEIL_DIV(a->nrow, threadsPerBlock.y));
+ cudak_(thres_mask)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+ thres, low, high, a->nrow, a->ncol, a->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
+ void cudak_(cuda_tanh)(const Matrix *a, Matrix *b) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+ CEIL_DIV(b->nrow, threadsPerBlock.y));
+ cudak_(tanh)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
+ b->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
+ void cudak_(cuda_tanh_grad)(const Matrix *output,
+ const Matrix *err, Matrix *nerr) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x),
+ CEIL_DIV(nerr->nrow, threadsPerBlock.y));
+ cudak_(tanh_grad)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(output), MATRIX_ELEM_PTR(err),
+ MATRIX_ELEM_PTR(nerr),
+ nerr->nrow, nerr->ncol,
+ nerr->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) {
dim3 block(CUDA_THREADS_NN, 1);
int ncol = a->ncol;
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 7643c01..68889ad 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -10,6 +10,7 @@
#include "../../common.h"
#include "../cukernel.h"
#include "../cuda_helper.h"
+#include <curand.h>
void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
@@ -75,6 +76,39 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(rand_uniform)(Matrix *a, Status *status) {
+ PROFILE_START
+ cudak_(cuda_rand_uniform)(a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, double high, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ PROFILE_START
+ cudak_(cuda_thres_mask)(a, b, thres, low, high);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ PROFILE_START
+ cudak_(cuda_tanh)(b, a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output, Status *status) {
+ CHECK_SAME_DIMENSION(nerr, err, status);
+ CHECK_SAME_DIMENSION(nerr, output, status);
+ PROFILE_START
+ cudak_(cuda_tanh_grad)(output, err, nerr);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
Matrix *max, *max_idx;
Matrix *dno;
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index 21c29b7..aa8805a 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -9,6 +9,9 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status);
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
const Matrix *output, Status *status);
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status);
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output, Status *status);
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status);
Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status);
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index 998d107..004d9aa 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -9,6 +9,8 @@ void nerv_matrix_(data_free)(Matrix *self, Status *status) {
{
/* free matrix data */
MATRIX_DATA_FREE(MATRIX_ELEM_PTR_BASE(self), status);
+ curandDestroyGenerator(*(self->curand_gen));
+ free(self->curand_gen);
free(self->data_ref);
free(self);
}
@@ -38,6 +40,11 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
}
self->data_ref = (long *)malloc(sizeof(long));
*self->data_ref = 0;
+
+ self->curand_gen = (curandGenerator_t*)malloc(sizeof(curandGenerator_t));
+ curandCreateGenerator(self->curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
+ curandSetPseudoRandomGeneratorSeed(*(self->curand_gen), time(NULL));
+
self->offset = 0;
nerv_matrix_(data_retain)(self);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
@@ -57,6 +64,7 @@ Matrix *nerv_matrix_(getrow)(Matrix *self, int row) {
prow->nmax = prow->ncol;
prow->data = self->data;
prow->data_ref = self->data_ref;
+ prow->curand_gen = self->curand_gen;
prow->offset = row * self->stride;
nerv_matrix_(data_retain)(prow);
return prow;
diff --git a/nerv/lib/matrix/matrix.h b/nerv/lib/matrix/matrix.h
index 073bd13..a28fd97 100644
--- a/nerv/lib/matrix/matrix.h
+++ b/nerv/lib/matrix/matrix.h
@@ -2,6 +2,7 @@
#define NERV_GENERIC_MATRIX_H
#include <stddef.h>
+#include <curand.h>
typedef struct Matrix {
size_t stride; /* size of a row */
@@ -14,6 +15,7 @@ typedef struct Matrix {
} data; /* pointer to actual storage */
unsigned long offset; /* the actual beginning of the matrix */
long *data_ref;
+ curandGenerator_t *curand_gen;
} Matrix;
#define MATRIX_ROW_PTR(self, row) \
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index be3d627..df858e6 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -62,6 +62,45 @@ static int nerv_matrix_(lua_sigmoid_grad)(lua_State *L) {
return 0;
}
+static int nerv_matrix_(lua_thres_mask)(lua_State *L) {
+ Status status;
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ MATRIX_ELEM thres = luaL_checknumber(L, 3);
+ MATRIX_ELEM low = luaL_checknumber(L, 4);
+ MATRIX_ELEM high = luaL_checknumber(L, 5);
+ nerv_matrix_(thres_mask)(a, b, thres, low, high, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
+static int nerv_matrix_(lua_rand_uniform)(lua_State *L) {
+ Status status;
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ nerv_matrix_(rand_uniform)(a, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
+static int nerv_matrix_(lua_tanh)(lua_State *L) {
+ Status status;
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ nerv_matrix_(tanh)(a, b, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
+static int nerv_matrix_(lua_tanh_grad)(lua_State *L) {
+ Status status;
+ Matrix *nerr = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ Matrix *err = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ Matrix *output = luaT_checkudata(L, 3, nerv_matrix_(tname));
+ nerv_matrix_(tanh_grad)(nerr, err, output, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
static int nerv_matrix_(lua_softmax)(lua_State *L) {
Status status;
Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
@@ -328,9 +367,13 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"fill", nerv_matrix_(lua_fill)},
{"sigmoid", nerv_matrix_(lua_sigmoid)},
{"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)},
+ {"tanh", nerv_matrix_(lua_tanh)},
+ {"tanh_grad", nerv_matrix_(lua_tanh_grad)},
+ {"rand_uniform", nerv_matrix_(lua_rand_uniform)},
{"softmax", nerv_matrix_(lua_softmax)},
{"mul_elem", nerv_matrix_(lua_mul_elem)},
{"log_elem", nerv_matrix_(lua_log_elem)},
+ {"thres_mask", nerv_matrix_(lua_thres_mask)},
{"copy_rows_fromh_by_idx", nerv_matrix_(lua_copy_rows_fromh_by_idx)},
{"copy_rows_fromd_by_idx", nerv_matrix_(lua_copy_rows_fromd_by_idx)},
{"expand_frm", nerv_matrix_(lua_expand_frm)},
diff --git a/nerv/nn/param_repo.lua b/nerv/nn/param_repo.lua
index ab971ba..6d52691 100644
--- a/nerv/nn/param_repo.lua
+++ b/nerv/nn/param_repo.lua
@@ -67,6 +67,14 @@ function ParamRepo:export(param_file, pids)
cf:close()
end
+function ParamRepo:has_param(pid)
+ if self.params[pid] ~= nil then
+ return true
+ else
+ return false
+ end
+end
+
function ParamRepo:get_param(pid)
local p = self.params[pid]
if p == nil then
diff --git a/nerv/examples/lmptb/rnn/init.lua b/nerv/tnn/init.lua
index 0e08cb6..979f5d8 100644
--- a/nerv/examples/lmptb/rnn/init.lua
+++ b/nerv/tnn/init.lua
@@ -1,26 +1,26 @@
-local Layer = nerv.class('nerv.LayerT')
+local LayerT = nerv.class('nerv.LayerT')
-function Layer:__init(id, global_conf, layer_conf)
+function LayerT:__init(id, global_conf, layer_conf)
nerv.error_method_not_implemented()
end
-function Layer:init(batch_size, chunk_size)
+function LayerT:init(batch_size, chunk_size)
nerv.error_method_not_implemented()
end
-function Layer:update(bp_err, input, output, t)
+function LayerT:update(bp_err, input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:propagate(input, output, t)
+function LayerT:propagate(input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:back_propagate(bp_err, next_bp_err, input, output, t)
+function LayerT:back_propagate(bp_err, next_bp_err, input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:check_dim_len(len_in, len_out)
+function LayerT:check_dim_len(len_in, len_out)
local expected_in = #self.dim_in
local expected_out = #self.dim_out
if len_in > 0 and expected_in ~= len_in then
@@ -33,13 +33,17 @@ function Layer:check_dim_len(len_in, len_out)
end
end
-function Layer:get_params()
+function LayerT:get_params()
nerv.error_method_not_implemented()
end
-function Layer:get_dim()
+function LayerT:get_dim()
return self.dim_in, self.dim_out
end
+nerv.include('sutil.lua')
nerv.include('tnn.lua')
-nerv.include('softmax_ce_t.lua')
+nerv.include('layersT/softmax_ce_t.lua')
+nerv.include('layersT/lstm_t.lua')
+nerv.include('layersT/dropout_t.lua')
+nerv.include('layer_dag_t.lua')
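nerv.LayerT is the time-aware counterpart of nerv.Layer: init takes (batch_size, chunk_size), and propagate/back_propagate/update all receive an extra time index t. A skeleton of a custom layer under this interface, purely illustrative (the class name is made up; copy_fromd is the device copy used elsewhere in this diff):

    local IdentityLayerT = nerv.class("nerv.IdentityLayerT", "nerv.LayerT")

    function IdentityLayerT:__init(id, global_conf, layer_conf)
        self.id = id
        self.gconf = global_conf
        self.dim_in = layer_conf.dim_in
        self.dim_out = layer_conf.dim_out
        self:check_dim_len(1, 1)
    end

    function IdentityLayerT:init(batch_size, chunk_size)
        -- no parameters or buffers to allocate
    end

    function IdentityLayerT:propagate(input, output, t)
        output[1]:copy_fromd(input[1])
    end

    function IdentityLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
        next_bp_err[1]:copy_fromd(bp_err[1])
    end

    function IdentityLayerT:update(bp_err, input, output, t)
        -- no parameters, nothing to update
    end

    function IdentityLayerT:get_params()
        return nerv.ParamRepo({})
    end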
diff --git a/nerv/tnn/layer_dag_t.lua b/nerv/tnn/layer_dag_t.lua
new file mode 100644
index 0000000..b651f4e
--- /dev/null
+++ b/nerv/tnn/layer_dag_t.lua
@@ -0,0 +1,386 @@
+local DAGLayerT = nerv.class("nerv.DAGLayerT", "nerv.LayerT")
+
+local function parse_id(str)
+ local id, port, _
+ _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]")
+ if id == nil or port == nil then
+ _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]")
+ if not (id == "<input>" or id == "<output>") then
+ nerv.error("wrong format of connection id")
+ end
+ end
+ port = tonumber(port)
+ return id, port
+end
+
+local function discover(id, layers, layer_repo)
+ local ref = layers[id]
+ if id == "<input>" or id == "<output>" then
+ return nil
+ end
+ if ref == nil then
+ local layer = layer_repo:get_layer(id)
+ local dim_in, dim_out = layer:get_dim()
+ ref = {
+ id = layer.id,
+ layer = layer,
+ inputs = {},
+ outputs = {},
+ err_inputs = {},
+ err_outputs = {},
+ next_layers = {},
+ input_len = #dim_in,
+ output_len = #dim_out,
+ in_deg = 0,
+ visited = false
+ }
+ layers[id] = ref
+ end
+ return ref
+end
+
+function DAGLayerT:__init(id, global_conf, layer_conf)
+ local layers = {}
+ local inputs = {}
+ local outputs = {}
+ local dim_in = layer_conf.dim_in
+ local dim_out = layer_conf.dim_out
+ local parsed_conn = {}
+ for from, to in pairs(layer_conf.connections) do
+ local id_from, port_from = parse_id(from)
+ local id_to, port_to = parse_id(to)
+ local ref_from = discover(id_from, layers, layer_conf.sub_layers)
+ local ref_to = discover(id_to, layers, layer_conf.sub_layers)
+ local input_dim, output_dim, _
+ if id_from == "<input>" then
+ input_dim, _ = ref_to.layer:get_dim()
+ if dim_in[port_from] ~= input_dim[port_to] then
+ nerv.error("mismatching data dimension between %s and %s", from, to)
+ end
+ inputs[port_from] = {ref_to, port_to}
+ if ref_to.inputs[1] == nil then
+ ref_to.inputs[1] = {}
+ end
+ if ref_to.inputs[1][port_to] ~= nil then
+ nerv.error("port(%d) for layer(%s) already attached", port_to, to)
+ end
+ ref_to.inputs[1][port_to] = inputs -- just a place holder
+ elseif id_to == "<output>" then
+ _, output_dim = ref_from.layer:get_dim()
+ if output_dim[port_from] ~= dim_out[port_to] then
+ nerv.error("mismatching data dimension between %s and %s", from, to)
+ end
+ outputs[port_to] = {ref_from, port_from}
+ if ref_from.outputs[1] == nil then
+ ref_from.outputs[1] = {}
+ end
+ if ref_from.outputs[1][port_from] ~= nil then
+ nerv.error("port(%d) for layer(%s) already attached", port_from, from)
+ end
+ ref_from.outputs[1][port_from] = outputs -- just a place holder
+ else
+ _, output_dim = ref_from.layer:get_dim()
+ input_dim, _ = ref_to.layer:get_dim()
+ if output_dim[port_from] ~= input_dim[port_to] then
+ nerv.error("mismatching data dimension between %s and %s", from, to)
+ end
+
+ table.insert(parsed_conn,
+ {{ref_from, port_from}, {ref_to, port_to}})
+ table.insert(ref_from.next_layers, ref_to) -- add edge
+ ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer
+ end
+ end
+
+ -- topology sort
+ local queue = {}
+ local l = 1
+ local r = 1
+ for id, ref in pairs(layers) do
+ if ref.in_deg == 0 then
+ table.insert(queue, ref)
+ nerv.info("adding source layer: %s", id)
+ r = r + 1
+ end
+ end
+ if l == r then
+ nerv.error("loop detected")
+ end
+ while l < r do
+ local cur = queue[l]
+ cur.visited = true
+ l = l + 1
+ for _, nl in pairs(cur.next_layers) do
+ nl.in_deg = nl.in_deg - 1
+ if nl.in_deg == 0 then
+ table.insert(queue, nl)
+ r = r + 1
+ end
+ end
+ end
+ for i = 1, #queue do
+ nerv.info("enqueued layer: %s %s", queue[i].layer, queue[i].layer.id)
+ end
+
+ for id, ref in pairs(layers) do
+ -- check whether the graph is connected
+ if ref.visited == false then
+ nerv.warning("layer %s is ignored", id)
+ end
+ end
+
+ self.layers = layers
+ self.inputs = inputs
+ self.outputs = outputs
+ self.id = id
+ self.dim_in = dim_in
+ self.dim_out = dim_out
+ self.parsed_conn = parsed_conn
+ self.queue = queue
+ self.gconf = global_conf
+end
+
+function DAGLayerT:init(batch_size, chunk_size)
+ nerv.info("initing DAGLayerT %s...", self.id)
+ if chunk_size == nil then
+ chunk_size = 1
+ nerv.info("(Initing DAGLayerT) chunk_size is nil, setting it to default 1\n")
+ end
+
+ self.chunk_size = chunk_size
+
+ for i, conn in ipairs(self.parsed_conn) do
+ local _, output_dim
+ local ref_from, port_from, ref_to, port_to
+ ref_from, port_from = unpack(conn[1])
+ ref_to, port_to = unpack(conn[2])
+ _, output_dim = ref_from.layer:get_dim()
+ local dim = 1
+ if output_dim[port_from] > 0 then
+ dim = output_dim[port_from]
+ end
+
+ for t = 1, chunk_size do
+ local mid = self.gconf.cumat_type(batch_size, dim)
+ local err_mid = mid:create()
+
+ if ref_from.outputs[t] == nil then
+ ref_from.outputs[t] = {}
+ end
+ if ref_to.inputs[t] == nil then
+ ref_to.inputs[t] = {}
+ end
+ if ref_to.err_outputs[t] == nil then
+ ref_to.err_outputs[t] = {}
+ end
+ if ref_from.err_inputs[t] == nil then
+ ref_from.err_inputs[t] = {}
+ end
+
+ ref_from.outputs[t][port_from] = mid
+ ref_to.inputs[t][port_to] = mid
+
+ ref_from.err_inputs[t][port_from] = err_mid
+ ref_to.err_outputs[t][port_to] = err_mid
+ end
+ end
+ for id, ref in pairs(self.layers) do
+ for i = 1, ref.input_len do
+ if ref.inputs[1][i] == nil then --peek at time 1
+ nerv.error("dangling input port %d of layer %s", i, id)
+ end
+ end
+ for i = 1, ref.output_len do
+ if ref.outputs[1][i] == nil then --peek at time 1
+ nerv.error("dangling output port %d of layer %s", i, id)
+ end
+ end
+ -- initialize sub layers
+ ref.layer:init(batch_size, chunk_size)
+ end
+ for i = 1, #self.dim_in do
+ if self.inputs[i] == nil then
+ nerv.error("dangling port %d of layer <input>", i)
+ end
+ end
+ for i = 1, #self.dim_out do
+ if self.outputs[i] == nil then
+ nerv.error("dangling port %d of layer <output>", i)
+ end
+ end
+end
+
+function DAGLayerT:batch_resize(batch_size, chunk_size)
+ if chunk_size == nil then
+ chunk_size = 1
+ end
+ if batch_size ~= self.gconf.batch_size
+ or chunk_size ~= self.gconf.chunk_size then
+ nerv.printf("warn: in DAGLayerT:batch_resize, the batch_size ~= gconf.batch_size, or chunk_size ~= gconf.chunk_size")
+ end
+ self.gconf.batch_size = batch_size
+ self.gconf.chunk_size = chunk_size
+
+ for i, conn in ipairs(self.parsed_conn) do
+ local _, output_dim
+ local ref_from, port_from, ref_to, port_to
+ ref_from, port_from = unpack(conn[1])
+ ref_to, port_to = unpack(conn[2])
+ _, output_dim = ref_from.layer:get_dim()
+ local dim = 1
+ if output_dim[port_from] > 0 then
+ dim = output_dim[port_from]
+ end
+
+ for t = 1, chunk_size do
+ if ref_from.outputs[t] == nil then
+ ref_from.outputs[t] = {}
+ end
+ if ref_to.inputs[t] == nil then
+ ref_to.inputs[t] = {}
+ end
+ if ref_to.err_outputs[t] == nil then
+ ref_to.err_outputs[t] = {}
+ end
+ if ref_from.err_inputs[t] == nil then
+ ref_from.err_inputs[t] = {}
+ end
+
+ local mid = self.gconf.cumat_type(batch_size, dim)
+ local err_mid = mid:create()
+
+ ref_from.outputs[t][port_from] = mid
+ ref_to.inputs[t][port_to] = mid
+
+ ref_from.err_inputs[t][port_from] = err_mid
+ ref_to.err_outputs[t][port_to] = err_mid
+ end
+ end
+ for id, ref in pairs(self.layers) do
+ ref.layer:batch_resize(batch_size, chunk_size)
+ end
+ collectgarbage("collect")
+end
+
+function DAGLayerT:set_inputs(input, t)
+ for i = 1, #self.dim_in do
+ if input[i] == nil then
+ nerv.error("some input is not provided");
+ end
+ local layer = self.inputs[i][1]
+ local port = self.inputs[i][2]
+ if layer.inputs[t] == nil then
+ layer.inputs[t] = {}
+ end
+ layer.inputs[t][port] = input[i]
+ end
+end
+
+function DAGLayerT:set_outputs(output, t)
+ for i = 1, #self.dim_out do
+ if output[i] == nil then
+ nerv.error("some output is not provided");
+ end
+ local layer = self.outputs[i][1]
+ local port = self.outputs[i][2]
+ if layer.outputs[t] == nil then
+ layer.outputs[t] = {}
+ end
+ layer.outputs[t][port] = output[i]
+ end
+end
+
+function DAGLayerT:set_err_inputs(bp_err, t)
+ for i = 1, #self.dim_out do
+ local layer = self.outputs[i][1]
+ local port = self.outputs[i][2]
+ if layer.err_inputs[t] == nil then
+ layer.err_inputs[t] = {}
+ end
+ layer.err_inputs[t][port] = bp_err[i]
+ end
+end
+
+function DAGLayerT:set_err_outputs(next_bp_err, t)
+ for i = 1, #self.dim_in do
+ local layer = self.inputs[i][1]
+ local port = self.inputs[i][2]
+ if layer.err_outputs[t] == nil then
+ layer.err_outputs[t] = {}
+ end
+ layer.err_outputs[t][port] = next_bp_err[i]
+ end
+end
+
+function DAGLayerT:update(bp_err, input, output, t)
+ if t == nil then
+ t = 1
+ end
+ self:set_err_inputs(bp_err, t)
+ self:set_inputs(input, t)
+ self:set_outputs(output, t)
+ for id, ref in pairs(self.queue) do
+ ref.layer:update(ref.err_inputs[t], ref.inputs[t], ref.outputs[t], t)
+ end
+end
+
+function DAGLayerT:propagate(input, output, t)
+ if t == nil then
+ t = 1
+ end
+ self:set_inputs(input, t)
+ self:set_outputs(output, t)
+ local ret = false
+ for i = 1, #self.queue do
+ local ref = self.queue[i]
+ --print("debug DAGLAyerT:propagate", ref.id, t)
+ ret = ref.layer:propagate(ref.inputs[t], ref.outputs[t], t)
+ end
+ return ret
+end
+
+function DAGLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
+ if t == nil then
+ t = 1
+ end
+ self:set_err_outputs(next_bp_err, t)
+ self:set_err_inputs(bp_err, t)
+ self:set_inputs(input, t)
+ self:set_outputs(output, t)
+ for i = #self.queue, 1, -1 do
+ local ref = self.queue[i]
+ ref.layer:back_propagate(ref.err_inputs[t], ref.err_outputs[t], ref.inputs[t], ref.outputs[t], t)
+ end
+end
+
+function DAGLayerT:get_params()
+ local param_repos = {}
+ for id, ref in pairs(self.queue) do
+ table.insert(param_repos, ref.layer:get_params())
+ end
+ return nerv.ParamRepo.merge(param_repos)
+end
+
+DAGLayerT.PORT_TYPES = {
+ INPUT = {},
+ OUTPUT = {},
+ ERR_INPUT = {},
+ ERR_OUTPUT = {}
+}
+
+function DAGLayerT:get_intermediate(id, port_type)
+ if id == "<input>" or id == "<output>" then
+ nerv.error("an actual real layer id is expected")
+ end
+ local layer = self.layers[id]
+ if layer == nil then
+ nerv.error("layer id %s not found", id)
+ end
+ if port_type == DAGLayerT.PORT_TYPES.INPUT then
+ return layer.inputs
+ elseif port_type == DAGLayerT.PORT_TYPES.OUTPUT then
+ return layer.outputs
+ elseif port_type == DAGLayerT.PORT_TYPES.ERR_INPUT then
+ return layer.err_inputs
+ elseif port_type == DAGLayerT.PORT_TYPES.ERR_OUTPUT then
+ return layer.err_outputs
+ end
+ nerv.error("unrecognized port type")
+end
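DAGLayerT is wired the same way as the non-temporal DAGLayer: sub_layers is a nerv.LayerRepo and connections maps "<input>[i]" / "layerid[port]" sources to sinks; the addition is that every port gets per-time-step buffers up to chunk_size. A toy two-layer instance (layer ids, dimensions and the surrounding gconf/layer_repo are made up for illustration):

    -- assumes layer_repo already contains "affine0" (dim 100 -> 200) and
    -- "tanh0" (dim 200 -> 200), and that gconf is the usual global_conf
    local dag = nerv.DAGLayerT("dagL_demo", gconf, {
        dim_in = {100}, dim_out = {200},
        sub_layers = layer_repo,
        connections = {
            ["<input>[1]"] = "affine0[1]",
            ["affine0[1]"] = "tanh0[1]",
            ["tanh0[1]"]   = "<output>[1]",
        },
    })
    dag:init(gconf.batch_size, gconf.chunk_size)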
diff --git a/nerv/tnn/layersT/dropout_t.lua b/nerv/tnn/layersT/dropout_t.lua
new file mode 100644
index 0000000..4351285
--- /dev/null
+++ b/nerv/tnn/layersT/dropout_t.lua
@@ -0,0 +1,71 @@
+local Dropout = nerv.class("nerv.DropoutLayerT", "nerv.LayerT")
+
+function Dropout:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.gconf = global_conf
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self:check_dim_len(1, 1) -- one input and one output
+end
+
+function Dropout:init(batch_size, chunk_size)
+ if self.dim_in[1] ~= self.dim_out[1] then
+ nerv.error("mismatching dimensions of input and output")
+ end
+ if chunk_size == nil then
+ chunk_size = 1
+ end
+ self.mask_t = {}
+ for t = 1, chunk_size do
+ self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
+ end
+end
+
+function Dropout:batch_resize(batch_size, chunk_size)
+ if chunk_size == nil then
+ chunk_size = 1
+ end
+ for t = 1, chunk_size do
+ if self.mask_t[t] == nil or self.mask_t[t]:nrow() ~= batch_size then
+ self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
+ end
+ end
+end
+
+function Dropout:propagate(input, output, t)
+ if t == nil then
+ t = 1
+ end
+ if self.gconf.dropout_rate == nil then
+ nerv.info("DropoutLayerT:propagate warning, global_conf.dropout_rate is nil, setting it zero")
+ self.gconf.dropout_rate = 0
+ end
+
+ if self.gconf.dropout_rate == 0 then
+ output[1]:copy_fromd(input[1])
+ else
+ self.mask_t[t]:rand_uniform()
+ --since a portion of the activations is dropped, scale the survivors by 1/(1 - dropout_rate) so the expected activation is unchanged
+ self.mask_t[t]:thres_mask(self.mask_t[t], self.gconf.dropout_rate, 0, 1 / (1.0 - self.gconf.dropout_rate))
+ output[1]:mul_elem(input[1], self.mask_t[t])
+ end
+end
+
+function Dropout:update(bp_err, input, output, t)
+ -- no params, therefore do nothing
+end
+
+function Dropout:back_propagate(bp_err, next_bp_err, input, output, t)
+ if t == nil then
+ t = 1
+ end
+ if self.gconf.dropout_rate == 0 then
+ next_bp_err[1]:copy_fromd(bp_err[1])
+ else
+ next_bp_err[1]:mul_elem(bp_err[1], self.mask_t[t])
+ end
+end
+
+function Dropout:get_params()
+ return nerv.ParamRepo({})
+end
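The mask is built entirely on the GPU: rand_uniform fills mask_t[t] with uniform samples and thres_mask turns each sample into 0 (dropped) or 1/(1 - dropout_rate) (kept and rescaled), so the expected output equals the input; with dropout_rate = 0.2 a kept unit is scaled by 1.25 and 0.8 * 1.25 = 1. The rate is typically driven per epoch through gconf, for instance via the schedule utility introduced below. A sketch, assuming a gconf.dropout_str field of your own choosing, not a fixed trainer API:

    -- e.g. gconf.dropout_str = "0*2:0.2*8"  (no dropout for 2 epochs, then 0.2)
    local dropout_sched = nerv.SUtil.parse_schedule(gconf.dropout_str)
    for epoch = 1, #dropout_sched do
        gconf.dropout_rate = nerv.SUtil.sche_get(dropout_sched, epoch)
        -- ... run one training epoch; DropoutLayerT reads gconf.dropout_rate ...
    end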
diff --git a/nerv/tnn/layersT/lstm_t.lua b/nerv/tnn/layersT/lstm_t.lua
new file mode 100644
index 0000000..04d0600
--- /dev/null
+++ b/nerv/tnn/layersT/lstm_t.lua
@@ -0,0 +1,124 @@
+local LSTMLayerT = nerv.class('nerv.LSTMLayerT', 'nerv.LayerT')
+
+function LSTMLayerT:__init(id, global_conf, layer_conf)
+ --input1:x input2:h input3:c
+ self.id = id
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+
+ --prepare a DAGLayerT to hold the lstm structure
+ local pr = layer_conf.pr
+ if pr == nil then
+ pr = nerv.ParamRepo()
+ end
+
+ local function ap(str)
+ return self.id .. '.' .. str
+ end
+
+ local layers = {
+ ["nerv.CombinerLayer"] = {
+ [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
+ ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
+ [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
+ [ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
+ [ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]},
+ ["lambda"] = {1, 1}}},
+ },
+ ["nerv.AffineLayer"] = {
+ [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
+ },
+ ["nerv.TanhLayer"] = {
+ [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+ [ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+ },
+ ["nerv.GateFLayer"] = {
+ [ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+ [ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+ [ap("outputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+
+ },
+ ["nerv.ElemMulLayer"] = {
+ [ap("inputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ [ap("forgetGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ [ap("outputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ },
+ }
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+
+ local connections_t = {
+ ["<input>[1]"] = ap("inputXDup[1]"),
+ ["<input>[2]"] = ap("inputHDup[1]"),
+ ["<input>[3]"] = ap("inputCDup[1]"),
+
+ [ap("inputXDup[1]")] = ap("mainAffineL[1]"),
+ [ap("inputHDup[1]")] = ap("mainAffineL[2]"),
+ [ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
+
+ [ap("inputXDup[2]")] = ap("inputGateL[1]"),
+ [ap("inputHDup[2]")] = ap("inputGateL[2]"),
+ [ap("inputCDup[1]")] = ap("inputGateL[3]"),
+
+ [ap("inputXDup[3]")] = ap("forgetGateL[1]"),
+ [ap("inputHDup[3]")] = ap("forgetGateL[2]"),
+ [ap("inputCDup[2]")] = ap("forgetGateL[3]"),
+
+ [ap("mainTanhL[1]")] = ap("inputGMulL[1]"),
+ [ap("inputGateL[1]")] = ap("inputGMulL[2]"),
+
+ [ap("inputCDup[3]")] = ap("forgetGMulL[1]"),
+ [ap("forgetGateL[1]")] = ap("forgetGMulL[2]"),
+
+ [ap("inputGMulL[1]")] = ap("mainCDup[1]"),
+ [ap("forgetGMulL[1]")] = ap("mainCDup[2]"),
+
+ [ap("inputXDup[4]")] = ap("outputGateL[1]"),
+ [ap("inputHDup[4]")] = ap("outputGateL[2]"),
+ [ap("mainCDup[3]")] = ap("outputGateL[3]"),
+
+ [ap("mainCDup[2]")] = "<output>[2]",
+ [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
+
+ [ap("outputTanhL[1]")] = ap("outputGMulL[1]"),
+ [ap("outputGateL[1]")] = ap("outputGMulL[2]"),
+
+ [ap("outputGMulL[1]")] = "<output>[1]",
+ }
+ self.dagL = nerv.DAGLayerT(self.id, global_conf,
+ {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t})
+
+ self:check_dim_len(3, 2) -- x, h, c and h, c
+end
+
+function LSTMLayerT:init(batch_size, chunk_size)
+ self.dagL:init(batch_size, chunk_size)
+end
+
+function LSTMLayerT:batch_resize(batch_size, chunk_size)
+ self.dagL:batch_resize(batch_size, chunk_size)
+end
+
+function LSTMLayerT:update(bp_err, input, output, t)
+ self.dagL:update(bp_err, input, output, t)
+end
+
+function LSTMLayerT:propagate(input, output, t)
+ self.dagL:propagate(input, output, t)
+end
+
+function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
+ self.dagL:back_propagate(bp_err, next_bp_err, input, output, t)
+end
+
+function LSTMLayerT:get_params()
+ return self.dagL:get_params()
+end
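LSTMLayerT therefore exposes three input ports (x, h, c) and two output ports (h, c), with all gate and affine parameters resolved through the shared ParamRepo passed as layer_conf.pr. Declaring one inside a layer repo might look like the sketch below; the layer id and dimensions are illustrative, and dim_out[2] must match the cell size dim_in[3]:

    local layers = {
        ["nerv.LSTMLayerT"] = {
            ["lstmL1"] = {{}, {["dim_in"] = {300, 300, 300},
                               ["dim_out"] = {300, 300}, ["pr"] = pr}},
        },
        -- ... other layers ...
    }
    local layer_repo = nerv.LayerRepo(layers, pr, gconf)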
diff --git a/nerv/examples/lmptb/rnn/softmax_ce_t.lua b/nerv/tnn/layersT/softmax_ce_t.lua
index dddb05a..a9ce975 100644
--- a/nerv/examples/lmptb/rnn/softmax_ce_t.lua
+++ b/nerv/tnn/layersT/softmax_ce_t.lua
@@ -16,6 +16,9 @@ function SoftmaxCELayer:init(batch_size, chunk_size)
if not self.compressed and (self.dim_in[1] ~= self.dim_in[2]) then
nerv.error("mismatching dimensions of previous network output and labels")
end
+ if chunk_size == nil then
+ chunk_size = 1
+ end
self.total_ce = 0.0
self.total_correct = 0
self.total_frames = 0
@@ -27,9 +30,12 @@ function SoftmaxCELayer:init(batch_size, chunk_size)
end
end
-function SoftmaxCELayer:batch_resize(batch_size)
+function SoftmaxCELayer:batch_resize(batch_size, chunk_size)
+ if chunk_size == nil then
+ chunk_size = 1
+ end
for t = 1, chunk_size do
- if self.softmax_t[t]:nrow() ~= batch_resize then
+ if self.softmax_t[t]:nrow() ~= batch_size then
self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
end
@@ -41,6 +47,9 @@ function SoftmaxCELayer:update(bp_err, input, output, t)
end
function SoftmaxCELayer:propagate(input, output, t)
+ if t == nil then
+ t = 1
+ end
local softmax = self.softmax_t[t]
local ce = self.ce_t[t]
local classified = softmax:softmax(input[1])
@@ -65,6 +74,9 @@ end
function SoftmaxCELayer:back_propagate(bp_err, next_bp_err, input, output, t)
-- softmax output - label
+ if t == nil then
+ t = 1
+ end
local label = input[2]
if self.compressed then
label = label:decompress(input[1]:ncol())
diff --git a/nerv/tnn/sutil.lua b/nerv/tnn/sutil.lua
new file mode 100644
index 0000000..6a968b7
--- /dev/null
+++ b/nerv/tnn/sutil.lua
@@ -0,0 +1,80 @@
+local Util = nerv.class("nerv.SUtil") --Scheduler Utility
+
+function Util.simple_split(inputstr, sep)
+ if sep == nil then
+ sep = "%s"
+ end
+ local t = {}; local i = 1
+ for str in string.gmatch(inputstr, "([^"..sep.."]+)") do
+ t[i] = str
+ i = i + 1
+ end
+ return t
+end
+
+function Util.parse_schedule(str)
+ --parse a string like "1.2*10:1.5" to a list of numbers
+ local sch = {}
+ local s = Util.simple_split(str, ':')
+ for i = 1, #s do
+ local p = Util.simple_split(s[i], "%*")
+ if #p ~= 2 and #p ~= 1 then
+ nerv.error("nerv.SUtil:parse_schedule error, unit(%s) not proper, has %d components.", s[i], #p)
+ end
+ if p[2] == nil then
+ p[2] = "1"
+ end
+ p[1] = tonumber(p[1])
+ p[2] = tonumber(p[2])
+ for j = 1, p[2] do
+ table.insert(sch, p[1])
+ end
+ end
+
+ --for i = 1, #sch do
+ -- print(sch[i])
+ --end
+ return sch
+end
+
+function Util.sche_get(s, it)
+ --get s[it]
+ if s == nil then
+ nerv.info("Util.sche_get: warning, scheule is nil, returning zero...")
+ return 0
+ end
+ if #s >= it then
+ return s[it]
+ else
+ nerv.info("Util.sche_get: warning, it(%d) > #schedule(%d), returning the last one of schedule(%f)...", it, #s, s[#s])
+ return s[#s]
+ end
+end
+
+function Util.parse_commands_set(str)
+ local coms = {}
+ local s = Util.simple_split(str, ':,')
+ for i = 1 ,#s do
+ if coms[s[i]] == 1 then
+ nerv.warning("nerv.SUtil.parse_commands_set command(%s) appered more than once in command_set(%s)", s[i], str)
+ end
+ coms[s[i]] = 1
+ end
+ return coms
+end
+
+function Util.log_redirect(fn)
+ nerv.log_fh = assert(io.open(fn, "w"))
+ nerv.info("CAUTION[LOG_REDIRECT], all nerv.printf/info/warning/error calls will be double-written to %s", fn)
+ nerv.printf =
+ function (fmt, ...)
+ io.write(nerv.sprintf(fmt, ...))
+ nerv.log_fh:write(nerv.sprintf(fmt, ...))
+ nerv.log_fh:flush()
+ end
+ nerv.error =
+ function (fmt, ...)
+ nerv.log_fh:write(nerv.sprintf("[nerv] internal error:" .. fmt .. "\n", ...))
+ error(nerv.sprintf("[nerv] internal error: " .. fmt .. "\n", ...))
+ end
+end
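parse_schedule expands each colon-separated "value*count" unit into count copies of value, so "1.2*10:1.5" becomes ten entries of 1.2 followed by a single 1.5; sche_get then clamps the iteration index to the last entry. For example:

    local sch = nerv.SUtil.parse_schedule("1.2*10:1.5")
    print(#sch)                          -- 11
    print(nerv.SUtil.sche_get(sch, 3))   -- 1.2
    print(nerv.SUtil.sche_get(sch, 11))  -- 1.5
    print(nerv.SUtil.sche_get(sch, 20))  -- 1.5 (past the end, the last value is reused)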
diff --git a/nerv/examples/lmptb/rnn/tnn.lua b/nerv/tnn/tnn.lua
index c2e397c..7ae3172 100644
--- a/nerv/examples/lmptb/rnn/tnn.lua
+++ b/nerv/tnn/tnn.lua
@@ -1,4 +1,4 @@
-local TNN = nerv.class("nerv.TNN", "nerv.Layer")
+local TNN = nerv.class("nerv.TNN")
local function parse_id(str)
--used to parse layerid[portid],time
@@ -31,6 +31,7 @@ local function discover(id, layers, layer_repo)
local dim_in, dim_out = layer:get_dim()
ref = {
layer = layer,
+ id = layer.id,
inputs_m = {}, --storage for computation, inputs_m[time][port]
inputs_b = {}, --inputs_g[time][port], whether this input can been computed
inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation, inputs_p_matbak[port]
@@ -58,12 +59,12 @@ nerv.TNN.FC.HAS_INPUT = 1
nerv.TNN.FC.HAS_LABEL = 2
nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --This instance have both input and label
-function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, global_conf, st_c, p_c, t_c)
- --Return a table of matrix storage from time (1-chunk_size)..(2*chunk_size)
+function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, extend_t, global_conf, st_c, p_c, t_c)
+ --Return a table of matrix storage from time (1-extend_t)..(chunk_size+extend_t)
if (type(st) ~= "table") then
nerv.error("st should be a table")
end
- for i = 1 - chunk_size - 1, chunk_size * 2 + 1 do --intentionally allocated more time, should be [1-chunk_size, chunk_size*2]
+ for i = 1 - extend_t - 2, chunk_size + extend_t + 2 do --intentionally allocated more time
if (st[i] == nil) then
st[i] = {}
end
@@ -89,6 +90,20 @@ function TNN:out_of_feedrange(t) --out of chunk, or no input, for the current fe
end
function TNN:__init(id, global_conf, layer_conf)
+ self.clip_t = layer_conf.clip_t
+ if self.clip_t == nil then
+ self.clip_t = 0
+ end
+ if self.clip_t > 0 then
+ nerv.info("tnn(%s) will clip gradient across time with %f...", id, self.clip_t)
+ end
+
+ self.extend_t = layer_conf.extend_t --TNN will allocate storage of time for 1-extend_t .. chunk_size+extend_t
+ if self.extend_t == nil then
+ self.extend_t = 5
+ end
+ nerv.info("tnn(%s) will extend storage beyond MB border for time steps %d...", id, self.extend_t)
+
local layers = {}
local inputs_p = {} --map:port of the TNN to layer ref and port
local outputs_p = {}
@@ -163,11 +178,11 @@ function TNN:init(batch_size, chunk_size)
nerv.error("layer %s has a zero dim port", ref_from.layer.id)
end
- print("TNN initing storage", ref_from.layer.id, "->", ref_to.layer.id)
+ nerv.info("TNN initing storage %s->%s", ref_from.layer.id, ref_to.layer.id)
ref_to.inputs_matbak_p[port_to] = self.gconf.cumat_type(batch_size, dim)
- self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.inputs_m, port_to, time)
+ self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.inputs_m, port_to, time)
ref_from.err_inputs_matbak_p[port_from] = self.gconf.cumat_type(batch_size, dim)
- self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.err_outputs_m, port_to, time)
+ self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.err_outputs_m, port_to, time)
end
@@ -176,8 +191,8 @@ function TNN:init(batch_size, chunk_size)
for i = 1, #self.dim_out do --Init storage for output ports
local ref = self.outputs_p[i].ref
local p = self.outputs_p[i].port
- self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.outputs_m, i, 0)
- self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.err_inputs_m, i, 0)
+ self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.outputs_m, i, 0)
+ self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_inputs_m, i, 0)
end
self.inputs_m = {}
@@ -185,8 +200,8 @@ function TNN:init(batch_size, chunk_size)
for i = 1, #self.dim_in do --Init storage for input ports
local ref = self.inputs_p[i].ref
local p = self.inputs_p[i].port
- self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.inputs_m, i, 0)
- self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.err_outputs_m, i, 0)
+ self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.inputs_m, i, 0)
+ self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_outputs_m, i, 0)
end
for id, ref in pairs(self.layers) do --Calling init for child layers
@@ -252,7 +267,7 @@ function TNN:flush_all() --flush all history and activation
local _, ref
for _, ref in pairs(self.layers) do
for i = 1, #ref.dim_in do
- for t = 1 - self.chunk_size, self.chunk_size * 2 do
+ for t = 1 - self.extend_t, self.chunk_size + self.extend_t do
ref.inputs_m[t][i]:fill(self.gconf.nn_act_default)
if (ref.inputs_b[t] == nil) then
ref.inputs_b[t] = {}
@@ -266,7 +281,7 @@ function TNN:flush_all() --flush all history and activation
end
end
for i = 1, #ref.dim_out do
- for t = 1 - self.chunk_size, self.chunk_size * 2 do
+ for t = 1 - self.extend_t, self.chunk_size + self.extend_t do
ref.outputs_m[t][i]:fill(self.gconf.nn_act_default)
if (ref.outputs_b[t] == nil) then
ref.outputs_b[t] = {}
@@ -294,13 +309,13 @@ end
function TNN:move_right_to_nextmb(list_t) --move output history activations of 1..chunk_size to 1-chunk_size..0
if list_t == nil then
list_t = {}
- for i = 1, self.chunk_size do
- list_t[i] = i - self.chunk_size
+ for i = self.extend_t, 1, -1 do
+ list_t[i] = 1 - i
end
end
for i = 1, #list_t do
t = list_t[i]
- if t < 1 - self.chunk_size or t > 0 then
+ if t < 1 - self.extend_t or t > 0 then
nerv.error("MB move range error")
end
for id, ref in pairs(self.layers) do
@@ -324,6 +339,11 @@ function TNN:net_propagate() --propagate according to feeds_now
end
local feeds_now = self.feeds_now
+ for t = 1, self.chunk_size do --some layers may not have inputs at every time step 1..chunk_size
+ for id, ref in pairs(self.layers) do
+ self:propagate_dfs(ref, t)
+ end
+ end
for t = 1, self.chunk_size do
if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then
for i = 1, #self.dim_in do
@@ -347,6 +367,7 @@ function TNN:net_propagate() --propagate according to feeds_now
end
end
end
+
if (flag_out == false) then
nerv.error("some thing wrong, some labeled output is not propagated")
end
@@ -378,7 +399,7 @@ function TNN:propagate_dfs(ref, t)
--ok, do propagate
--print("debug ok, propagating");
- --[[
+ --moving the minibatch will change the bordering history, so it is wiser to flush the input activation
if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border history
for i = 1, self.batch_size do
local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START)
@@ -386,18 +407,16 @@ function TNN:propagate_dfs(ref, t)
if (seq_start > 0 or seq_end > 0) then
for p, conn in pairs(ref.i_conns_p) do
if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to default
- ref.inputs_matbak_p[p][i - 1]:copy_fromd(ref.inputs_m[t][p][i - 1])
ref.inputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default)
end
end
end
end
end
- ]]--
self.gconf.timer:tic("tnn_actual_layer_propagate")
ref.layer:propagate(ref.inputs_m[t], ref.outputs_m[t], t) --propagate!
self.gconf.timer:toc("tnn_actual_layer_propagate")
-
+ --[[
if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --restore cross-border history
for i = 1, self.batch_size do
local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START)
@@ -411,6 +430,7 @@ function TNN:propagate_dfs(ref, t)
end
end
end
+ ]]--
--set input flag for future layers
for i = 1, #ref.dim_out do
if (ref.outputs_b[t][i] == true) then
@@ -429,7 +449,7 @@ end
--do_update: bool, whether we are doing back-propagate or updating the parameters
function TNN:net_backpropagate(do_update) --propagate according to feeds_now
- if (do_update == nil) then
+ if do_update == nil then
nerv.error("do_update should not be nil")
end
for t = 1, self.chunk_size, 1 do
@@ -444,8 +464,13 @@ function TNN:net_backpropagate(do_update) --propagate according to feeds_now
end
local feeds_now = self.feeds_now
+ for t = 1, self.chunk_size do --some layers may not have outputs at every time step 1..chunk_size
+ for id, ref in pairs(self.layers) do
+ self:backpropagate_dfs(ref, t, do_update)
+ end
+ end
for t = 1, self.chunk_size do
- if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0) then
+ if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0 then
for i = 1, #self.dim_out do
local ref = self.outputs_p[i].ref
local p = self.outputs_p[i].port
@@ -457,10 +482,10 @@ function TNN:net_backpropagate(do_update) --propagate according to feeds_now
local flag_out = true
for t = 1, self.chunk_size do --check whether every output has been computed
- if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then
+ if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0 then
for i = 1, #self.dim_in do
local ref = self.inputs_p[i].ref
- if (ref.err_outputs_b[t][1] ~= true) then
+ if ref.err_outputs_b[t][1] ~= true then
flag_out = false
break
end
@@ -475,10 +500,13 @@ end
--ref: the TNN_ref of a layer
--t: the current time to propagate
function TNN:backpropagate_dfs(ref, t, do_update)
- if (self:out_of_feedrange(t)) then
+ if do_update == nil then
+ nerv.error("got a nil do_update")
+ end
+ if self:out_of_feedrange(t) then
return
end
- if (ref.err_outputs_b[t][1] == true) then --already back_propagated, 1 is just a random port
+ if ref.err_outputs_b[t][1] == true then --already back_propagated, 1 is just a random port
return
end
@@ -501,7 +529,16 @@ function TNN:backpropagate_dfs(ref, t, do_update)
if (do_update == false) then
self.gconf.timer:tic("tnn_actual_layer_backpropagate")
ref.layer:back_propagate(ref.err_inputs_m[t], ref.err_outputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t)
- self.gconf.timer:toc("tnn_actual_layer_backpropagate")
+ self.gconf.timer:toc("tnn_actual_layer_backpropagate")
+ if self.clip_t > 0 then
+ for _, conn in pairs(ref.i_conns_p) do
+ local p = conn.dst.port --port for ref
+ if conn.time ~= 0 then
+ --print("debug clip_t tnn", ref.id, "port:", p, "clip:", self.clip_t)
+ ref.err_outputs_m[t][p]:clip(-self.clip_t, self.clip_t)
+ end
+ end
+ end
else
--print(ref.err_inputs_m[t][1])
self.gconf.timer:tic("tnn_actual_layer_update")
@@ -541,7 +578,7 @@ end
--Return: nerv.ParamRepo
function TNN:get_params()
local param_repos = {}
- for id, ref in pairs(self.queue) do
+ for id, ref in pairs(self.layers) do
table.insert(param_repos, ref.layer:get_params())
end
return nerv.ParamRepo.merge(param_repos)
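Both new TNN options are plain entries of layer_conf: extend_t widens the per-port activation history kept across minibatch borders (default 5 time steps), and clip_t, when positive, clips the error signals flowing through time-shifted connections to [-clip_t, clip_t] via the matrix clip call above. A configuration sketch, only showing the two new fields on top of an otherwise unchanged layer_conf (the variable names are illustrative):

    -- tnn_layer_conf already carries dim_in/dim_out/sub_layers/connections,
    -- where connection entries use the "layerid[port],time" format parsed above
    tnn_layer_conf.clip_t = 8     -- clip recurrent error signals to [-8, 8]
    tnn_layer_conf.extend_t = 5   -- keep 5 extra time steps of history per port
    local tnn = nerv.TNN("tnn", gconf, tnn_layer_conf)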