Diffstat (limited to 'nerv')
-rw-r--r--  nerv/examples/lmptb/lmptb/layer/select_linear.lua  |  4
-rw-r--r--  nerv/examples/lmptb/rnn/init.lua                   | 21
-rw-r--r--  nerv/examples/lmptb/rnn/layers/gate_fff.lua        | 71
-rw-r--r--  nerv/examples/lmptb/rnn/layersT/softmax_ce_t.lua (renamed from nerv/examples/lmptb/rnn/softmax_ce_t.lua) |  0
-rw-r--r--  nerv/examples/lmptb/tnn_ptb_main.lua               | 73
-rw-r--r--  nerv/layer/affine.lua                              |  4
-rw-r--r--  nerv/layer/affine_recurrent.lua                    |  4
-rw-r--r--  nerv/layer/init.lua                                | 21
-rw-r--r--  nerv/nn/param_repo.lua                             |  8
9 files changed, 158 insertions, 48 deletions
diff --git a/nerv/examples/lmptb/lmptb/layer/select_linear.lua b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
index e96296f..580b9c5 100644
--- a/nerv/examples/lmptb/lmptb/layer/select_linear.lua
+++ b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
@@ -10,9 +10,9 @@ function SL:__init(id, global_conf, layer_conf)
self.dim_out = layer_conf.dim_out
self.gconf = global_conf
- self.ltp = layer_conf.ltp
self.vocab = layer_conf.vocab
-
+ self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.vocab:size(), self.dim_out[1]}) --layer_conf.ltp
+
self:check_dim_len(1, 1)
end
diff --git a/nerv/examples/lmptb/rnn/init.lua b/nerv/examples/lmptb/rnn/init.lua
index 0e08cb6..6507582 100644
--- a/nerv/examples/lmptb/rnn/init.lua
+++ b/nerv/examples/lmptb/rnn/init.lua
@@ -1,26 +1,26 @@
-local Layer = nerv.class('nerv.LayerT')
+local LayerT = nerv.class('nerv.LayerT')
-function Layer:__init(id, global_conf, layer_conf)
+function LayerT:__init(id, global_conf, layer_conf)
nerv.error_method_not_implemented()
end
-function Layer:init(batch_size, chunk_size)
+function LayerT:init(batch_size, chunk_size)
nerv.error_method_not_implemented()
end
-function Layer:update(bp_err, input, output, t)
+function LayerT:update(bp_err, input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:propagate(input, output, t)
+function LayerT:propagate(input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:back_propagate(bp_err, next_bp_err, input, output, t)
+function LayerT:back_propagate(bp_err, next_bp_err, input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:check_dim_len(len_in, len_out)
+function LayerT:check_dim_len(len_in, len_out)
local expected_in = #self.dim_in
local expected_out = #self.dim_out
if len_in > 0 and expected_in ~= len_in then
@@ -33,13 +33,14 @@ function Layer:check_dim_len(len_in, len_out)
end
end
-function Layer:get_params()
+function LayerT:get_params()
nerv.error_method_not_implemented()
end
-function Layer:get_dim()
+function LayerT:get_dim()
return self.dim_in, self.dim_out
end
nerv.include('tnn.lua')
-nerv.include('softmax_ce_t.lua')
+nerv.include('layersT/softmax_ce_t.lua')
+nerv.include('layers/gate_fff.lua')
diff --git a/nerv/examples/lmptb/rnn/layers/gate_fff.lua b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
new file mode 100644
index 0000000..751dde1
--- /dev/null
+++ b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
@@ -0,0 +1,71 @@
+local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer')
+
+function GateFFFLayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+
+ self.ltp1 = self:find_param("ltp1", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
+ self.ltp2 = self:find_param("ltp2", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp
+ self.ltp3 = self:find_param("ltp3", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[3], self.dim_out[1]}) --layer_conf.ltp
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
+
+ self:check_dim_len(3, 1) -- exactly three inputs and one output
+end
+
+function GateFFFLayer:init(batch_size)
+ if self.ltp1.trans:ncol() ~= self.bp.trans:ncol() or
+ self.ltp2.trans:ncol() ~= self.bp.trans:ncol() or
+ self.ltp3.trans:ncol() ~= self.bp.trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform and bias paramter")
+ end
+ if self.dim_in[1] ~= self.ltp1.trans:nrow() or
+ self.dim_in[2] ~= self.ltp2.trans:nrow() or
+ self.dim_in[3] ~= self.ltp3.trans:nrow() then
+ nerv.error("mismatching dimensions of linear transform parameter and input")
+ end
+ if self.dim_out[1] ~= self.ltp1.trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform parameter and output")
+ end
+ self.ltp1:train_init()
+ self.ltp2:train_init()
+ self.ltp3:train_init()
+ self.bp:train_init()
+ self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
+end
+
+function GateFFFLayer:batch_resize(batch_size)
+ if self.err_bakm:nrow() ~= batch_size then
+ self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
+ end
+end
+
+function GateFFFLayer:propagate(input, output)
+ -- apply linear transform
+ output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+ output[1]:mul(input[2], self.ltp2.trans, 1.0, 1.0, 'N', 'N')
+ output[1]:mul(input[3], self.ltp3.trans, 1.0, 1.0, 'N', 'N')
+ -- add bias
+ output[1]:add_row(self.bp.trans, 1.0)
+ output[1]:sigmoid(output[1])
+end
+
+function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
+ self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+ next_bp_err[1]:mul(self.err_bakm, self.ltp1.trans, 1.0, 0.0, 'N', 'T')
+ next_bp_err[2]:mul(self.err_bakm, self.ltp2.trans, 1.0, 0.0, 'N', 'T')
+ next_bp_err[3]:mul(self.err_bakm, self.ltp3.trans, 1.0, 0.0, 'N', 'T')
+end
+
+function GateFFFLayer:update(bp_err, input, output)
+ self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+ self.ltp1:update_by_err_input(self.err_bakm, input[1])
+ self.ltp2:update_by_err_input(self.err_bakm, input[2])
+ self.ltp3:update_by_err_input(self.err_bakm, input[3])
+ self.bp:update_by_gradient(self.err_bakm:colsum())
+end
+
+function GateFFFLayer:get_params()
+ return nerv.ParamRepo({self.ltp1, self.ltp2, self.ltp3, self.bp})
+end
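A minimal usage sketch for the new nerv.GateFFFLayer (not part of this commit): it follows the LayerRepo declaration style used in tnn_ptb_main.lua below, with an empty parameter table so that ltp1/ltp2/ltp3/bp are resolved through find_param. The layer id "gateL1", the three hidden_size inputs, and the final nerv.LayerRepo call are assumptions for illustration only.

-- hypothetical sketch: declaring the three-input sigmoid gate layer
-- inside the layers table handed to nerv.LayerRepo
local layers = {
    ["nerv.GateFFFLayer"] = {
        -- empty param table {}: ltp1/ltp2/ltp3/bp are looked up in
        -- global_conf.paramRepo and auto-generated if missing
        ["gateL1"] = {{}, {["dim_in"] = {global_conf.hidden_size,
                                         global_conf.hidden_size,
                                         global_conf.hidden_size},
                           ["dim_out"] = {global_conf.hidden_size}}},
    },
}
local layerRepo = nerv.LayerRepo(layers, global_conf.paramRepo, global_conf)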
diff --git a/nerv/examples/lmptb/rnn/softmax_ce_t.lua b/nerv/examples/lmptb/rnn/layersT/softmax_ce_t.lua
index dddb05a..dddb05a 100644
--- a/nerv/examples/lmptb/rnn/softmax_ce_t.lua
+++ b/nerv/examples/lmptb/rnn/layersT/softmax_ce_t.lua
diff --git a/nerv/examples/lmptb/tnn_ptb_main.lua b/nerv/examples/lmptb/tnn_ptb_main.lua
index 50286c9..3096a3f 100644
--- a/nerv/examples/lmptb/tnn_ptb_main.lua
+++ b/nerv/examples/lmptb/tnn_ptb_main.lua
@@ -17,8 +17,14 @@ local LMTrainer = nerv.LMTrainer
function prepare_parameters(global_conf, iter)
printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
if iter == -1 then --first time
- printf("%s first time, generating parameters...\n", global_conf.sche_log_pre)
+ printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
+ --[[
ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
ltp_ih.trans:generate(global_conf.param_random)
@@ -27,47 +33,48 @@ function prepare_parameters(global_conf, iter)
ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
ltp_hh.trans:generate(global_conf.param_random)
- ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
- ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
- ltp_ho.trans:generate(global_conf.param_random)
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
bp_h = nerv.BiasParam("bp_h", global_conf)
bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
bp_h.trans:generate(global_conf.param_random)
- bp_o = nerv.BiasParam("bp_o", global_conf)
- bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
- bp_o.trans:generate(global_conf.param_random)
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
f:write_chunk(ltp_ih)
f:write_chunk(ltp_hh)
- f:write_chunk(ltp_ho)
+ --f:write_chunk(ltp_ho)
f:write_chunk(bp_h)
- f:write_chunk(bp_o)
+ --f:write_chunk(bp_o)
f:close()
-
+ ]]--
return nil
end
printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
- local paramRepo = nerv.ParamRepo()
paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
- return paramRepo
+ return nil
end
--global_conf: table
--Returns: nerv.LayerRepo
-function prepare_layers(global_conf, paramRepo)
+function prepare_layers(global_conf)
printf("%s preparing layers...\n", global_conf.sche_log_pre)
+ local paramRepo = global_conf.paramRepo
+
local du = false
--local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
- local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du}}
+ local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du}}
local layers = {
["nerv.AffineRecurrentLayer"] = {
@@ -75,7 +82,7 @@ function prepare_layers(global_conf, paramRepo)
},
["nerv.SelectLinearLayer"] = {
- ["selectL1"] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}},
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab}},
},
["nerv.SigmoidLayer"] = {
@@ -87,7 +94,7 @@ function prepare_layers(global_conf, paramRepo)
},
["nerv.AffineLayer"] = {
- ["outputL"] = {{["ltp"] = "ltp_ho", ["bp"] = "bp_o"}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du}},
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du}},
},
["nerv.SoftmaxCELayerT"] = {
@@ -146,10 +153,10 @@ function prepare_tnn(global_conf, layerRepo)
end
function load_net(global_conf, next_iter)
- local paramRepo = prepare_parameters(global_conf, next_iter)
- local layerRepo = prepare_layers(global_conf, paramRepo)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
local tnn = prepare_tnn(global_conf, layerRepo)
- return tnn, paramRepo
+ return tnn
end
local train_fn, valid_fn, test_fn
@@ -184,7 +191,7 @@ global_conf = {
sche_log_pre = "[SCHEDULER]:",
log_w_num = 40000, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir = '/home/slhome/txh18/workspace/nerv/play/dagL_test'
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/ptbEXP/tnn_test'
}
elseif (set == "msr_sc") then
@@ -215,7 +222,7 @@ global_conf = {
sche_log_pre = "[SCHEDULER]:",
log_w_num = 40000, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
}
else
@@ -233,7 +240,7 @@ global_conf = {
hidden_size = 20,
chunk_size = 2,
- batch_size = 3,
+ batch_size = 10,
max_iter = 3,
param_random = function() return (math.random() / 5 - 0.1) end,
@@ -244,15 +251,11 @@ global_conf = {
sche_log_pre = "[SCHEDULER]:",
log_w_num = 10, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir = '/home/slhome/txh18/workspace/nerv/play/dagL_test'
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_test'
}
end
-global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
-global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
-global_conf.param_fn = global_conf.work_dir .. "/params"
-
lr_half = false --can not be local, to be set by loadstring
start_iter = -1
ppl_last = 100000
@@ -264,6 +267,11 @@ else
printf("%s not user setting, all default...\n", global_conf.sche_log_pre)
end
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+
----------------printing options---------------------------------
printf("%s printing global_conf...\n", global_conf.sche_log_pre)
for id, value in pairs(global_conf) do
@@ -291,12 +299,13 @@ global_conf.vocab:build_file(global_conf.vocab_fn, false)
ppl_rec = {}
if start_iter == -1 then
- prepare_parameters(global_conf, -1) --randomly generate parameters
+ prepare_parameters(global_conf, -1) --write pre-generated params to the param.0 file
end
if start_iter == -1 or start_iter == 0 then
print("===INITIAL VALIDATION===")
- local tnn, paramRepo = load_net(global_conf, 0)
+ local tnn = load_net(global_conf, 0)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, so save the repo to the param.0 file again
local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
nerv.LMUtil.wait(1)
ppl_rec[0] = {}
@@ -315,7 +324,7 @@ local final_iter
for iter = start_iter, global_conf.max_iter, 1 do
final_iter = iter --for final testing
global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
- tnn, paramRepo = load_net(global_conf, iter - 1)
+ tnn = load_net(global_conf, iter - 1)
printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
ppl_rec[iter] = {}
@@ -336,7 +345,7 @@ for iter = start_iter, global_conf.max_iter, 1 do
end
if ppl_rec[iter].valid < ppl_last then
printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
- paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
else
printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
@@ -357,6 +366,6 @@ end
printf("\n")
printf("===FINAL TEST===\n")
global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
-tnn, paramRepo = load_net(global_conf, final_iter)
+tnn = load_net(global_conf, final_iter)
LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 6903c51..e24a0c6 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -61,10 +61,10 @@ end
function AffineLayer:__init(id, global_conf, layer_conf)
self.id = id
- self.ltp = layer_conf.ltp
- self.bp = layer_conf.bp
self.dim_in = layer_conf.dim_in
self.dim_out = layer_conf.dim_out
+ self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
self.gconf = global_conf
self:check_dim_len(1, 1) -- exactly one input and one output
-- self.direct_update = layer_conf.direct_update or global_conf.direct_update
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index da189e0..d537f4a 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -10,8 +10,8 @@ function Recurrent:__init(id, global_conf, layer_conf)
self.dim_out = layer_conf.dim_out
self.gconf = global_conf
- self.bp = layer_conf.bp
- self.ltp_hh = layer_conf.ltp_hh --from hidden to hidden
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
+ self.ltp_hh = self:find_param("ltp_hh", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp_hh --from hidden to hidden
self:check_dim_len(2, 1)
self.direct_update = layer_conf.direct_update
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 6861b0e..c6d0a98 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -70,6 +70,27 @@ function Layer:get_dim()
return self.dim_in, self.dim_out
end
+function Layer:find_param(pid, l_conf, gconf, p_type, p_dim)
+ if l_conf[pid] ~= nil then
+ nerv.printf("Param [%s] of layer [%s] found in layer_conf.\n", pid, self.id)
+ return l_conf[pid]
+ end
+ local pid_g = self.id .. '_' .. pid --global identifier
+ local pr = gconf.paramRepo
+ local p
+ if pr:has_param(pid_g) == true then
+ nerv.printf("Param [%s] of layer [%s] found in paramRepo.\n", pid, self.id)
+ p = pr:get_param(pid_g)
+ return p
+ end
+ nerv.printf("Param [%s] of layer [%s] is not found in layer_conf or paramRepo, switch to auto-generate.\n", pid, self.id)
+ p = p_type(pid_g, gconf)
+ p.trans = gconf.cumat_type(unpack(p_dim))
+ p.trans:generate(gconf.param_random)
+ pr:add(pid_g, p) --add the parameter into the paramRepo
+ return p
+end
+
nerv.include('affine.lua')
nerv.include('sigmoid.lua')
nerv.include('softmax_ce.lua')
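The new Layer:find_param resolves a parameter in three steps: take it from layer_conf if it is given there, otherwise look it up in gconf.paramRepo under the global id "&lt;layer_id&gt;_&lt;pid&gt;", otherwise generate it randomly and register it in the repo. A hedged sketch of a layer constructor that relies on this fallback (the class name MyProjLayer and the id used at construction time are made up; the pattern mirrors the affine.lua change above):

-- illustrative only: a constructor that lets find_param fetch or
-- auto-generate its weight and bias (stored and looked up as
-- "<id>_ltp" and "<id>_bp" in global_conf.paramRepo)
local MyProjLayer = nerv.class("nerv.MyProjLayer", "nerv.Layer")

function MyProjLayer:__init(id, global_conf, layer_conf)
    self.id = id
    self.dim_in = layer_conf.dim_in
    self.dim_out = layer_conf.dim_out
    self.gconf = global_conf
    self.ltp = self:find_param("ltp", layer_conf, global_conf,
                               nerv.LinearTransParam,
                               {self.dim_in[1], self.dim_out[1]})
    self.bp = self:find_param("bp", layer_conf, global_conf,
                              nerv.BiasParam, {1, self.dim_out[1]})
    self:check_dim_len(1, 1)
end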
diff --git a/nerv/nn/param_repo.lua b/nerv/nn/param_repo.lua
index ab971ba..6d52691 100644
--- a/nerv/nn/param_repo.lua
+++ b/nerv/nn/param_repo.lua
@@ -67,6 +67,14 @@ function ParamRepo:export(param_file, pids)
cf:close()
end
+function ParamRepo:has_param(pid)
+ if self.params[pid] ~= nil then
+ return true
+ else
+ return false
+ end
+end
+
function ParamRepo:get_param(pid)
local p = self.params[pid]
if p == nil then