author     txh18 <cloudygooseg@gmail.com>  2015-11-20 19:58:14 +0800
committer  txh18 <cloudygooseg@gmail.com>  2015-11-20 19:58:14 +0800
commit     4f5b45b79b8d5f6a9094888cf6b929fe86ac24a3 (patch)
tree       60d5b6232b8d33da9178717c27ecb6dc3591d0b6
parent     6456f5d0b97c5ad7e35c58751f74b8c9fefb635e (diff)
working on automatic parameter generation for layers
-rw-r--r--  nerv/examples/lmptb/rnn/init.lua | 20
-rw-r--r--  nerv/examples/lmptb/rnn/layers/gate_fff.lua | 67
-rw-r--r--  nerv/examples/lmptb/rnn/layersT/softmax_ce_t.lua (renamed from nerv/examples/lmptb/rnn/softmax_ce_t.lua) | 0
-rw-r--r--  nerv/examples/lmptb/tnn_ptb_main.lua | 42
-rw-r--r--  nerv/layer/affine.lua | 4
-rw-r--r--  nerv/layer/init.lua | 21
-rw-r--r--  nerv/nn/layer_repo.lua | 3
-rw-r--r--  nerv/nn/param_repo.lua | 7
8 files changed, 128 insertions(+), 36 deletions(-)
diff --git a/nerv/examples/lmptb/rnn/init.lua b/nerv/examples/lmptb/rnn/init.lua
index 0e08cb6..1370781 100644
--- a/nerv/examples/lmptb/rnn/init.lua
+++ b/nerv/examples/lmptb/rnn/init.lua
@@ -1,26 +1,26 @@
-local Layer = nerv.class('nerv.LayerT')
+local LayerT = nerv.class('nerv.LayerT')
-function Layer:__init(id, global_conf, layer_conf)
+function LayerT:__init(id, global_conf, layer_conf)
nerv.error_method_not_implemented()
end
-function Layer:init(batch_size, chunk_size)
+function LayerT:init(batch_size, chunk_size)
nerv.error_method_not_implemented()
end
-function Layer:update(bp_err, input, output, t)
+function LayerT:update(bp_err, input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:propagate(input, output, t)
+function LayerT:propagate(input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:back_propagate(bp_err, next_bp_err, input, output, t)
+function LayerT:back_propagate(bp_err, next_bp_err, input, output, t)
nerv.error_method_not_implemented()
end
-function Layer:check_dim_len(len_in, len_out)
+function LayerT:check_dim_len(len_in, len_out)
local expected_in = #self.dim_in
local expected_out = #self.dim_out
if len_in > 0 and expected_in ~= len_in then
@@ -33,13 +33,13 @@ function Layer:check_dim_len(len_in, len_out)
end
end
-function Layer:get_params()
+function LayerT:get_params()
nerv.error_method_not_implemented()
end
-function Layer:get_dim()
+function LayerT:get_dim()
return self.dim_in, self.dim_out
end
nerv.include('tnn.lua')
-nerv.include('softmax_ce_t.lua')
+nerv.include('layersT/softmax_ce_t.lua')
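Note: the LayerT interface above differs from the plain nerv.Layer interface in that propagate, back_propagate and update take an explicit time index t, and init takes a chunk_size in addition to batch_size. As a rough illustration only (this pass-through layer is hypothetical and not part of this commit; it assumes the usual NERV matrix method copy_fromd for device-to-device copies), a minimal subclass could look like:

-- Hypothetical example, not part of this patch: a pass-through layer
-- implementing the time-indexed LayerT interface.
local IdentityLayerT = nerv.class('nerv.IdentityLayerT', 'nerv.LayerT')

function IdentityLayerT:__init(id, global_conf, layer_conf)
    self.id = id
    self.dim_in = layer_conf.dim_in
    self.dim_out = layer_conf.dim_out
    self.gconf = global_conf
    self:check_dim_len(1, 1) -- exactly one input and one output
end

function IdentityLayerT:init(batch_size, chunk_size)
    -- no parameters to initialize
end

function IdentityLayerT:propagate(input, output, t)
    output[1]:copy_fromd(input[1]) -- pass the input through unchanged at step t
end

function IdentityLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
    next_bp_err[1]:copy_fromd(bp_err[1])
end

function IdentityLayerT:update(bp_err, input, output, t)
    -- no parameters, nothing to update
end

function IdentityLayerT:get_params()
    return nerv.ParamRepo({})
end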
diff --git a/nerv/examples/lmptb/rnn/layers/gate_fff.lua b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
new file mode 100644
index 0000000..74e19ce
--- /dev/null
+++ b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
@@ -0,0 +1,67 @@
+local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer')
+
+function GateFFFLayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.ltp = layer_conf.ltp
+ self.bp = layer_conf.bp
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+ self:check_dim_len(1, 1) -- exactly one input and one output
+end
+
+function GateFFFLayer:init(batch_size)
+ if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform and bias paramter")
+ end
+ if self.dim_in[1] ~= self.ltp.trans:nrow() then
+ nerv.error("mismatching dimensions of linear transform parameter and input")
+ end
+ if self.dim_out[1] ~= self.ltp.trans:ncol() then
+ nerv.error("mismatching dimensions of linear transform parameter and output")
+ end
+ self.ltp_grad = self.ltp.trans:create()
+ self.ltp:train_init()
+ self.bp:train_init()
+end
+
+function GateFFFLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
+function GateFFFLayer:update(bp_err, input, output)
+ if self.direct_update == true then
+ local gconf = self.gconf
+ if gconf.momentum > 0 then
+ self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
+ self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
+ -- momentum gain
+ local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+ local n = self.gconf.batch_size * mmt_gain
+ -- perform update
+ self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+ self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+ else
+ self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+ self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+ end
+ else
+ self.ltp:update_by_err_input(bp_err[1], input[1])
+ self.bp:update_by_gradient(bp_err[1]:colsum())
+ end
+end
+
+function GateFFFLayer:propagate(input, output)
+ -- apply linear transform
+ output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
+ -- add bias
+ output[1]:add_row(self.bp.trans, 1.0)
+end
+
+function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
+ next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+end
+
+function GateFFFLayer:get_params()
+ return nerv.ParamRepo({self.ltp, self.bp})
+end
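For context, a layer of this type would normally be created through LayerRepo:add_layers, with the ltp/bp parameter ids and the dimensions supplied in the spec. The snippet below is only a sketch; the parameter ids ltp_hg and bp_g and the layer id gateL1 are made up for illustration:

-- Hypothetical layer spec; parameter ids, layer id and sizes are illustrative only.
layerRepo:add_layers({
    ["nerv.GateFFFLayer"] = {
        ["gateL1"] = {{["ltp"] = "ltp_hg", ["bp"] = "bp_g"},
                      {["dim_in"] = {global_conf.hidden_size},
                       ["dim_out"] = {global_conf.hidden_size}}},
    },
}, global_conf.paramRepo, global_conf)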
diff --git a/nerv/examples/lmptb/rnn/softmax_ce_t.lua b/nerv/examples/lmptb/rnn/layersT/softmax_ce_t.lua
index dddb05a..dddb05a 100644
--- a/nerv/examples/lmptb/rnn/softmax_ce_t.lua
+++ b/nerv/examples/lmptb/rnn/layersT/softmax_ce_t.lua
diff --git a/nerv/examples/lmptb/tnn_ptb_main.lua b/nerv/examples/lmptb/tnn_ptb_main.lua
index 059d52a..6afecbf 100644
--- a/nerv/examples/lmptb/tnn_ptb_main.lua
+++ b/nerv/examples/lmptb/tnn_ptb_main.lua
@@ -17,6 +17,9 @@ local LMTrainer = nerv.LMTrainer
function prepare_parameters(global_conf, iter)
printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
if iter == -1 then --first time
printf("%s first time, generating parameters...\n", global_conf.sche_log_pre)
ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
@@ -27,43 +30,44 @@ function prepare_parameters(global_conf, iter)
ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
ltp_hh.trans:generate(global_conf.param_random)
- ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
- ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
- ltp_ho.trans:generate(global_conf.param_random)
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
bp_h = nerv.BiasParam("bp_h", global_conf)
bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
bp_h.trans:generate(global_conf.param_random)
- bp_o = nerv.BiasParam("bp_o", global_conf)
- bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
- bp_o.trans:generate(global_conf.param_random)
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
f:write_chunk(ltp_ih)
f:write_chunk(ltp_hh)
- f:write_chunk(ltp_ho)
+ --f:write_chunk(ltp_ho)
f:write_chunk(bp_h)
- f:write_chunk(bp_o)
+ --f:write_chunk(bp_o)
f:close()
return nil
end
printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
- local paramRepo = nerv.ParamRepo()
paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
- return paramRepo
+ return nil
end
--global_conf: table
--Returns: nerv.LayerRepo
-function prepare_layers(global_conf, paramRepo)
+function prepare_layers(global_conf)
printf("%s preparing layers...\n", global_conf.sche_log_pre)
+ local paramRepo = global_conf.paramRepo
+
local du = false
--local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
@@ -146,10 +150,10 @@ function prepare_tnn(global_conf, layerRepo)
end
function load_net(global_conf, next_iter)
- local paramRepo = prepare_parameters(global_conf, next_iter)
- local layerRepo = prepare_layers(global_conf, paramRepo)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
local tnn = prepare_tnn(global_conf, layerRepo)
- return tnn, paramRepo
+ return tnn
end
local train_fn, valid_fn, test_fn
@@ -233,7 +237,7 @@ global_conf = {
hidden_size = 20,
chunk_size = 2,
- batch_size = 3,
+ batch_size = 10,
max_iter = 3,
param_random = function() return (math.random() / 5 - 0.1) end,
@@ -297,7 +301,7 @@ end
if start_iter == -1 or start_iter == 0 then
print("===INITIAL VALIDATION===")
- local tnn, paramRepo = load_net(global_conf, 0)
+ local tnn = load_net(global_conf, 0)
local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
nerv.LMUtil.wait(1)
ppl_rec[0] = {}
@@ -316,7 +320,7 @@ local final_iter
for iter = start_iter, global_conf.max_iter, 1 do
final_iter = iter --for final testing
global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
- tnn, paramRepo = load_net(global_conf, iter - 1)
+ tnn = load_net(global_conf, iter - 1)
printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
ppl_rec[iter] = {}
@@ -337,7 +341,7 @@ for iter = start_iter, global_conf.max_iter, 1 do
end
if ppl_rec[iter].valid < ppl_last then
printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
- paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
else
printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
@@ -358,6 +362,6 @@ end
printf("\n")
printf("===FINAL TEST===\n")
global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
-tnn, paramRepo = load_net(global_conf, final_iter)
+tnn = load_net(global_conf, final_iter)
LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
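Taken together, the parameter repository is now owned by global_conf instead of being passed around through return values. A condensed, non-literal sketch of the resulting per-iteration flow:

-- Condensed sketch of the new flow (not literal code from the patch).
prepare_parameters(global_conf, iter)           -- creates global_conf.paramRepo and imports params
local layerRepo = prepare_layers(global_conf)   -- layers resolve params via global_conf.paramRepo
local tnn = prepare_tnn(global_conf, layerRepo)
-- ... run training for this iteration ...
global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)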
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 6a541e8..3e84ec0 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -63,10 +63,10 @@ end
function AffineLayer:__init(id, global_conf, layer_conf)
self.id = id
- self.ltp = layer_conf.ltp
- self.bp = layer_conf.bp
self.dim_in = layer_conf.dim_in
self.dim_out = layer_conf.dim_out
+ self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
+ self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
self.gconf = global_conf
self:check_dim_len(1, 1) -- exactly one input and one output
self.direct_update = layer_conf.direct_update or global_conf.direct_update
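Since AffineLayer now resolves ltp and bp through find_param, a layer spec can leave them out entirely and have them generated from dim_in/dim_out; this is what allows tnn_ptb_main.lua above to stop generating ltp_ho and bp_o explicitly. A hedged sketch, with the layer id outputL chosen only for illustration:

-- Hypothetical spec: no "ltp"/"bp" entries, so find_param will either fetch
-- "outputL_ltp"/"outputL_bp" from global_conf.paramRepo or auto-generate them.
layerRepo:add_layers({
    ["nerv.AffineLayer"] = {
        ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size},
                            ["dim_out"] = {global_conf.vocab:size()}}},
    },
}, global_conf.paramRepo, global_conf)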
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 6861b0e..d268caa 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -70,6 +70,27 @@ function Layer:get_dim()
return self.dim_in, self.dim_out
end
+function Layer:find_param(pid, l_conf, gconf, p_type, p_dim)
+ if l_conf[pid] ~= nil then
+ nerv.printf("Param [%s] of layer [%s] found in layer_conf.\n", pid, self.id)
+ return l_conf[pid]
+ end
+ local pid_g = self.id .. '_' .. pid --global identifier
+ local pr = gconf.paramRepo
+ local p
+ p = pr:get_param(pid_g)
+ if p ~= nil then
+ nerv.printf("Param [%s] of layer [%s] found in paramRepo.\n", pid, self.id)
+ return p
+ end
+ nerv.printf("Param [%s] of layer [%s] is not found in layer_conf or paramRepo, switch to auto-generate.\n", pid, self.id)
+ p = p_type(pid_g, gconf)
+ p.trans = gconf.cumat_type(unpack(p_dim))
+ p.trans:generate(gconf.param_random)
+ pr:add(pid_g, p) --add the parameter into the paramRepo
+ return p
+end
+
nerv.include('affine.lua')
nerv.include('sigmoid.lua')
nerv.include('softmax_ce.lua')
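find_param therefore resolves a parameter in three steps: the layer_conf entry, then global_conf.paramRepo under the id <layer_id>_<pid>, and finally auto-generation with the given type and dimensions. A hedged usage sketch (the layer id affine1, the sizes, and the repo contents are hypothetical; gconf is assumed to carry paramRepo, cumat_type and param_random):

-- Hypothetical: gconf.paramRepo already holds "affine1_ltp" but not "affine1_bp".
-- find_param finds no "ltp" in layer_conf, so it picks up "affine1_ltp" from the repo;
-- "bp" is found in neither place, so a new 1 x 200 BiasParam "affine1_bp" is
-- generated and added to gconf.paramRepo.
local layer_conf = {dim_in = {100}, dim_out = {200}}
local affine = nerv.AffineLayer("affine1", gconf, layer_conf)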
diff --git a/nerv/nn/layer_repo.lua b/nerv/nn/layer_repo.lua
index ef333a7..ec0f80a 100644
--- a/nerv/nn/layer_repo.lua
+++ b/nerv/nn/layer_repo.lua
@@ -23,6 +23,9 @@ function LayerRepo:add_layers(layer_spec, param_repo, global_conf)
end
for pname, pid in pairs(spec[1]) do
layer_config[pname] = param_repo:get_param(pid)
+ if layer_config[pname] == nil then
+ nerv.error("did not find parameter in paramRepo")
+ end
end
layers[id] = layer_type(id, global_conf, layer_config)
end
diff --git a/nerv/nn/param_repo.lua b/nerv/nn/param_repo.lua
index ab971ba..7fc0498 100644
--- a/nerv/nn/param_repo.lua
+++ b/nerv/nn/param_repo.lua
@@ -68,9 +68,6 @@ function ParamRepo:export(param_file, pids)
end
function ParamRepo:get_param(pid)
- local p = self.params[pid]
- if p == nil then
- nerv.error("param with id %s not found", pid)
- end
- return p
+ --if pid does not exist, return nil
+ return self.params[pid]
end
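Since get_param no longer raises an error for a missing id, callers that do require the parameter must check for nil themselves, as add_layers now does above. A minimal caller-side sketch (the id ltp_ih is just an example):

-- Sketch: caller-side handling now that get_param can return nil.
local p = paramRepo:get_param("ltp_ih")
if p == nil then
    nerv.error("param with id %s not found", "ltp_ih")
end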