author     txh18 <cloudygooseg@gmail.com>  2015-12-02 20:29:56 +0800
committer  txh18 <cloudygooseg@gmail.com>  2015-12-02 20:29:56 +0800
commit     103a4291349c0f55155ca97bd236fc7784d286ff (patch)
tree       f9b4c7e021779ba803791148cec6dcea28053e76
parent     094fc872d3e62c5f0950ac1747f130e30a08bee8 (diff)
function name change in LMTrainer
-rw-r--r--  nerv/examples/lmptb/lm_trainer.lua         |  5
-rw-r--r--  nerv/examples/lmptb/lmptb/lmseqreader.lua  |  2
-rw-r--r--  nerv/examples/lmptb/lstmlm_ptb_main.lua    | 16
-rw-r--r--  nerv/examples/lmptb/rnnlm_ptb_main.lua     | 10
-rw-r--r--  nerv/examples/lmptb/tnn/sutil.lua          |  7
5 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/nerv/examples/lmptb/lm_trainer.lua b/nerv/examples/lmptb/lm_trainer.lua
index 185bc6d..a203cc6 100644
--- a/nerv/examples/lmptb/lm_trainer.lua
+++ b/nerv/examples/lmptb/lm_trainer.lua
@@ -17,11 +17,14 @@ function nerv.BiasParam:update_by_gradient(gradient)
end
--Returns: LMResult
-function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
+function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
local reader = nerv.LMSeqReader(global_conf, global_conf.batch_size, global_conf.chunk_size, global_conf.vocab)
reader:open_file(fn)
local result = nerv.LMResult(global_conf, global_conf.vocab)
result:init("rnn")
+ if global_conf.dropout_rate ~= nil then
+ nerv.info("LMTrainer.lm_process_file_rnn: dropout_rate is %f", global_conf.dropout_rate)
+ end
global_conf.timer:flush()
tnn:flush_all() --caution: will also flush the inputs from the reader!
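
For reference, a minimal usage sketch of the renamed entry point, assuming a global_conf populated as in the main scripts further down (tnn and valid_fn also come from those scripts); dropout_rate is optional and is only logged when present:

    -- one evaluation pass over a file, no parameter update (do_train = false)
    global_conf.dropout_rate = 0  -- evaluation runs without dropout
    local result = LMTrainer.lm_process_file_rnn(global_conf,
                                                 global_conf.valid_fn, tnn, false)
    printf("valid ppl: %f\n", result:ppl_all("rnn"))
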
diff --git a/nerv/examples/lmptb/lmptb/lmseqreader.lua b/nerv/examples/lmptb/lmptb/lmseqreader.lua
index cc805a4..04eba45 100644
--- a/nerv/examples/lmptb/lmptb/lmseqreader.lua
+++ b/nerv/examples/lmptb/lmptb/lmseqreader.lua
@@ -24,7 +24,7 @@ function LMReader:open_file(fn)
nerv.error("%s error: in open_file(fn is %s), file handle not nil.", self.log_pre, fn)
end
printf("%s opening file %s...\n", self.log_pre, fn)
- print("batch_size:", self.batch_size, "chunk_size", self.chunk_size)
+ print(self.log_pre, "batch_size:", self.batch_size, "chunk_size", self.chunk_size)
self.fh = io.open(fn, "r")
self.streams = {}
for i = 1, self.batch_size, 1 do
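
The lmseqreader.lua change only adds the reader's log prefix to the batch/chunk diagnostic line; the call site is unchanged. As used in lm_process_file_rnn above:

    local reader = nerv.LMSeqReader(global_conf, global_conf.batch_size,
                                    global_conf.chunk_size, global_conf.vocab)
    reader:open_file(fn)  -- now prints: <log_pre>  batch_size: B  chunk_size C
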
diff --git a/nerv/examples/lmptb/lstmlm_ptb_main.lua b/nerv/examples/lmptb/lstmlm_ptb_main.lua
index 4c46369..53a7bd5 100644
--- a/nerv/examples/lmptb/lstmlm_ptb_main.lua
+++ b/nerv/examples/lmptb/lstmlm_ptb_main.lua
@@ -200,6 +200,7 @@ global_conf = {
max_iter = 35,
decay_iter = 10,
param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0.5*15:0",
train_fn = train_fn,
valid_fn = valid_fn,
@@ -288,7 +289,8 @@ global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_si
global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
global_conf.param_fn = global_conf.work_dir .. "/params"
-
+global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
+global_conf.dropout_rate = 0
----------------printing options---------------------------------
printf("%s printing global_conf...\n", global_conf.sche_log_pre)
for id, value in pairs(global_conf) do
@@ -324,7 +326,7 @@ if start_iter == -1 or start_iter == 0 then
local tnn = load_net(global_conf, 0)
global_conf.paramRepo = tnn:get_params() --get auto-generated params
global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
- local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ local result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
nerv.LMUtil.wait(1)
ppl_rec[0] = {}
ppl_rec[0].valid = result:ppl_all("rnn")
@@ -344,7 +346,9 @@ for iter = start_iter, global_conf.max_iter, 1 do
global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
tnn = load_net(global_conf, iter - 1)
printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
- result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ global_conf.dropout_rate = 0
ppl_rec[iter] = {}
ppl_rec[iter].train = result:ppl_all("rnn")
--shuffling training file
@@ -352,10 +356,10 @@ for iter = start_iter, global_conf.max_iter, 1 do
os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
printf("===PEEK ON TEST %d===\n", iter)
- result = LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
ppl_rec[iter].test = result:ppl_all("rnn")
printf("===VALIDATION %d===\n", iter)
- result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
ppl_rec[iter].valid = result:ppl_all("rnn")
ppl_rec[iter].lr = global_conf.lrate
if ((ppl_last / ppl_rec[iter].valid < 1.0003 or lr_half == true) and iter > global_conf.decay_iter) then
@@ -385,5 +389,5 @@ printf("\n")
printf("===FINAL TEST===\n")
global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
tnn = load_net(global_conf, final_iter)
-LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
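
Taken together, the lstmlm_ptb_main.lua changes wire a dropout schedule through training: the schedule string is parsed once at startup, the rate for the current iteration is installed before each training pass, and it is reset to 0 so test and validation passes run without dropout. A condensed sketch of that flow, using only names from the hunks above:

    global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
    global_conf.dropout_rate = 0
    for iter = start_iter, global_conf.max_iter, 1 do
        -- look up this iteration's dropout rate in the parsed schedule
        global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
        result = LMTrainer.lm_process_file_rnn(global_conf,
                                               global_conf.train_fn_shuf, tnn, true)
        global_conf.dropout_rate = 0  -- evaluation passes see no dropout
        -- ... peek on test / validation as in the loop above ...
    end
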
diff --git a/nerv/examples/lmptb/rnnlm_ptb_main.lua b/nerv/examples/lmptb/rnnlm_ptb_main.lua
index 16024a8..35b2e08 100644
--- a/nerv/examples/lmptb/rnnlm_ptb_main.lua
+++ b/nerv/examples/lmptb/rnnlm_ptb_main.lua
@@ -307,7 +307,7 @@ if start_iter == -1 or start_iter == 0 then
local tnn = load_net(global_conf, 0)
global_conf.paramRepo = tnn:get_params() --get auto-generated params
global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
- local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ local result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
nerv.LMUtil.wait(1)
ppl_rec[0] = {}
ppl_rec[0].valid = result:ppl_all("rnn")
@@ -327,7 +327,7 @@ for iter = start_iter, global_conf.max_iter, 1 do
global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
tnn = load_net(global_conf, iter - 1)
printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
- result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
ppl_rec[iter] = {}
ppl_rec[iter].train = result:ppl_all("rnn")
--shuffling training file
@@ -335,10 +335,10 @@ for iter = start_iter, global_conf.max_iter, 1 do
os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
printf("===PEEK ON TEST %d===\n", iter)
- result = LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
ppl_rec[iter].test = result:ppl_all("rnn")
printf("===VALIDATION %d===\n", iter)
- result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
ppl_rec[iter].valid = result:ppl_all("rnn")
ppl_rec[iter].lr = global_conf.lrate
if ((ppl_last / ppl_rec[iter].valid < 1.0003 or lr_half == true) and iter > global_conf.decay_iter) then
@@ -368,5 +368,5 @@ printf("\n")
printf("===FINAL TEST===\n")
global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
tnn = load_net(global_conf, final_iter)
-LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
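
Both main scripts reshuffle the training file between iterations with the same shell pipeline. Note that --random-source=/dev/zero fixes the seed of GNU sort's -R hashing, so the resulting order is deterministic: every iteration (and every run) produces the same permutation. Extracted as a helper for clarity (the helper name is illustrative, not from the source):

    local function reshuffle_train_file(conf)
        os.execute('cp ' .. conf.train_fn_shuf .. ' ' .. conf.train_fn_shuf_bak)
        os.execute('cat ' .. conf.train_fn_shuf_bak ..
                   ' | sort -R --random-source=/dev/zero > ' .. conf.train_fn_shuf)
    end
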
diff --git a/nerv/examples/lmptb/tnn/sutil.lua b/nerv/examples/lmptb/tnn/sutil.lua
index d157a26..f5bc408 100644
--- a/nerv/examples/lmptb/tnn/sutil.lua
+++ b/nerv/examples/lmptb/tnn/sutil.lua
@@ -31,9 +31,10 @@ function Util.parse_schedule(str)
end
end
- for i = 1, #sch do
- print(sch[i])
- end
+ --for i = 1, #sch do
+ -- print(sch[i])
+ --end
+ return sch
end
function Util.sche_get(s, it)
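
Only the tail of Util.parse_schedule is visible in this hunk; the fix is that it now returns the parsed list instead of merely printing it (without the return, dropout_list above would be nil). A minimal sketch of what such a parser and Util.sche_get plausibly do, inferred from the "rate*count" colon-separated format of "0.5*15:0"; this is an illustration, not the actual nerv implementation:

    -- "0.5*15:0" -> {0.5, 0.5, ..., 0.5, 0}: fifteen 0.5 entries, then one 0
    local function parse_schedule_sketch(str)
        local sch = {}
        for field in string.gmatch(str, "[^:]+") do
            local value, count = string.match(field, "^([^%*]+)%*(%d+)$")
            if value == nil then
                value, count = field, 1  -- bare value: occupies one iteration
            end
            for i = 1, tonumber(count) do
                table.insert(sch, tonumber(value))
            end
        end
        return sch
    end

    local function sche_get_sketch(s, it)
        if it > #s then it = #s end  -- past the schedule: keep the last value
        return s[it]
    end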