author     txh18 <cloudygooseg@gmail.com>  2015-11-16 11:44:43 +0800
committer  txh18 <cloudygooseg@gmail.com>  2015-11-16 11:44:43 +0800
commit     267a486fb78a985cbfdc60ef8549b3128f716713 (patch)
tree       c60697e60ef5053203b5148cb3f0bfbf88a81c94 /nerv/examples
parent     ef40688d5a0a3b7eae18dc364a40ae4e8e7619e7 (diff)
fixed direct update, did not know the result
Diffstat (limited to 'nerv/examples')
-rw-r--r--  nerv/examples/lmptb/lm_trainer.lua    1
-rw-r--r--  nerv/examples/lmptb/tnn_ptb_main.lua  8
2 files changed, 6 insertions, 3 deletions
diff --git a/nerv/examples/lmptb/lm_trainer.lua b/nerv/examples/lmptb/lm_trainer.lua
index 226873b..2be97c8 100644
--- a/nerv/examples/lmptb/lm_trainer.lua
+++ b/nerv/examples/lmptb/lm_trainer.lua
@@ -16,6 +16,7 @@ function LMTrainer.lm_process_file(global_conf, fn, tnn, do_train)
     local result = nerv.LMResult(global_conf, global_conf.vocab)
     result:init("rnn")
+    global_conf.timer:flush()
     tnn:flush_all() --caution: will also flush the inputs from the reader!
     local next_log_wcn = global_conf.log_w_num
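The new global_conf.timer:flush() call clears timing statistics accumulated
during a previous pass, so that each call to LMTrainer.lm_process_file reports
timings for that pass only, mirroring the tnn:flush_all() call below it. As a
rough illustration only (not NERV's actual implementation), a timer with such
a flush() method could look like the sketch below; the tic/toc names and the
internal fields are assumptions.

    -- Minimal sketch of an accumulating timer with a flush() method (Lua).
    -- Only flush() appears in this diff; tic/toc are assumed for context.
    local Timer = {}
    Timer.__index = Timer

    function Timer.new()
        return setmetatable({rec = {}, start = {}}, Timer)
    end

    function Timer:tic(name)        -- start (or restart) a named clock
        self.start[name] = os.clock()
    end

    function Timer:toc(name)        -- accumulate elapsed time under `name`
        local t0 = self.start[name]
        if t0 ~= nil then
            self.rec[name] = (self.rec[name] or 0) + (os.clock() - t0)
        end
    end

    function Timer:flush()          -- drop all accumulated records
        self.rec = {}
        self.start = {}
    end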
diff --git a/nerv/examples/lmptb/tnn_ptb_main.lua b/nerv/examples/lmptb/tnn_ptb_main.lua
index 891487c..19d0f8a 100644
--- a/nerv/examples/lmptb/tnn_ptb_main.lua
+++ b/nerv/examples/lmptb/tnn_ptb_main.lua
@@ -63,9 +63,11 @@ end
 --Returns: nerv.LayerRepo
 function prepare_layers(global_conf, paramRepo)
     printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+    local du = true
     --local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
-    local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10}}
+    local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du}}
     local layers = {
         ["nerv.AffineRecurrentLayer"] = {
@@ -85,7 +87,7 @@ function prepare_layers(global_conf, paramRepo)
         },
         ["nerv.AffineLayer"] = {
-            ["outputL"] = {{["ltp"] = "ltp_ho", ["bp"] = "bp_o"}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}}},
+            ["outputL"] = {{["ltp"] = "ltp_ho", ["bp"] = "bp_o"}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du}},
         },
         ["nerv.SoftmaxCELayerT"] = {
@@ -168,7 +170,7 @@ global_conf = {
     mmat_type = nerv.MMatrixFloat,
     nn_act_default = 0,
-    hidden_size = 300, --set to 400 for a stable good test PPL
+    hidden_size = 400, --set to 400 for a stable good test PPL
     chunk_size = 15,
     batch_size = 10,
     max_iter = 35,
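The comment on hidden_size refers to test-set perplexity (PPL), the standard
language-model metric: the exponential of the average negative log-likelihood
per word. A minimal reference computation (natural-log probabilities assumed):

    -- Perplexity from a table of per-word log-probabilities (natural log).
    local function perplexity(logprobs)
        local sum = 0
        for i = 1, #logprobs do
            sum = sum + logprobs[i]
        end
        return math.exp(-sum / #logprobs)
    end

    -- A uniform model over a 10-word vocabulary has PPL 10:
    -- perplexity({math.log(0.1), math.log(0.1), math.log(0.1)}) --> 10.0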