Diffstat (limited to 'nerv/examples/swb_baseline2.lua')
 nerv/examples/swb_baseline2.lua | 150 ++++++++++++++++---------------
 1 file changed, 76 insertions(+), 74 deletions(-)
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 38cfb9a..87b01fa 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -1,65 +1,79 @@
require 'htk_io'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
- rearrange = true, -- just to make the context order consistent with old results, deprecated
+gconf = {lrate = 0.8,
+ wcost = 1e-6,
+ momentum = 0.9,
+ frm_ext = 5,
+ rearrange = true, -- just to make the context order consistent with old TNet results, deprecated
frm_trim = 5, -- trim the first and last 5 frames, TNet just does this, deprecated
+ chunk_size = 1,
tr_scp = "/speechlab/users/mfy43/swb50/train_bp.scp",
cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
+ ali = {file = "/speechlab/users/mfy43/swb50/ref.mlf",
+ format = "map",
+ format_arg = "/speechlab/users/mfy43/swb50/dict",
+ dir = "*/",
+ ext = "lab"},
htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
"/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
- chunk_size = 1}
+}
-function make_layer_repo(param_repo)
+local input_size = 429
+local output_size = 3001
+local hidden_size = 2048
+local trainer = nerv.Trainer
+
+function trainer:make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
{
-- global transf
["nerv.BiasLayer"] =
{
- blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias0"}},
- blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}}
+ blayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias0"}},
+ blayer2 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias1"}}
},
["nerv.WindowLayer"] =
{
- wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window0"}},
- wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}}
+ wlayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window0"}},
+ wlayer2 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window1"}}
},
-- biased linearity
["nerv.AffineLayer"] =
{
- affine0 = {dim_in = {429}, dim_out = {2048},
+ affine0 = {dim_in = {input_size}, dim_out = {hidden_size},
params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
- affine1 = {dim_in = {2048}, dim_out = {2048},
+ affine1 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
- affine2 = {dim_in = {2048}, dim_out = {2048},
+ affine2 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
- affine3 = {dim_in = {2048}, dim_out = {2048},
+ affine3 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
- affine4 = {dim_in = {2048}, dim_out = {2048},
+ affine4 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
- affine5 = {dim_in = {2048}, dim_out = {2048},
+ affine5 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
- affine6 = {dim_in = {2048}, dim_out = {2048},
+ affine6 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine6_ltp", bp = "affine6_bp"}},
- affine7 = {dim_in = {2048}, dim_out = {3001},
+ affine7 = {dim_in = {hidden_size}, dim_out = {output_size},
params = {ltp = "affine7_ltp", bp = "affine7_bp"}}
},
["nerv.SigmoidLayer"] =
{
- sigmoid0 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid1 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid2 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid3 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid4 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid5 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid6 = {dim_in = {2048}, dim_out = {2048}}
+ sigmoid0 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid1 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid2 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid3 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid4 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid5 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid6 = {dim_in = {hidden_size}, dim_out = {hidden_size}}
},
["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
{
- ce_crit = {dim_in = {3001, 1}, dim_out = {1}, compressed = true}
+ ce_crit = {dim_in = {output_size, 1}, dim_out = {1}, compressed = true}
},
["nerv.SoftmaxLayer"] = -- softmax for decode output
{
- softmax = {dim_in = {3001}, dim_out = {3001}}
+ softmax = {dim_in = {output_size}, dim_out = {output_size}}
}
}, param_repo, gconf)
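
The patch replaces the hard-coded layer widths with named constants. As a sanity check on where input_size = 429 comes from, here is a minimal sketch, assuming 39-dimensional PLP_0_D_A features (13 cepstra including C0, plus deltas and accelerations, per plp_0_d_a.conf); the 3001-unit output_size is the softmax target count (presumably the number of tied HMM states):

    -- hedged sketch, not part of the patch: deriving input_size = 429
    local feat_dim = 39              -- assumed: 13 PLP_0 coefficients + deltas + accelerations
    local frm_ext  = 5               -- from gconf: 5 context frames on each side
    local window   = 2 * frm_ext + 1 -- 11-frame context window
    print(feat_dim * window)         --> 429, the value of input_size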
@@ -68,7 +82,7 @@ function make_layer_repo(param_repo)
["nerv.GraphLayer"] =
{
global_transf = {
- dim_in = {429}, dim_out = {429},
+ dim_in = {input_size}, dim_out = {input_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "blayer1[1]", 0},
@@ -79,7 +93,7 @@ function make_layer_repo(param_repo)
}
},
main = {
- dim_in = {429}, dim_out = {3001},
+ dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "affine0[1]", 0},
@@ -108,20 +122,22 @@ function make_layer_repo(param_repo)
["nerv.GraphLayer"] =
{
ce_output = {
- dim_in = {429, 1}, dim_out = {1},
+ dim_in = {input_size, 1}, dim_out = {1},
layer_repo = layer_repo,
connections = {
- {"<input>[1]", "main[1]", 0},
+ {"<input>[1]", "global_transf[1]", 0},
+ {"global_transf[1]", "main[1]", 0},
{"main[1]", "ce_crit[1]", 0},
{"<input>[2]", "ce_crit[2]", 0},
{"ce_crit[1]", "<output>[1]", 0}
}
},
softmax_output = {
- dim_in = {429}, dim_out = {3001},
+ dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
- {"<input>[1]", "main[1]", 0},
+ {"<input>[1]", "global_transf[1]", 0},
+ {"global_transf[1]", "main[1]", 0},
{"main[1]", "softmax[1]", 0},
{"softmax[1]", "<output>[1]", 0}
}
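
The rewiring above is the functional core of the patch: ce_output and softmax_output now route their input through global_transf as an explicit graph node, instead of relying on the reader to apply the transform (note the matching removal of the global_transf = true flags from get_input_order further down). A hedged usage sketch of the resulting repo, using only calls that appear in this file:

    -- hedged sketch: fetching the rewired graphs (a param_repo loaded from
    -- gconf.initialized_param is assumed to be in scope)
    local repo       = trainer:make_layer_repo(param_repo)
    local train_net  = repo:get_layer("ce_output")      -- what trainer:get_network() returns
    local decode_net = repo:get_layer("softmax_output") -- replaces the removed get_decode_network()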
@@ -132,73 +148,59 @@ function make_layer_repo(param_repo)
return layer_repo
end
-function get_network(layer_repo)
+function trainer:get_network(layer_repo)
return layer_repo:get_layer("ce_output")
end
-function get_decode_network(layer_repo)
- return layer_repo:get_layer("softmax_output")
-end
-
-function get_global_transf(layer_repo)
- return layer_repo:get_layer("global_transf")
-end
-
-function make_readers(scp_file, layer_repo)
- return {
- {reader = nerv.HTKReader(gconf,
+function trainer:get_readers(dataset)
+ local function reader_gen(scp, ali)
+ return {{reader = nerv.HTKReader(gconf,
{
id = "main_scp",
- scp_file = scp_file,
+ scp_file = scp,
conf_file = gconf.htk_conf,
frm_ext = gconf.frm_ext,
mlfs = {
- phone_state = {
- file = "/speechlab/users/mfy43/swb50/ref.mlf",
- format = "map",
- format_arg = "/speechlab/users/mfy43/swb50/dict",
- dir = "*/",
- ext = "lab"
- }
+ phone_state = ali
}
}),
- data = {main_scp = 429, phone_state = 1}}
- }
+ data = {main_scp = input_size, phone_state = 1}}}
+ end
+ if dataset == 'train' then
+ return reader_gen(gconf.tr_scp, gconf.tr_ali or gconf.ali)
+ elseif dataset == 'validate' then
+ return reader_gen(gconf.cv_scp, gconf.cv_ali or gconf.ali)
+ else
+ nerv.error('no such dataset')
+ end
end
-function make_buffer(readers)
- return nerv.FrmBuffer(gconf,
- {
- buffer_size = gconf.buffer_size,
- batch_size = gconf.batch_size,
- chunk_size = gconf.chunk_size,
- randomize = gconf.randomize,
- readers = readers,
- use_gpu = true
- })
+function trainer:get_input_order()
+ return {"main_scp", "phone_state"}
end
-function get_input_order()
- return {{id = "main_scp", global_transf = true},
- {id = "phone_state"}}
+function trainer:get_decode_input_order()
+ return {"main_scp"}
end
-function get_decode_input_order()
- return {{id = "main_scp", global_transf = true}}
+function trainer:get_error()
+ local ce_crit = self.layer_repo:get_layer("ce_crit")
+ return ce_crit.total_ce / ce_crit.total_frames
end
-function get_accuracy(layer_repo)
- local ce_crit = layer_repo:get_layer("ce_crit")
- return ce_crit.total_correct / ce_crit.total_frames * 100
+function trainer:mini_batch_afterprocess(cnt, info)
+ if cnt % 1000 == 0 then
+ self:epoch_afterprocess()
+ end
end
-function print_stat(layer_repo)
- local ce_crit = layer_repo:get_layer("ce_crit")
+function trainer:epoch_afterprocess()
+ local ce_crit = self.layer_repo:get_layer("ce_crit")
nerv.info("*** training stat begin ***")
nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
- nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
+ nerv.printf("accuracy:\t\t%.3f%%\n", ce_crit.total_correct / ce_crit.total_frames * 100)
nerv.info("*** training stat end ***")
end