-rw-r--r--  nerv/examples/swb_baseline2.lua   | 150
-rw-r--r--  nerv/examples/timit_baseline2.lua | 166
-rw-r--r--  nerv/examples/trainer.lua         |   2
3 files changed, 160 insertions(+), 158 deletions(-)
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 38cfb9a..87b01fa 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -1,65 +1,79 @@
require 'htk_io'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
- rearrange = true, -- just to make the context order consistent with old results, deprecated
+gconf = {lrate = 0.8,
+ wcost = 1e-6,
+ momentum = 0.9,
+ frm_ext = 5,
+ rearrange = true, -- just to make the context order consistent with old TNet results, deprecated
frm_trim = 5, -- trim the first and last 5 frames, TNet just does this, deprecated
+ chunk_size = 1,
tr_scp = "/speechlab/users/mfy43/swb50/train_bp.scp",
cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
+ ali = {file = "/speechlab/users/mfy43/swb50/ref.mlf",
+ format = "map",
+ format_arg = "/speechlab/users/mfy43/swb50/dict",
+ dir = "*/",
+ ext = "lab"},
htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
"/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
- chunk_size = 1}
+}
-function make_layer_repo(param_repo)
+local input_size = 429
+local output_size = 3001
+local hidden_size = 2048
+local trainer = nerv.Trainer
+
+function trainer:make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
{
-- global transf
["nerv.BiasLayer"] =
{
- blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias0"}},
- blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}}
+ blayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias0"}},
+ blayer2 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias1"}}
},
["nerv.WindowLayer"] =
{
- wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window0"}},
- wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}}
+ wlayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window0"}},
+ wlayer2 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window1"}}
},
-- biased linearity
["nerv.AffineLayer"] =
{
- affine0 = {dim_in = {429}, dim_out = {2048},
+ affine0 = {dim_in = {input_size}, dim_out = {hidden_size},
params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
- affine1 = {dim_in = {2048}, dim_out = {2048},
+ affine1 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
- affine2 = {dim_in = {2048}, dim_out = {2048},
+ affine2 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
- affine3 = {dim_in = {2048}, dim_out = {2048},
+ affine3 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
- affine4 = {dim_in = {2048}, dim_out = {2048},
+ affine4 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
- affine5 = {dim_in = {2048}, dim_out = {2048},
+ affine5 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
- affine6 = {dim_in = {2048}, dim_out = {2048},
+ affine6 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine6_ltp", bp = "affine6_bp"}},
- affine7 = {dim_in = {2048}, dim_out = {3001},
+ affine7 = {dim_in = {hidden_size}, dim_out = {output_size},
params = {ltp = "affine7_ltp", bp = "affine7_bp"}}
},
["nerv.SigmoidLayer"] =
{
- sigmoid0 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid1 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid2 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid3 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid4 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid5 = {dim_in = {2048}, dim_out = {2048}},
- sigmoid6 = {dim_in = {2048}, dim_out = {2048}}
+ sigmoid0 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid1 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid2 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid3 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid4 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid5 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid6 = {dim_in = {hidden_size}, dim_out = {hidden_size}}
},
["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
{
- ce_crit = {dim_in = {3001, 1}, dim_out = {1}, compressed = true}
+ ce_crit = {dim_in = {output_size, 1}, dim_out = {1}, compressed = true}
},
["nerv.SoftmaxLayer"] = -- softmax for decode output
{
- softmax = {dim_in = {3001}, dim_out = {3001}}
+ softmax = {dim_in = {output_size}, dim_out = {output_size}}
}
}, param_repo, gconf)
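
The hunk above makes two structural moves: the hard-coded layer dimensions (429-input, 2048-wide, 3001-output) become the named locals input_size/hidden_size/output_size, and the config's entry points become methods on nerv.Trainer rather than free functions. A minimal sketch of the resulting config style, assuming, as this commit does, that trainer.lua instantiates nerv.Trainer and dispatches to these methods:

    -- sketch: dimensions as locals, entry points as nerv.Trainer methods
    local input_size, hidden_size, output_size = 429, 2048, 3001
    local trainer = nerv.Trainer

    function trainer:make_layer_repo(param_repo)
        -- every layer is sized from the locals above, so changing the
        -- network width is now a one-line edit instead of dozens
        return nerv.LayerRepo({
            ["nerv.AffineLayer"] = {
                affine0 = {dim_in = {input_size}, dim_out = {hidden_size},
                           params = {ltp = "affine0_ltp", bp = "affine0_bp"}}
            }
        }, param_repo, gconf)
    end
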
@@ -68,7 +82,7 @@ function make_layer_repo(param_repo)
["nerv.GraphLayer"] =
{
global_transf = {
- dim_in = {429}, dim_out = {429},
+ dim_in = {input_size}, dim_out = {input_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "blayer1[1]", 0},
@@ -79,7 +93,7 @@ function make_layer_repo(param_repo)
}
},
main = {
- dim_in = {429}, dim_out = {3001},
+ dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "affine0[1]", 0},
@@ -108,20 +122,22 @@ function make_layer_repo(param_repo)
["nerv.GraphLayer"] =
{
ce_output = {
- dim_in = {429, 1}, dim_out = {1},
+ dim_in = {input_size, 1}, dim_out = {1},
layer_repo = layer_repo,
connections = {
- {"<input>[1]", "main[1]", 0},
+ {"<input>[1]", "global_transf[1]", 0},
+ {"global_transf[1]", "main[1]", 0},
{"main[1]", "ce_crit[1]", 0},
{"<input>[2]", "ce_crit[2]", 0},
{"ce_crit[1]", "<output>[1]", 0}
}
},
softmax_output = {
- dim_in = {429}, dim_out = {3001},
+ dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
- {"<input>[1]", "main[1]", 0},
+ {"<input>[1]", "global_transf[1]", 0},
+ {"global_transf[1]", "main[1]", 0},
{"main[1]", "softmax[1]", 0},
{"softmax[1]", "<output>[1]", 0}
}
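
Both output graphs are rewired here so that global_transf is an explicit node inside the graph rather than a transform the reader applies to the features beforehand (the old get_input_order flagged main_scp with global_transf = true; that flag disappears later in this diff). Reading the connection triples as {source_port, sink_port, time_shift} (the third element appears to be a time shift for recurrent links), the new data flow is:

    -- old: reader pre-applies global_transf, graph starts at main
    --   <input>[1] -> main[1] -> ce_crit[1] -> <output>[1]
    -- new: normalization lives inside the compiled network
    --   <input>[1] -> global_transf[1] -> main[1] -> ce_crit[1] -> <output>[1]
    --   <input>[2] -> ce_crit[2]   (frame labels go straight to the criterion)

One consequence is that a saved network is now self-contained: whatever loads ce_output or softmax_output gets the input normalization for free.
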
@@ -132,73 +148,59 @@ function make_layer_repo(param_repo)
return layer_repo
end
-function get_network(layer_repo)
+function trainer:get_network(layer_repo)
return layer_repo:get_layer("ce_output")
end
-function get_decode_network(layer_repo)
- return layer_repo:get_layer("softmax_output")
-end
-
-function get_global_transf(layer_repo)
- return layer_repo:get_layer("global_transf")
-end
-
-function make_readers(scp_file, layer_repo)
- return {
- {reader = nerv.HTKReader(gconf,
+function trainer:get_readers(dataset)
+ local function reader_gen(scp, ali)
+ return {{reader = nerv.HTKReader(gconf,
{
id = "main_scp",
- scp_file = scp_file,
+ scp_file = scp,
conf_file = gconf.htk_conf,
frm_ext = gconf.frm_ext,
mlfs = {
- phone_state = {
- file = "/speechlab/users/mfy43/swb50/ref.mlf",
- format = "map",
- format_arg = "/speechlab/users/mfy43/swb50/dict",
- dir = "*/",
- ext = "lab"
- }
+ phone_state = ali
}
}),
- data = {main_scp = 429, phone_state = 1}}
- }
+ data = {main_scp = input_size, phone_state = 1}}}
+ end
+ if dataset == 'train' then
+ return reader_gen(gconf.tr_scp, gconf.tr_ali or gconf.ali)
+ elseif dataset == 'validate' then
+ return reader_gen(gconf.cv_scp, gconf.cv_ali or gconf.ali)
+ else
+ nerv.error('no such dataset')
+ end
end
-function make_buffer(readers)
- return nerv.FrmBuffer(gconf,
- {
- buffer_size = gconf.buffer_size,
- batch_size = gconf.batch_size,
- chunk_size = gconf.chunk_size,
- randomize = gconf.randomize,
- readers = readers,
- use_gpu = true
- })
+function trainer:get_input_order()
+ return {"main_scp", "phone_state"}
end
-function get_input_order()
- return {{id = "main_scp", global_transf = true},
- {id = "phone_state"}}
+function trainer:get_decode_input_order()
+ return {"main_scp"}
end
-function get_decode_input_order()
- return {{id = "main_scp", global_transf = true}}
+function trainer:get_error()
+ local ce_crit = self.layer_repo:get_layer("ce_crit")
+ return ce_crit.total_ce / ce_crit.total_frames
end
-function get_accuracy(layer_repo)
- local ce_crit = layer_repo:get_layer("ce_crit")
- return ce_crit.total_correct / ce_crit.total_frames * 100
+function trainer:mini_batch_afterprocess(cnt, info)
+ if cnt % 1000 == 0 then
+ self:epoch_afterprocess()
+ end
end
-function print_stat(layer_repo)
- local ce_crit = layer_repo:get_layer("ce_crit")
+function trainer:epoch_afterprocess()
+ local ce_crit = self.layer_repo:get_layer("ce_crit")
nerv.info("*** training stat begin ***")
nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
- nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
+ nerv.printf("accuracy:\t\t%.3f%%\n", ce_crit.total_correct / ce_crit.total_frames * 100)
nerv.info("*** training stat end ***")
end
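
With make_buffer gone, buffer construction (FrmBuffer, batching, GPU staging) presumably moves into the generic trainer, and the per-task config shrinks to data sources plus a handful of hooks. A hedged skeleton of the hook surface this file now implements, assuming trainer.lua exposes the built repo as self.layer_repo and treats get_error as a lower-is-better selection criterion:

    local trainer = nerv.Trainer

    function trainer:get_error()
        -- per-frame cross entropy; replaces the old get_accuracy as the
        -- quantity the scheduler compares across epochs
        local ce_crit = self.layer_repo:get_layer("ce_crit")
        return ce_crit.total_ce / ce_crit.total_frames
    end

    function trainer:mini_batch_afterprocess(cnt, info)
        if cnt % 1000 == 0 then        -- periodic progress report
            self:epoch_afterprocess()  -- reuse the end-of-epoch stat dump
        end
    end
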
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index 658aa2e..313156f 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -1,62 +1,77 @@
require 'kaldi_io'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
+gconf = {lrate = 0.8,
+ wcost = 1e-6,
+ momentum = 0.9,
+ frm_ext = 5,
+ chunk_size = 1,
tr_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
"scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/train.scp ark:- |",
cv_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
"scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/cv.scp ark:- |",
+ ali = {targets_rspecifier = "ark:/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-pdf " ..
+ "/speechlab/users/mfy43/timit/s5/exp/tri3_ali/final.mdl " ..
+ "\"ark:gunzip -c /speechlab/users/mfy43/timit/s5/exp/tri3_ali/ali.*.gz |\" " ..
+ "ark:- | " ..
+ "/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-post " ..
+ "ark:- ark:- |"},
initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_init.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"},
-- params in nnet_trans.nerv are included in the trained model
- decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"},
- chunk_size = 1}
+ decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_2016-05-06_17:40:54/2016-05-06_19:44:43_iter_20_lr0.012500_tr0.867_cv1.464.nerv"}
+}
-function make_layer_repo(param_repo)
+local input_size = 440
+local output_size = 1959
+local hidden_size = 1024
+local trainer = nerv.Trainer
+
+function trainer:make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
{
-- global transf
["nerv.BiasLayer"] =
{
- blayer1 = {dim_in = {440}, dim_out = {440}, params = {bias = "bias0"}}
+ blayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias0"}, no_update_all = true}
},
["nerv.WindowLayer"] =
{
- wlayer1 = {dim_in = {440}, dim_out = {440}, params = {window = "window0"}}
+ wlayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window0"}, no_update_all = true}
},
-- biased linearity
["nerv.AffineLayer"] =
{
- affine0 = {dim_in = {440}, dim_out = {1024},
+ affine0 = {dim_in = {input_size}, dim_out = {hidden_size},
params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
- affine1 = {dim_in = {1024}, dim_out = {1024},
+ affine1 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
- affine2 = {dim_in = {1024}, dim_out = {1024},
+ affine2 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
- affine3 = {dim_in = {1024}, dim_out = {1024},
+ affine3 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
- affine4 = {dim_in = {1024}, dim_out = {1024},
+ affine4 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
- affine5 = {dim_in = {1024}, dim_out = {1024},
+ affine5 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
- affine6 = {dim_in = {1024}, dim_out = {1959},
+ affine6 = {dim_in = {hidden_size}, dim_out = {output_size},
params = {ltp = "affine6_ltp", bp = "affine6_bp"}}
},
["nerv.SigmoidLayer"] =
{
- sigmoid0 = {dim_in = {1024}, dim_out = {1024}},
- sigmoid1 = {dim_in = {1024}, dim_out = {1024}},
- sigmoid2 = {dim_in = {1024}, dim_out = {1024}},
- sigmoid3 = {dim_in = {1024}, dim_out = {1024}},
- sigmoid4 = {dim_in = {1024}, dim_out = {1024}},
- sigmoid5 = {dim_in = {1024}, dim_out = {1024}}
+ sigmoid0 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid1 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid2 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid3 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid4 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
+ sigmoid5 = {dim_in = {hidden_size}, dim_out = {hidden_size}}
},
["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
{
- ce_crit = {dim_in = {1959, 1}, dim_out = {1}, compressed = true}
+ ce_crit = {dim_in = {output_size, 1}, dim_out = {1}, compressed = true}
},
["nerv.SoftmaxLayer"] = -- softmax for decode output
{
- softmax = {dim_in = {1959}, dim_out = {1959}}
+ softmax = {dim_in = {output_size}, dim_out = {output_size}}
}
}, param_repo, gconf)
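
Two details in the hunk above are worth noting. First, the Kaldi alignment pipeline (ali-to-pdf piped into ali-to-post) moves out of the reader code and into gconf.ali, and get_readers later in this diff resolves gconf.tr_ali or gconf.ali (and cv_ali likewise), so per-dataset alignments can be supplied without touching the reader. Second, no_update_all = true on blayer1/wlayer1 freezes the global-transform parameters, so only the DNN weights are updated during fine-tuning. A hypothetical per-dataset override (tr_ali/cv_ali are the optional keys consulted by get_readers; the paths below are placeholders, not real ones):

    gconf.tr_ali = {targets_rspecifier =
        "ark:ali-to-pdf final.mdl " ..
        "\"ark:gunzip -c exp/tri3_ali_train/ali.*.gz |\" ark:- | " ..
        "ali-to-post ark:- ark:- |"}
    gconf.cv_ali = {targets_rspecifier =
        "ark:ali-to-pdf final.mdl " ..
        "\"ark:gunzip -c exp/tri3_ali_cv/ali.*.gz |\" ark:- | " ..
        "ali-to-post ark:- ark:- |"}
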
@@ -65,7 +80,7 @@ function make_layer_repo(param_repo)
["nerv.GraphLayer"] =
{
global_transf = {
- dim_in = {440}, dim_out = {440},
+ dim_in = {input_size}, dim_out = {input_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "blayer1[1]", 0},
@@ -74,7 +89,7 @@ function make_layer_repo(param_repo)
}
},
main = {
- dim_in = {440}, dim_out = {1959},
+ dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "affine0[1]", 0},
@@ -101,20 +116,22 @@ function make_layer_repo(param_repo)
["nerv.GraphLayer"] =
{
ce_output = {
- dim_in = {440, 1}, dim_out = {1},
+ dim_in = {input_size, 1}, dim_out = {1},
layer_repo = layer_repo,
connections = {
- {"<input>[1]", "main[1]", 0},
+ {"<input>[1]", "global_transf[1]", 0},
+ {"global_transf[1]", "main[1]", 0},
{"main[1]", "ce_crit[1]", 0},
{"<input>[2]", "ce_crit[2]", 0},
{"ce_crit[1]", "<output>[1]", 0}
}
},
softmax_output = {
- dim_in = {440}, dim_out = {1959},
+ dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
- {"<input>[1]", "main[1]", 0},
+ {"<input>[1]", "global_transf[1]", 0},
+ {"global_transf[1]", "main[1]", 0},
{"main[1]", "softmax[1]", 0},
{"softmax[1]", "<output>[1]", 0}
}
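
Because softmax_output now embeds global_transf, the decode side no longer needs the transform exported separately, which is why get_global_transf is deleted below and get_decode_input_order stops flagging global_transf = true. A hedged usage sketch (method names as in this commit; the surrounding decoder script is assumed):

    local repo = trainer:make_layer_repo(param_repo)
    local net  = trainer:get_decode_network(repo)   -- the softmax_output graph
    -- raw spliced features fed to the network's input [1] now come out
    -- as posteriors; no separate global_transf pass is required
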
@@ -125,90 +142,73 @@ function make_layer_repo(param_repo)
return layer_repo
end
-function get_network(layer_repo)
+function trainer:get_network(layer_repo)
return layer_repo:get_layer("ce_output")
end
-function get_decode_network(layer_repo)
- return layer_repo:get_layer("softmax_output")
-end
-
-function get_global_transf(layer_repo)
- return layer_repo:get_layer("global_transf")
-end
-
-function make_readers(scp_file, layer_repo)
- return {
- {reader = nerv.KaldiReader(gconf,
+function trainer:get_readers(dataset)
+ local function reader_gen(scp, ali)
+ return {{reader = nerv.KaldiReader(gconf,
{
id = "main_scp",
- feature_rspecifier = scp_file,
- conf_file = gconf.htk_conf,
+ feature_rspecifier = scp,
frm_ext = gconf.frm_ext,
mlfs = {
- phone_state = {
- targets_rspecifier = "ark:/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-pdf " ..
- "/speechlab/users/mfy43/timit/s5/exp/tri3_ali/final.mdl " ..
- "\"ark:gunzip -c /speechlab/users/mfy43/timit/s5/exp/tri3_ali/ali.*.gz |\" " ..
- "ark:- | " ..
- "/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-post " ..
- "ark:- ark:- |",
- format = "map"
- }
+ phone_state = ali
}
}),
- data = {main_scp = 440, phone_state = 1}}
- }
+ data = {main_scp = input_size, phone_state = 1}}}
+ end
+ if dataset == 'train' then
+ return reader_gen(gconf.tr_scp, gconf.tr_ali or gconf.ali)
+ elseif dataset == 'validate' then
+ return reader_gen(gconf.cv_scp, gconf.cv_ali or gconf.ali)
+ else
+ nerv.error('no such dataset')
+ end
+end
+
+function trainer:get_input_order()
+ return {"main_scp", "phone_state"}
+end
+
+function trainer:get_decode_network(layer_repo)
+ return layer_repo:get_layer("softmax_output")
end
-function make_decode_readers(scp_file, layer_repo)
- return {
- {reader = nerv.KaldiReader(gconf,
+function trainer:make_decode_readers(scp_file)
+ return {{reader = nerv.KaldiReader(gconf,
{
id = "main_scp",
feature_rspecifier = scp_file,
- conf_file = gconf.htk_conf,
frm_ext = gconf.frm_ext,
- mlfs = {},
- need_key = true
+ mlfs = {}
}),
- data = {main_scp = 440, phone_state = 1}}
- }
-end
-
-function make_buffer(readers)
- return nerv.FrmBuffer(gconf,
- {
- buffer_size = gconf.buffer_size,
- batch_size = gconf.batch_size,
- chunk_size = gconf.chunk_size,
- randomize = gconf.randomize,
- readers = readers,
- use_gpu = true
- })
+ data = {main_scp = input_size, phone_state = 1}}}
end
-function get_input_order()
- return {{id = "main_scp", global_transf = true},
- {id = "phone_state"}}
+function trainer:get_decode_input_order()
+ return {"main_scp"}
end
-function get_decode_input_order()
- return {{id = "main_scp", global_transf = true}}
+function trainer:get_error()
+ local ce_crit = self.layer_repo:get_layer("ce_crit")
+ return ce_crit.total_ce / ce_crit.total_frames
end
-function get_accuracy(layer_repo)
- local ce_crit = layer_repo:get_layer("ce_crit")
- return ce_crit.total_correct / ce_crit.total_frames * 100
+function trainer:mini_batch_afterprocess(cnt, info)
+ if cnt % 1000 == 0 then
+ self:epoch_afterprocess()
+ end
end
-function print_stat(layer_repo)
- local ce_crit = layer_repo:get_layer("ce_crit")
+function trainer:epoch_afterprocess()
+ local ce_crit = self.layer_repo:get_layer("ce_crit")
nerv.info("*** training stat begin ***")
nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
- nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
+ nerv.printf("accuracy:\t\t%.3f%%\n", ce_crit.total_correct / ce_crit.total_frames * 100)
nerv.info("*** training stat end ***")
end
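
As in the SWB config, model selection moves from get_accuracy (higher is better) to get_error (per-frame cross entropy, lower is better); the new decode_param filename above, with tr0.867_cv1.464 in place of the old tr72.572_cv58.709, is consistent with that switch. The report emitted every 1000 minibatches and at each epoch end follows the printf formats above; roughly (values illustrative, chosen to be self-consistent):

    -- *** training stat begin ***
    -- cross entropy:   1177375.70123456
    -- correct:         870000
    -- frames:          1000000
    -- err/frm:         1.17737570
    -- accuracy:        87.000%
    -- *** training stat end ***
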
diff --git a/nerv/examples/trainer.lua b/nerv/examples/trainer.lua
index 8e3efcb..f6c7a5a 100644
--- a/nerv/examples/trainer.lua
+++ b/nerv/examples/trainer.lua
@@ -36,7 +36,7 @@ local function make_options(spec)
end
local function print_help(options)
- nerv.printf("Usage: <asr_trainer.lua> [options] network_config.lua\n")
+ nerv.printf("Usage: <trainer.lua> [options] <network_config.lua>\n")
nerv.print_usage(options)
end