author     cloudygoose <[email protected]>  2015-06-06 11:03:49 +0800
committer  cloudygoose <[email protected]>  2015-06-06 11:03:49 +0800
commit     31330d6c095b2b11b34f524169f56dc8d18355c3 (patch)
tree       c67e8d625fc5d31c048fca72e3dbeadafec0b9a2
parent     3faaef779e384e6283761906552c6c6c4eafb3dd (diff)
parent     0bb9cd4271f127c311fd9839855def8f9ea91dab (diff)
...
Merge remote-tracking branch 'upstream/master'
-rw-r--r--  examples/asr_trainer.lua    |  87
-rw-r--r--  examples/swb_baseline.lua   | 163
-rw-r--r--  io/chunk_file.c             |  14
-rw-r--r--  matrix/generic/elem_type.h  |   3
-rw-r--r--  matrix/generic/mmatrix.c    |   2
-rw-r--r--  nerv.lua                    |   6
6 files changed, 266 insertions, 9 deletions
diff --git a/examples/asr_trainer.lua b/examples/asr_trainer.lua
new file mode 100644
index 0000000..b43a547
--- /dev/null
+++ b/examples/asr_trainer.lua
@@ -0,0 +1,87 @@
+function build_trainer(ifname)
+    local param_repo = make_param_repo(ifname)
+    local sublayer_repo = make_sublayer_repo(param_repo)
+    local layer_repo = make_layer_repo(sublayer_repo, param_repo)
+    local crit = get_criterion_layer(sublayer_repo)
+    local network = get_network(layer_repo)
+    local iterative_trainer = function (ofname, scp_file, bp)
+        gconf.randomize = bp
+        -- build buffer
+        local buffer = make_buffer(make_reader(scp_file, layer_repo))
+        -- initialize the network
+        network:init(gconf.batch_size)
+        gconf.cnt = 0
+        for data in buffer.get_data, buffer do
+            -- print stats periodically
+            gconf.cnt = gconf.cnt + 1
+            if gconf.cnt == 1000 then
+                print_stat(crit)
+                gconf.cnt = 0
+            end
+            -- if gconf.cnt == 100 then break end  -- debugging leftover: would stop training after 100 minibatches
+
+            local input = {data.main_scp, data.phone_state}
+            local output = {}
+            local err_input = {}
+            local err_output = {input[1]:create()}
+            network:propagate(input, output)
+            if bp then
+                network:back_propagate(err_output, err_input, input, output)
+                network:update(err_input, input, output)
+            end
+            -- collect garbage promptly to save GPU memory
+            collectgarbage("collect")
+        end
+        print_stat(crit)
+        if bp then
+            nerv.info("writing back...")
+            local cf = nerv.ChunkFile(ofname, "w")
+            for i, p in ipairs(network:get_params()) do
+                cf:write_chunk(p)
+            end
+            cf:close()
+        end
+        return get_accuracy(crit)
+    end
+    return iterative_trainer
+end
+
+dofile(arg[1])
+start_halving_inc = 0.5
+halving_factor = 0.6
+end_halving_inc = 0.1
+min_iter = 1
+max_iter = 20
+min_halving = 6
+gconf.batch_size = 256
+gconf.buffer_size = 81920
+
+local pf0 = gconf.initialized_param
+local trainer = build_trainer(pf0)
+--local trainer = build_trainer("c3.nerv")
+local accu_best = trainer(nil, gconf.cv_scp, false)
+local do_halving = false
+
+nerv.info("initial cross validation: %.3f", accu_best)
+for i = 1, max_iter do
+ nerv.info("iteration %d with lrate = %.6f", i, gconf.lrate)
+ local accu_tr = trainer(pf0 .. "_iter" .. i .. ".nerv", gconf.tr_scp, true)
+ nerv.info("[TR] training set %d: %.3f", i, accu_tr)
+ local accu_new = trainer(nil, gconf.cv_scp, false)
+ nerv.info("[CV] cross validation %d: %.3f", i, accu_new)
+ -- TODO: revert the weights
+ local accu_diff = accu_new - accu_best
+ if do_halving and accu_diff < end_halving_inc and i > min_iter then
+ break
+ end
+ if accu_diff < start_halving_inc and i >= min_halving then
+ do_halving = true
+ end
+ if do_halving then
+ gconf.lrate = gconf.lrate * halving_factor
+ end
+ if accu_new > accu_best then
+ accu_best = accu_new
+ end
+end
+nerv.Matrix.print_profile()
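
The trainer above is generic: dofile(arg[1]) pulls in a site-specific config that must define gconf plus the helper constructors the script calls. The sketch below spells out that assumed contract; the helper names are taken directly from the calls in asr_trainer.lua, and swb_baseline.lua below is a complete real instance:

    -- hypothetical skeleton of a config consumed by asr_trainer.lua
    gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
             tr_scp = "train.scp", cv_scp = "cv.scp",
             initialized_param = "init.nerv"}
    function make_param_repo(param_file) end                 -- build a nerv.ParamRepo
    function make_sublayer_repo(param_repo) end              -- primitive layers
    function make_layer_repo(sublayer_repo, param_repo) end  -- DAG layers
    function get_criterion_layer(sublayer_repo) end          -- loss layer
    function get_network(layer_repo) end                     -- main DAG
    function make_reader(scp_file, layer_repo) end           -- data reader
    function make_buffer(reader) end                         -- SGD buffer
    function get_accuracy(crit) end                          -- frame accuracy (%)
    function print_stat(crit) end                            -- dump statistics
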
diff --git a/examples/swb_baseline.lua b/examples/swb_baseline.lua
new file mode 100644
index 0000000..f536777
--- /dev/null
+++ b/examples/swb_baseline.lua
@@ -0,0 +1,163 @@
+require 'speech.init'
+gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
+         cumat_type = nerv.CuMatrixFloat,
+         mmat_type = nerv.MMatrixFloat,
+         frm_ext = 5,
+         tr_scp = "/slfs1/users/mfy43/swb_ivec/train_bp.scp",
+         cv_scp = "/slfs1/users/mfy43/swb_ivec/train_cv.scp",
+         htk_conf = "/slfs1/users/mfy43/swb_ivec/plp_0_d_a.conf",
+         global_transf = "global_transf.nerv",
+         initialized_param = "converted.nerv",
+         debug = false}
+
+function make_param_repo(param_file)
+    return nerv.ParamRepo({param_file, gconf.global_transf})
+end
+
+function make_sublayer_repo(param_repo)
+    return nerv.LayerRepo(
+        {
+            -- global transf
+            ["nerv.BiasLayer"] =
+            {
+                blayer1 = {{bias = "bias1"}, {dim_in = {429}, dim_out = {429}}},
+                blayer2 = {{bias = "bias2"}, {dim_in = {429}, dim_out = {429}}}
+            },
+            ["nerv.WindowLayer"] =
+            {
+                wlayer1 = {{window = "window1"}, {dim_in = {429}, dim_out = {429}}},
+                wlayer2 = {{window = "window2"}, {dim_in = {429}, dim_out = {429}}}
+            },
+            -- biased linearity
+            ["nerv.AffineLayer"] =
+            {
+                affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
+                           {dim_in = {429}, dim_out = {2048}}},
+                affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
+                           {dim_in = {2048}, dim_out = {2048}}},
+                affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
+                           {dim_in = {2048}, dim_out = {2048}}},
+                affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
+                           {dim_in = {2048}, dim_out = {2048}}},
+                affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
+                           {dim_in = {2048}, dim_out = {2048}}},
+                affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
+                           {dim_in = {2048}, dim_out = {2048}}},
+                affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
+                           {dim_in = {2048}, dim_out = {2048}}},
+                affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
+                           {dim_in = {2048}, dim_out = {3001}}}
+            },
+            ["nerv.SigmoidLayer"] =
+            {
+                sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+                sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+                sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+                sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+                sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+                sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+                sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
+            },
+            ["nerv.SoftmaxCELayer"] =
+            {
+                criterion = {{}, {dim_in = {3001, 1}, dim_out = {}, compressed = true}}
+            }
+        }, param_repo, gconf)
+end
+
+function make_layer_repo(sublayer_repo, param_repo)
+    return nerv.LayerRepo(
+        {
+            ["nerv.DAGLayer"] =
+            {
+                global_transf = {{}, {
+                    dim_in = {429}, dim_out = {429},
+                    sub_layers = sublayer_repo,
+                    connections = {
+                        ["<input>[1]"] = "blayer1[1]",
+                        ["blayer1[1]"] = "wlayer1[1]",
+                        ["wlayer1[1]"] = "blayer2[1]",
+                        ["blayer2[1]"] = "wlayer2[1]",
+                        ["wlayer2[1]"] = "<output>[1]"
+                    }
+                }},
+                main = {{}, {
+                    dim_in = {429, 1}, dim_out = {},
+                    sub_layers = sublayer_repo,
+                    connections = {
+                        ["<input>[1]"] = "affine0[1]",
+                        ["affine0[1]"] = "sigmoid0[1]",
+                        ["sigmoid0[1]"] = "affine1[1]",
+                        ["affine1[1]"] = "sigmoid1[1]",
+                        ["sigmoid1[1]"] = "affine2[1]",
+                        ["affine2[1]"] = "sigmoid2[1]",
+                        ["sigmoid2[1]"] = "affine3[1]",
+                        ["affine3[1]"] = "sigmoid3[1]",
+                        ["sigmoid3[1]"] = "affine4[1]",
+                        ["affine4[1]"] = "sigmoid4[1]",
+                        ["sigmoid4[1]"] = "affine5[1]",
+                        ["affine5[1]"] = "sigmoid5[1]",
+                        ["sigmoid5[1]"] = "affine6[1]",
+                        ["affine6[1]"] = "sigmoid6[1]",
+                        ["sigmoid6[1]"] = "affine7[1]",
+                        ["affine7[1]"] = "criterion[1]",
+                        ["<input>[2]"] = "criterion[2]"
+                    }
+                }}
+            }
+        }, param_repo, gconf)
+end
+
+function get_criterion_layer(sublayer_repo)
+    return sublayer_repo:get_layer("criterion")
+end
+
+function get_network(layer_repo)
+    return layer_repo:get_layer("main")
+end
+
+function make_reader(scp_file, layer_repo)
+    return nerv.TNetReader(gconf,
+        {
+            id = "main_scp",
+            scp_file = scp_file,
+            conf_file = gconf.htk_conf,
+            frm_ext = gconf.frm_ext,
+            mlfs = {
+                phone_state = {
+                    file = "/slfs1/users/mfy43/swb_ivec/ref.mlf",
+                    format = "map",
+                    format_arg = "/slfs1/users/mfy43/swb_ivec/dict",
+                    dir = "*/",
+                    ext = "lab"
+                }
+            },
+            global_transf = layer_repo:get_layer("global_transf")
+        })
+end
+
+function make_buffer(reader, buffer)
+    return nerv.SGDBuffer(gconf,
+        {
+            buffer_size = gconf.buffer_size,
+            randomize = gconf.randomize,
+            readers = {
+                {reader = reader,
+                 data = {main_scp = 429, phone_state = 1}}
+            }
+        })
+end
+
+function get_accuracy(crit)
+    return crit.total_correct / crit.total_frames * 100
+end
+
+function print_stat(crit)
+ nerv.info("*** training stat begin ***")
+ nerv.utils.printf("cross entropy:\t%.8f\n", crit.total_ce)
+ nerv.utils.printf("correct:\t%d\n", crit.total_correct)
+ nerv.utils.printf("frames:\t%d\n", crit.total_frames)
+ nerv.utils.printf("err/frm:\t%.8f\n", crit.total_ce / crit.total_frames)
+ nerv.utils.printf("accuracy:\t%.3f%%\n", get_accuracy(crit))
+ nerv.info("*** training stat end ***")
+end
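
One number in this config worth decoding is dim_in = {429}. The file does not say where it comes from, but assuming standard 39-dimensional PLP_0_D_A features (13 static + delta + acceleration coefficients, matching the plp_0_d_a.conf name above) spliced with the frm_ext = 5 context window, the arithmetic works out; feat_dim = 39 is the one assumption here:

    local feat_dim = 39                      -- assumed: PLP_0_D_A feature width
    local context  = 2 * gconf.frm_ext + 1   -- 5 left + current + 5 right = 11 frames
    print(feat_dim * context)                -- 429, matching dim_in above
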
diff --git a/io/chunk_file.c b/io/chunk_file.c
index aa7dd1c..c0b6b9f 100644
--- a/io/chunk_file.c
+++ b/io/chunk_file.c
@@ -44,7 +44,7 @@ size_t read_chunk_header_plain(FILE *fp, int *status) {
     for (i = 0; i < PARAM_HEADER_SIZE; i++)
         if (isdigit(buff[i]))
             size = size * 10 + buff[i] - '0';
-    fprintf(stderr, "header: %lu\n", size);
+    /* fprintf(stderr, "header: %lu\n", size); */
     return size;
 }
@@ -91,7 +91,7 @@ const char *read_chunk_metadata(lua_State *L, FILE *fp, const char *fn) {
 #define LINEBUFF_SIZE 1024
     static char buff[7 + LINEBUFF_SIZE] = "return ";
     CHECK_FORMAT(fgets(buff + 7, LINEBUFF_SIZE, fp), buff + 7, fn);
-    fprintf(stderr, "metadata: %s\n", buff);
+    /* fprintf(stderr, "metadata: %s\n", buff); */
     return buff;
 }
@@ -104,7 +104,7 @@ void write_chunk_metadata(FILE *fp, const char *metadata_str, int *status) {
         *status = WRITE_ERROR;
         return;
     }
-    fprintf(stderr, "metadata: %s\n", metadata_str);
+    /* fprintf(stderr, "metadata: %s\n", metadata_str); */
 }
@@ -132,11 +132,11 @@ int nerv_chunk_file_open_read(lua_State *L, const char *fn) {
     if (!fp) nerv_error(L, "Error while opening chunk file: %s", fn);
     offset = ftello(fp);
     lua_newtable(L);
-    fprintf(stderr, "%d\n", (int)offset);
+    /* fprintf(stderr, "%d\n", (int)offset); */
     for (i = 0;; offset += chunk_len, i++)
     {
         ChunkInfo *pci;
-        fprintf(stderr, "reading chunk %d from %d\n", i, (int)offset);
+        /* fprintf(stderr, "reading chunk %d from %d\n", i, (int)offset); */
         /* skip to the beginning of chunk i */
         CHECK_FORMAT(fseeko(fp, offset, SEEK_SET), 0, fn);
         /* read header */
@@ -153,8 +153,8 @@ int nerv_chunk_file_open_read(lua_State *L, const char *fn) {
         pci = (ChunkInfo *)malloc(sizeof(ChunkInfo));
         pci->offset = ftello(fp);
         pci->length = chunk_len - (pci->offset - offset);
-        fprintf(stderr, "%d + %d (skip %lu)\n", (int)pci->offset,
-                (int)pci->length, chunk_len);
+        /* fprintf(stderr, "%d + %d (skip %lu)\n", (int)pci->offset,
+                (int)pci->length, chunk_len); */
         luaT_pushudata(L, pci, nerv_chunk_info_tname);
         lua_setfield(L, -2, "chunk");
         /* stack: obj_table, metadata */
diff --git a/matrix/generic/elem_type.h b/matrix/generic/elem_type.h
index 2a6ffa8..bffe940 100644
--- a/matrix/generic/elem_type.h
+++ b/matrix/generic/elem_type.h
@@ -2,18 +2,21 @@
 #define MATRIX_ELEM float
 #define MATRIX_ELEM_FMT "%f"
+#define MATRIX_ELEM_WRITE_FMT "%.8f"
 #define MATRIX_ELEM_PTR(self) ((self)->data.f)
 #elif defined(MATRIX_USE_DOUBLE)
 #define MATRIX_ELEM double
 #define MATRIX_ELEM_FMT "%lf"
+#define MATRIX_ELEM_WRITE_FMT "%.8lf"
 #define MATRIX_ELEM_PTR(self) ((self)->data.d)
 #elif defined(MATRIX_USE_INT)
 #define MATRIX_ELEM long
 #define MATRIX_ELEM_FMT "%ld"
+#define MATRIX_ELEM_WRITE_FMT "%ld"
 #define MATRIX_ELEM_PTR(self) ((self)->data.i)
 #endif
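
The point of the new MATRIX_ELEM_WRITE_FMT is that plain "%f"/"%lf" print only 6 decimal places by default, which silently truncates float parameters on a text save/load round trip; "%.8f" keeps 8 digits, and the integer variant is unchanged since "%ld" loses nothing. A quick illustration of the difference, in Lua for consistency with the rest of the codebase:

    print(string.format("%f", 1/3))     -- 0.333333   (old format, 6 places)
    print(string.format("%.8f", 1/3))   -- 0.33333333 (new write format, 8 places)
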
diff --git a/matrix/generic/mmatrix.c b/matrix/generic/mmatrix.c
index 75d1eb1..2045d65 100644
--- a/matrix/generic/mmatrix.c
+++ b/matrix/generic/mmatrix.c
@@ -86,7 +86,7 @@ int nerv_matrix_(save)(lua_State *L) {
     {
         MATRIX_ELEM *row = MATRIX_ROW_PTR(self, i);
         for (j = 0; j < ncol; j++)
-            if (fprintf(fp, MATRIX_ELEM_FMT " ", row[j]) < 0)
+            if (fprintf(fp, MATRIX_ELEM_WRITE_FMT " ", row[j]) < 0)
             {
                 free(self);
                 return 0;
diff --git a/nerv.lua b/nerv.lua
index cb53f29..ce6bc44 100644
--- a/nerv.lua
+++ b/nerv.lua
@@ -2,13 +2,17 @@ require 'libnerv'
 nerv.utils = require 'pl.utils'

 function nerv.error(fmt, ...)
-    error(nerv.utils.printf("Nerv internal error: " .. fmt .. "\n", ...))
+    error(nerv.utils.printf("[nerv] internal error: " .. fmt .. "\n", ...))
 end

 function nerv.error_method_not_implemented()
     nerv.error("method not implemented");
 end

+function nerv.info(fmt, ...)
+    nerv.utils.printf("[nerv] info: " .. fmt .. "\n", ...)
+end
+
-- Torch C API wrapper
function nerv.class(tname, parenttname)
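
nerv.info is printf-style and prefixes its output, so the log lines produced by asr_trainer.lua above come out as follows (a usage sketch, not part of the diff):

    nerv.info("iteration %d with lrate = %.6f", 1, 0.8)
    -- prints: [nerv] info: iteration 1 with lrate = 0.800000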