commit    2497fd9e7a0fae5ee4887890d7a312e0e08a93b8
tree      382f97575bd2df9ee6abb1662b11b279fc22d72b
parent    196e9b48a3541caccdffc5743001cced70667091
author    Determinant <ted.sybil@gmail.com>  2015-06-22 19:01:29 +0800
committer Determinant <ted.sybil@gmail.com>  2015-06-22 19:01:29 +0800

major change: use luarocks to manage project
Diffstat (limited to 'examples')
-rw-r--r--  examples/asr_trainer.lua            106
-rw-r--r--  examples/chunk_file_example.lua      53
-rw-r--r--  examples/cumatrix_example.lua        31
-rw-r--r--  examples/cumatrix_from_mmatrix.lua   32
-rw-r--r--  examples/mmatrix_example.lua         20
-rw-r--r--  examples/oop_example.c              101
-rw-r--r--  examples/oop_example.lua             16
-rw-r--r--  examples/swb_baseline.lua           166
-rw-r--r--  examples/test_dnn_layers.lua         78
-rw-r--r--  examples/test_nn_lib.lua            164
10 files changed, 0 insertions, 767 deletions
diff --git a/examples/asr_trainer.lua b/examples/asr_trainer.lua
deleted file mode 100644
index a5727be..0000000
--- a/examples/asr_trainer.lua
+++ /dev/null
@@ -1,106 +0,0 @@
-function build_trainer(ifname)
- local param_repo = nerv.ParamRepo()
- param_repo:import(ifname, nil, gconf)
- local sublayer_repo = make_sublayer_repo(param_repo)
- local layer_repo = make_layer_repo(sublayer_repo, param_repo)
- local crit = get_criterion_layer(sublayer_repo)
- local network = get_network(layer_repo)
- local input_order = get_input_order()
- local iterative_trainer = function (prefix, scp_file, bp)
- gconf.randomize = bp
- -- build buffer
- local buffer = make_buffer(make_readers(scp_file, layer_repo))
- -- initialize the network
- network:init(gconf.batch_size)
- gconf.cnt = 0
- err_input = {nerv.CuMatrixFloat(256, 1)}
- err_input[1]:fill(1)
- for data in buffer.get_data, buffer do
- -- print stat periodically
- gconf.cnt = gconf.cnt + 1
- if gconf.cnt == 1000 then
- print_stat(sublayer_repo)
- nerv.CuMatrix.print_profile()
- nerv.CuMatrix.clear_profile()
- gconf.cnt = 0
- -- break
- end
- local input = {}
--- if gconf.cnt == 100 then break end
- for i, id in ipairs(input_order) do
- if data[id] == nil then
- nerv.error("input data %s not found", id)
- end
- table.insert(input, data[id])
- end
- local output = {nerv.CuMatrixFloat(256, 1)}
- err_output = {input[1]:create()}
- network:propagate(input, output)
- if bp then
- network:back_propagate(err_input, err_output, input, output)
- network:update(err_input, input, output)
- end
- -- collect garbage promptly to save GPU memory
- collectgarbage("collect")
- end
- print_stat(sublayer_repo)
- nerv.CuMatrix.print_profile()
- nerv.CuMatrix.clear_profile()
- if (not bp) and prefix ~= nil then
- nerv.info("writing back...")
- local fname = string.format("%s_cv%.3f.nerv",
- prefix, get_accuracy(sublayer_repo))
- network:get_params():export(fname, nil)
- end
- return get_accuracy(sublayer_repo)
- end
- return iterative_trainer
-end
-
-dofile(arg[1])
-start_halving_inc = 0.5
-halving_factor = 0.6
-end_halving_inc = 0.1
-min_iter = 1
-max_iter = 20
-min_halving = 5
-gconf.batch_size = 256
-gconf.buffer_size = 81920
-
-local pf0 = gconf.initialized_param
-local trainer = build_trainer(pf0)
---local trainer = build_trainer("c3.nerv")
-local accu_best = trainer(nil, gconf.cv_scp, false)
-local do_halving = false
-
-nerv.info("initial cross validation: %.3f", accu_best)
-for i = 1, max_iter do
- nerv.info("[NN] begin iteration %d with lrate = %.6f", i, gconf.lrate)
- local accu_tr = trainer(nil, gconf.tr_scp, true)
- nerv.info("[TR] training set %d: %.3f", i, accu_tr)
- local accu_new = trainer(
- string.format("%s_%s_iter_%d_lr%f_tr%.3f",
- string.gsub(
- (string.gsub(pf0[1], "(.*/)(.*)", "%2")),
- "(.*)%..*", "%1"),
- os.date("%Y%m%d%H%M%S"),
- i, gconf.lrate,
- accu_tr),
- gconf.cv_scp, false)
- nerv.info("[CV] cross validation %d: %.3f", i, accu_new)
- -- TODO: revert the weights
- local accu_diff = accu_new - accu_best
- if do_halving and accu_diff < end_halving_inc and i > min_iter then
- break
- end
- if accu_diff < start_halving_inc and i >= min_halving then
- do_halving = true
- end
- if do_halving then
- gconf.lrate = gconf.lrate * halving_factor
- end
- if accu_new > accu_best then
- accu_best = accu_new
- end
--- nerv.Matrix.print_profile()
-end
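
asr_trainer.lua above implements a "newbob"-style schedule: the learning
rate stays fixed until the cross-validation gain of an iteration falls
below start_halving_inc, is then multiplied by halving_factor every
iteration, and training stops once the gain drops below end_halving_inc.
A minimal sketch of one scheduling step, distilled from the loop above
(the standalone helper and the fields hung off gconf are illustrative,
not part of the repo):

    -- one step of the halving policy; returns false when training should stop
    local function halving_step(state, accu_new, i, gconf)
        local accu_diff = accu_new - state.accu_best
        -- once halving has begun, stop when the CV gain becomes negligible
        if state.do_halving and accu_diff < gconf.end_halving_inc
                            and i > gconf.min_iter then
            return false
        end
        -- begin halving when the per-iteration gain levels off
        if accu_diff < gconf.start_halving_inc and i >= gconf.min_halving then
            state.do_halving = true
        end
        if state.do_halving then
            gconf.lrate = gconf.lrate * gconf.halving_factor
        end
        if accu_new > state.accu_best then
            state.accu_best = accu_new
        end
        return true
    end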
diff --git a/examples/chunk_file_example.lua b/examples/chunk_file_example.lua
deleted file mode 100644
index 5961c98..0000000
--- a/examples/chunk_file_example.lua
+++ /dev/null
@@ -1,53 +0,0 @@
--- To define a readable and writable chunk, one must define a class with the
--- following methods: __init(id, global_conf), read(handle), write(handle),
--- get_info(), set_info(info) and an id attribute. This file demonstrates a
--- basic chunk implementation that manages the I/O of a matrix.
-
-local MatrixChunk = nerv.class("nerv.MatrixChunk")
-
-function MatrixChunk:__init(id, global_conf)
- self.id = id
- self.info = {}
- self.gconf = global_conf
-end
-
-function MatrixChunk:read(handle)
- -- pass the read handle to the matrix method
- self.data = nerv.MMatrixFloat.load(handle)
-end
-
-function MatrixChunk:write(handle)
- -- pass the write handle to the matrix method
- self.data:save(handle)
-end
-
-function MatrixChunk:get_info()
- return self.info
-end
-
-function MatrixChunk:set_info(info)
- self.info = info
-end
-
-function MatrixChunk.create_from_matrix(id, mat)
- local ins = nerv.MatrixChunk(id)
- ins.data = mat
- return ins
-end
-
-mat = nerv.MMatrixFloat(3, 4)
-for i = 0, 2 do
- for j = 0, 3 do
- mat[i][j] = i + j
- end
-end
-
-cd = nerv.MatrixChunk.create_from_matrix("matrix1", mat)
-
-cf = nerv.ChunkFile("test.nerv", "w")
-cf:write_chunk(cd)
-cf:close()
-
-cf2 = nerv.ChunkFile("test.nerv", "r")
-cd2 = cf2:read_chunk("matrix1")
-print(cd2.data)
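
The comment at the top of chunk_file_example.lua spells out the chunk
contract: an id attribute plus __init(id, global_conf), read(handle),
write(handle), get_info() and set_info(info). One thing the example shows
only implicitly is that a single ChunkFile can hold several chunks, each
addressed by its id. A small sketch built on the same write_chunk /
read_chunk calls (file name and contents are illustrative, and id-based
lookup is assumed to be order-independent):

    local a = nerv.MMatrixFloat(2, 2)
    local b = nerv.MMatrixFloat(2, 2)
    for i = 0, 1 do
        for j = 0, 1 do
            a[i][j] = i + j
            b[i][j] = i * j
        end
    end
    local cf = nerv.ChunkFile("two.nerv", "w")
    cf:write_chunk(nerv.MatrixChunk.create_from_matrix("a", a))
    cf:write_chunk(nerv.MatrixChunk.create_from_matrix("b", b))
    cf:close()
    local rf = nerv.ChunkFile("two.nerv", "r")
    print(rf:read_chunk("b").data)  -- fetched by id, not by position
    print(rf:read_chunk("a").data)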
diff --git a/examples/cumatrix_example.lua b/examples/cumatrix_example.lua
deleted file mode 100644
index 544fc7f..0000000
--- a/examples/cumatrix_example.lua
+++ /dev/null
@@ -1,31 +0,0 @@
-m = 4
-n = 4
-fm = nerv.CuMatrixFloat(m, n)
-dm = nerv.CuMatrixDouble(m, n)
-for i = 0, m - 1 do
- for j = 0, n - 1 do
- -- local t = math.random(10)
- t = i / (j + 1)
- fm[i][j] = t
- dm[i][j] = t
- end
-end
-print(fm)
-fs = fm:create()
-fs:softmax(fm)
--- print(fs)
-print(dm)
-ds = dm:create()
-ds:softmax(dm)
--- print(ds)
-print(fs)
-print(fs + fs)
-print(ds + ds)
-print(fs - fs)
-print(ds - ds)
-
-a = fs:create()
-a:mul_elem(fs, fs)
-print(a)
-a:log_elem(fs)
-print(a)
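
Note the calling convention the example relies on: nerv matrix kernels
write into the matrix they are invoked on, with sources passed as
arguments, and create() allocates an uninitialized matrix of the same
shape and type; only the arithmetic operators shown (+, -) return fresh
matrices. A condensed sketch of the pattern (values are illustrative):

    local x = nerv.CuMatrixFloat(4, 4)
    x:fill(0.5)              -- fill is destination-style, too
    local y = x:create()     -- same shape and type, contents undefined
    y:softmax(x)             -- y receives softmax(x)
    local z = y:create()
    z:mul_elem(y, y)         -- z receives the element-wise product of y and y
    z:log_elem(y)            -- z receives element-wise log(y), overwriting z
    print(z)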
diff --git a/examples/cumatrix_from_mmatrix.lua b/examples/cumatrix_from_mmatrix.lua
deleted file mode 100644
index 2309e14..0000000
--- a/examples/cumatrix_from_mmatrix.lua
+++ /dev/null
@@ -1,32 +0,0 @@
-m = 3
-n = 4
-fm = nerv.MMatrixFloat(m, n)
-dm = nerv.MMatrixDouble(m, n)
-for i = 0, m - 1 do
- for j = 0, n - 1 do
- -- local t = math.random(10)
- t = i / (j + 1)
- fm[i][j] = t
- dm[i][j] = t
- end
-end
-print(fm)
-print(dm)
-
-fc = nerv.CuMatrixFloat(m, n)
-dc = nerv.CuMatrixDouble(m, n)
-fc:copy_fromh(fm)
-dc:copy_fromh(dm)
-print("fc and dc")
-print(fc)
-print(dc)
-dc[1]:copy_tod(dc[0])
-print("dc[1] copied to dc[0]")
-print(dc)
-print("softmax of fc and dc")
-sfc = fc:create()
-sdc = dc:create()
-sfc:softmax(fc)
-print(sfc)
-sdc:softmax(dc)
-print(sdc)
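
The copy calls above encode their direction in the suffix: copy_fromh
pulls a host matrix (MMatrix) into the device matrix it is invoked on,
while copy_tod copies the receiver into another device matrix; since
indexing such as dc[1] yields a row view, single rows can be copied the
same way. A condensed sketch of the round trip (shapes and values are
illustrative):

    local host = nerv.MMatrixFloat(2, 3)
    for i = 0, 1 do
        for j = 0, 2 do
            host[i][j] = i * 3 + j
        end
    end
    local dev = nerv.CuMatrixFloat(2, 3)
    dev:copy_fromh(host)       -- host -> device
    dev[1]:copy_tod(dev[0])    -- row 1 -> row 0, entirely on the device
    print(dev)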
diff --git a/examples/mmatrix_example.lua b/examples/mmatrix_example.lua
deleted file mode 100644
index 8ddfe84..0000000
--- a/examples/mmatrix_example.lua
+++ /dev/null
@@ -1,20 +0,0 @@
-m = 10
-n = 10
-fm = nerv.MMatrixFloat(m, n)
-dm = nerv.MMatrixDouble(m, n)
-for i = 0, m - 1 do
- for j = 0, n - 1 do
- -- local t = math.random(10)
- t = i / (j + 1)
- fm[i][j] = t
- dm[i][j] = t
- end
-end
-print("test fm:get_dataref_value:", fm:get_dataref_value())
-print("forced a garbade collect")
-collectgarbage("collect")
-print("test fm:get_dataref_value:", fm:get_dataref_value())
-print(fm)
--- print(fm:softmax())
-print(dm)
--- print(dm:softmax())
diff --git a/examples/oop_example.c b/examples/oop_example.c
deleted file mode 100644
index 59dfc5a..0000000
--- a/examples/oop_example.c
+++ /dev/null
@@ -1,101 +0,0 @@
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include "../common.h"
-
-#define SQR(x) ((x) * (x))
-
-const char *point_tname = "nerv.Point";
-const char *better_point_tname = "nerv.BetterPoint";
-
-typedef struct {
- double x, y;
-} Point;
-
-static int point_norm (lua_State *L) {
- Point *p = luaT_checkudata(L, 1, point_tname);
- lua_pushnumber(L, sqrt(SQR(p->x) + SQR(p->y)));
- return 1;
-}
-
-static int point_set_x (lua_State *L) {
- Point *p = luaT_checkudata(L, 1, point_tname);
- p->x = luaL_checknumber(L, 2);
- return 0;
-}
-
-static int point_set_y (lua_State *L) {
- Point *p = luaT_checkudata(L, 1, point_tname);
- p->y = luaL_checknumber(L, 2);
- return 0;
-}
-
-/* generic constructor */
-void point_new_(Point *self, double x, double y) {
- self->x = x;
- self->y = y;
-}
-
-int point_new(lua_State *L) {
- /* `_new` function should create the object itself */
- Point *self = (Point *)malloc(sizeof(Point));
- point_new_(self, luaL_checknumber(L, 1), luaL_checknumber(L, 2));
- luaT_pushudata(L, self, point_tname);
- fprintf(stderr, "[example] %s constructor is invoked\n",
- point_tname);
- return 1;
-}
-
-static const luaL_Reg point_methods[] = {
- {"set_x", point_set_x},
- {"set_y", point_set_y},
- {"norm", point_norm},
- {NULL, NULL}
-};
-
-
-/* the subclass method overrides the one from the base class */
-static int better_point_norm (lua_State *L) {
- Point *p = luaT_checkudata(L, 1, point_tname);
- lua_pushnumber(L, fabs(p->x) + fabs(p->y));
- return 1;
-}
-
-int better_point_new(lua_State *L) {
- /* `_new` function should create the object itself */
- Point *self = (Point *)malloc(sizeof(Point));
- point_new_(self, luaL_checknumber(L, 1), luaL_checknumber(L, 2));
- luaT_pushudata(L, self, better_point_tname);
- fprintf(stderr, "[example] %s constructor is invoked\n",
- better_point_tname);
- return 1;
-}
-
-static const luaL_Reg better_point_methods[] = {
- {"norm", better_point_norm},
- {NULL, NULL}
-};
-
-void nerv_point_init(lua_State *L) {
- /* create a class and let luaT know */
- luaT_newmetatable(L, point_tname, NULL, point_new, NULL, NULL);
- /* register member functions */
- luaL_register(L, NULL, point_methods);
- /* keep the stack balanced, see `nerv.c` */
- lua_pop(L, 1);
-}
-
-void nerv_better_point_init(lua_State *L) {
- /* create a class and let luaT know */
- luaT_newmetatable(L, better_point_tname, point_tname,
- better_point_new, NULL, NULL);
- /* register member functions */
- luaL_register(L, NULL, better_point_methods);
- /* keep the stack balanced, see `nerv.c` */
- lua_pop(L, 1);
-}
-
-void nerv_example_init(lua_State *L) {
- nerv_point_init(L);
- nerv_better_point_init(L);
-}
diff --git a/examples/oop_example.lua b/examples/oop_example.lua
deleted file mode 100644
index b753288..0000000
--- a/examples/oop_example.lua
+++ /dev/null
@@ -1,16 +0,0 @@
-p = nerv.Point(0, 0) -- create a Point instance
-print(p)
-print(p:norm()) -- get 2-norm of the Point
-p:set_x(1.0)
-p:set_y(2.0)
-print(p:norm()) -- get 2-norm of the Point
-
-bp = nerv.BetterPoint(1, 2)
--- use methods from base class
-bp:set_x(1.0)
-bp:set_y(2.0)
-print(bp)
-print(bp:norm()) -- get 1-norm of the Point
-
-print(p.__typename)
-print(bp.__typename)
diff --git a/examples/swb_baseline.lua b/examples/swb_baseline.lua
deleted file mode 100644
index 8b7e01a..0000000
--- a/examples/swb_baseline.lua
+++ /dev/null
@@ -1,166 +0,0 @@
-require 'speech.init'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
- cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.MMatrixFloat,
- frm_ext = 5,
- tr_scp = "/slfs1/users/mfy43/swb_ivec/train_bp.scp",
- cv_scp = "/slfs1/users/mfy43/swb_ivec/train_cv.scp",
- htk_conf = "/slfs1/users/mfy43/swb_ivec/plp_0_d_a.conf",
- initialized_param = {"/slfs1/users/mfy43/swb_init.nerv",
- "/slfs1/users/mfy43/swb_global_transf.nerv"},
- debug = false}
-
-function make_sublayer_repo(param_repo)
- return nerv.LayerRepo(
- {
- -- global transf
- ["nerv.BiasLayer"] =
- {
- blayer1 = {{bias = "bias1"}, {dim_in = {429}, dim_out = {429}}},
- blayer2 = {{bias = "bias2"}, {dim_in = {429}, dim_out = {429}}}
- },
- ["nerv.WindowLayer"] =
- {
- wlayer1 = {{window = "window1"}, {dim_in = {429}, dim_out = {429}}},
- wlayer2 = {{window = "window2"}, {dim_in = {429}, dim_out = {429}}}
- },
- -- biased linearity
- ["nerv.AffineLayer"] =
- {
- affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
- {dim_in = {429}, dim_out = {2048}}},
- affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
- {dim_in = {2048}, dim_out = {3001}}}
- },
- ["nerv.SigmoidLayer"] =
- {
- sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
- },
- ["nerv.SoftmaxCELayer"] =
- {
- ce_crit = {{}, {dim_in = {3001, 1}, dim_out = {1}, compressed = true}}
- }
- }, param_repo, gconf)
-end
-
-function make_layer_repo(sublayer_repo, param_repo)
- return nerv.LayerRepo(
- {
- ["nerv.DAGLayer"] =
- {
- global_transf = {{}, {
- dim_in = {429}, dim_out = {429},
- sub_layers = sublayer_repo,
- connections = {
- ["<input>[1]"] = "blayer1[1]",
- ["blayer1[1]"] = "wlayer1[1]",
- ["wlayer1[1]"] = "blayer2[1]",
- ["blayer2[1]"] = "wlayer2[1]",
- ["wlayer2[1]"] = "<output>[1]"
- }
- }},
- main = {{}, {
- dim_in = {429, 1}, dim_out = {1},
- sub_layers = sublayer_repo,
- connections = {
- ["<input>[1]"] = "affine0[1]",
- ["affine0[1]"] = "sigmoid0[1]",
- ["sigmoid0[1]"] = "affine1[1]",
- ["affine1[1]"] = "sigmoid1[1]",
- ["sigmoid1[1]"] = "affine2[1]",
- ["affine2[1]"] = "sigmoid2[1]",
- ["sigmoid2[1]"] = "affine3[1]",
- ["affine3[1]"] = "sigmoid3[1]",
- ["sigmoid3[1]"] = "affine4[1]",
- ["affine4[1]"] = "sigmoid4[1]",
- ["sigmoid4[1]"] = "affine5[1]",
- ["affine5[1]"] = "sigmoid5[1]",
- ["sigmoid5[1]"] = "affine6[1]",
- ["affine6[1]"] = "sigmoid6[1]",
- ["sigmoid6[1]"] = "affine7[1]",
- ["affine7[1]"] = "ce_crit[1]",
- ["<input>[2]"] = "ce_crit[2]",
- ["ce_crit[1]"] = "<output>[1]"
- }
- }}
- }
- }, param_repo, gconf)
-end
-
-function get_criterion_layer(sublayer_repo)
- return sublayer_repo:get_layer("ce_crit")
-end
-
-function get_network(layer_repo)
- return layer_repo:get_layer("main")
-end
-
-function make_readers(scp_file, layer_repo)
- return {
- {reader = nerv.TNetReader(gconf,
- {
- id = "main_scp",
- scp_file = scp_file,
- conf_file = gconf.htk_conf,
- frm_ext = gconf.frm_ext,
- mlfs = {
- phone_state = {
- file = "/slfs1/users/mfy43/swb_ivec/ref.mlf",
- format = "map",
- format_arg = "/slfs1/users/mfy43/swb_ivec/dict",
- dir = "*/",
- ext = "lab"
- }
- },
- global_transf = layer_repo:get_layer("global_transf")
- }),
- data = {main_scp = 429, phone_state = 1}}
- }
-end
-
-function make_buffer(readers)
- return nerv.SGDBuffer(gconf,
- {
- buffer_size = gconf.buffer_size,
- randomize = gconf.randomize,
- readers = readers
- })
-end
-
-function get_input_order()
- return {"main_scp", "phone_state"}
-end
-
-function get_accuracy(sublayer_repo)
- local ce_crit = sublayer_repo:get_layer("ce_crit")
- return ce_crit.total_correct / ce_crit.total_frames * 100
-end
-
-function print_stat(sublayer_repo)
- local ce_crit = sublayer_repo:get_layer("ce_crit")
- nerv.info("*** training stat begin ***")
- nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
- nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
- nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
- nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
- nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(sublayer_repo))
- nerv.info("*** training stat end ***")
-end
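
swb_baseline.lua is not run on its own: asr_trainer.lua does
dofile(arg[1]) and then calls the hooks this file defines against the
gconf it sets up. A defensive sketch of that contract check, as a
hypothetical addition to the trainer (the helper is illustrative, not
part of the repo):

    local required = {
        "make_sublayer_repo", "make_layer_repo", "get_criterion_layer",
        "get_network", "make_readers", "make_buffer", "get_input_order",
        "get_accuracy", "print_stat",
    }
    dofile(arg[1])  -- load the config, e.g. examples/swb_baseline.lua
    assert(type(gconf) == "table", "config must define gconf")
    for _, name in ipairs(required) do
        if type(_G[name]) ~= "function" then
            nerv.error("config does not define %s()", name)
        end
    end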
diff --git a/examples/test_dnn_layers.lua b/examples/test_dnn_layers.lua
deleted file mode 100644
index 64c0dec..0000000
--- a/examples/test_dnn_layers.lua
+++ /dev/null
@@ -1,78 +0,0 @@
-require 'layer.affine'
-require 'layer.sigmoid'
-require 'layer.softmax_ce'
-
-global_conf = {lrate = 0.8, wcost = 1e-6,
- momentum = 0.9, cumat_type = nerv.CuMatrixFloat}
-
-pf = nerv.ChunkFile("affine.param", "r")
-ltp = pf:read_chunk("a", global_conf)
-bp = pf:read_chunk("b", global_conf)
-
--- print(bp.trans)
-
-af = nerv.AffineLayer("test", global_conf, {["ltp"] = ltp,
- ["bp"] = bp,
- dim_in = {429},
- dim_out = {2048}})
-sg = nerv.SigmoidLayer("test2", global_conf, {dim_in = {2048},
- dim_out = {2048}})
-sm = nerv.SoftmaxCELayer("test3", global_conf, {dim_in = {2048, 2048},
- dim_out = {}})
-af:init()
-sg:init()
-sm:init()
-
-df = nerv.ChunkFile("input.param", "r")
-
-label = nerv.CuMatrixFloat(10, 2048)
-label:fill(0)
-for i = 0, 9 do
- label[i][i] = 1.0
-end
-
-input1 = {df:read_chunk("input", global_conf).trans}
-output1 = {nerv.CuMatrixFloat(10, 2048)}
-input2 = output1
-output2 = {nerv.CuMatrixFloat(10, 2048)}
-input3 = {output2[1], label}
-output3 = {}
-err_input1 = {}
-err_output1 = {nerv.CuMatrixFloat(10, 2048)}
-err_input2 = err_output1
-err_output2 = {nerv.CuMatrixFloat(10, 2048)}
-err_input3 = err_output2
-err_output3 = {input1[1]:create()}
-
-for i = 0, 3 do
- -- propagate
- af:propagate(input1, output1)
- sg:propagate(input2, output2)
- sm:propagate(input3, output3)
-
- -- back_propagate
- sm:back_propagate(err_output1, err_input1, input3, output3)
- sg:back_propagate(err_output2, err_input2, input2, output2)
- af:back_propagate(err_output3, err_input3, input1, output1)
-
- -- update
- sm:update(err_input1, input3, output3)
- sg:update(err_input2, input2, output2)
- af:update(err_input3, input1, output1)
-
-
- print("output1")
- print(output1[1])
- print("output2")
- print(output2[1])
- print("err_output1")
- print(err_output1[1])
- print("err_output2")
- print(err_output2[1])
- nerv.printf("cross entropy: %.8f\n", sm.total_ce)
- nerv.printf("frames: %.8f\n", sm.total_frames)
-end
-print("linear")
-print(af.ltp.trans)
-print("linear2")
-print(af.bp.trans)
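
The loop above drives every layer through the same three-phase cycle:
propagate fills the output buffers, back_propagate takes the error to
emit first and the incoming error second (matching the argument order of
the calls above), and update applies the gradient step. A self-contained
sketch of one cycle on a parameter-free SigmoidLayer (shapes and values
are illustrative):

    local gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
                   cumat_type = nerv.CuMatrixFloat}
    local sg = nerv.SigmoidLayer("sg", gconf, {dim_in = {8}, dim_out = {8}})
    sg:init()
    local input   = {nerv.CuMatrixFloat(4, 8)}  -- a batch of 4 frames
    input[1]:fill(0.5)
    local output  = {input[1]:create()}
    local err_in  = {output[1]:create()}        -- error arriving from above
    err_in[1]:fill(1)
    local err_out = {input[1]:create()}         -- error to pass downward
    sg:propagate(input, output)
    sg:back_propagate(err_out, err_in, input, output)
    sg:update(err_in, input, output)            -- no parameters to update here
    print(err_out[1])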
diff --git a/examples/test_nn_lib.lua b/examples/test_nn_lib.lua
deleted file mode 100644
index 5444810..0000000
--- a/examples/test_nn_lib.lua
+++ /dev/null
@@ -1,164 +0,0 @@
-require 'speech.init'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
- cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.MMatrixFloat,
- batch_size = 256}
-
-param_repo = nerv.ParamRepo({"converted.nerv", "global_transf.nerv"})
-sublayer_repo = nerv.LayerRepo(
- {
- -- global transf
- ["nerv.BiasLayer"] =
- {
- blayer1 = {{bias = "bias1"}, {dim_in = {429}, dim_out = {429}}},
- blayer2 = {{bias = "bias2"}, {dim_in = {429}, dim_out = {429}}}
- },
- ["nerv.WindowLayer"] =
- {
- wlayer1 = {{window = "window1"}, {dim_in = {429}, dim_out = {429}}},
- wlayer2 = {{window = "window2"}, {dim_in = {429}, dim_out = {429}}}
- },
- -- biased linearity
- ["nerv.AffineLayer"] =
- {
- affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
- {dim_in = {429}, dim_out = {2048}}},
- affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
- {dim_in = {2048}, dim_out = {3001}}}
- },
- ["nerv.SigmoidLayer"] =
- {
- sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
- },
- ["nerv.SoftmaxCELayer"] =
- {
- softmax_ce0 = {{}, {dim_in = {3001, 1}, dim_out = {}, compressed = true}}
- }
- }, param_repo, gconf)
-
-layer_repo = nerv.LayerRepo(
- {
- ["nerv.DAGLayer"] =
- {
- global_transf = {{}, {
- dim_in = {429}, dim_out = {429},
- sub_layers = sublayer_repo,
- connections = {
- ["<input>[1]"] = "blayer1[1]",
- ["blayer1[1]"] = "wlayer1[1]",
- ["wlayer1[1]"] = "blayer2[1]",
- ["blayer2[1]"] = "wlayer2[1]",
- ["wlayer2[1]"] = "<output>[1]"
- }
- }},
- main = {{}, {
- dim_in = {429, 1}, dim_out = {},
- sub_layers = sublayer_repo,
- connections = {
- ["<input>[1]"] = "affine0[1]",
- ["affine0[1]"] = "sigmoid0[1]",
- ["sigmoid0[1]"] = "affine1[1]",
- ["affine1[1]"] = "sigmoid1[1]",
- ["sigmoid1[1]"] = "affine2[1]",
- ["affine2[1]"] = "sigmoid2[1]",
- ["sigmoid2[1]"] = "affine3[1]",
- ["affine3[1]"] = "sigmoid3[1]",
- ["sigmoid3[1]"] = "affine4[1]",
- ["affine4[1]"] = "sigmoid4[1]",
- ["sigmoid4[1]"] = "affine5[1]",
- ["affine5[1]"] = "sigmoid5[1]",
- ["sigmoid5[1]"] = "affine6[1]",
- ["affine6[1]"] = "sigmoid6[1]",
- ["sigmoid6[1]"] = "affine7[1]",
- ["affine7[1]"] = "softmax_ce0[1]",
- ["<input>[2]"] = "softmax_ce0[2]"
- }
- }}
- }
- }, param_repo, gconf)
-
-tnet_reader = nerv.TNetReader(gconf,
- {
- id = "main_scp",
- scp_file = "/slfs1/users/mfy43/swb_ivec/train_bp.scp",
--- scp_file = "t.scp",
- conf_file = "/slfs1/users/mfy43/swb_ivec/plp_0_d_a.conf",
- frm_ext = 5,
- mlfs = {
- ref = {
- file = "/slfs1/users/mfy43/swb_ivec/ref.mlf",
- format = "map",
- format_arg = "/slfs1/users/mfy43/swb_ivec/dict",
- dir = "*/",
- ext = "lab"
- }
- },
- global_transf = layer_repo:get_layer("global_transf")
- })
-
-buffer = nerv.SGDBuffer(gconf,
- {
- buffer_size = 81920,
- randomize = true,
- readers = {
- { reader = tnet_reader,
- data = {main_scp = 429, ref = 1}}
- }
- })
-
-sm = sublayer_repo:get_layer("softmax_ce0")
-main = layer_repo:get_layer("main")
-main:init(gconf.batch_size)
-gconf.cnt = 0
--- data = buffer:get_data()
--- input = {data.main_scp, data.ref}
--- while true do
-for data in buffer.get_data, buffer do
--- if gconf.cnt == 100 then break end
--- gconf.cnt = gconf.cnt + 1
-
- input = {data.main_scp, data.ref}
- output = {}
- err_input = {}
- err_output = {input[1]:create()}
-
- main:propagate(input, output)
- main:back_propagate(err_output, err_input, input, output)
- main:update(err_input, input, output)
-
--- nerv.printf("cross entropy: %.8f\n", sm.total_ce)
--- nerv.printf("correct: %d\n", sm.total_correct)
--- nerv.printf("frames: %d\n", sm.total_frames)
--- nerv.printf("err/frm: %.8f\n", sm.total_ce / sm.total_frames)
--- nerv.printf("accuracy: %.8f\n", sm.total_correct / sm.total_frames)
- collectgarbage("collect")
-end
-nerv.printf("cross entropy: %.8f\n", sm.total_ce)
-nerv.printf("correct: %d\n", sm.total_correct)
-nerv.printf("accuracy: %.3f%%\n", sm.total_correct / sm.total_frames * 100)
-nerv.printf("writing back...\n")
-cf = nerv.ChunkFile("output.nerv", "w")
-for i, p in ipairs(main:get_params()) do
- print(p)
- cf:write_chunk(p)
-end
-cf:close()
-nerv.Matrix.print_profile()
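
One idiom in test_nn_lib.lua is worth spelling out: the header
"for data in buffer.get_data, buffer do" feeds Lua's generic for directly.
The generic for calls its iterator as f(state, control), here
buffer.get_data(buffer, previous), and the previous value is effectively
ignored by the reader, so each pass amounts to buffer:get_data() until it
returns nil. An equivalent explicit loop:

    while true do
        local data = buffer:get_data()
        if data == nil then break end
        -- ... consume one minibatch, as in the loop above ...
    end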