-rw-r--r--  nerv/Makefile                         2
-rw-r--r--  nerv/examples/seq_chime.lua         185
-rw-r--r--  nerv/examples/seq_trainer.lua        86
-rw-r--r--  nerv/layer/affine.lua                 4
-rw-r--r--  nerv/layer/affine_recurrent.lua       4
-rw-r--r--  nerv/layer/bias.lua                   4
-rw-r--r--  nerv/layer/combiner.lua               6
-rw-r--r--  nerv/layer/init.lua                   1
-rw-r--r--  nerv/layer/mpe.lua                   52
-rw-r--r--  nerv/layer/mse.lua                    8
-rw-r--r--  nerv/layer/sigmoid.lua                4
-rw-r--r--  nerv/layer/softmax.lua                4
-rw-r--r--  nerv/layer/softmax_ce.lua             7
-rw-r--r--  nerv/layer/window.lua                 4
-rw-r--r--  nerv/lib/matrix/generic/matrix.c      1
-rw-r--r--  nerv/nn/layer_dag.lua                40
16 files changed, 407 insertions(+), 5 deletions(-)
diff --git a/nerv/Makefile b/nerv/Makefile
index b5d26bd..b874a94 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -31,7 +31,7 @@ OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua layer/softmax.lua \
- layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua\
+ layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua layer/mpe.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
io/sgd_buffer.lua
diff --git a/nerv/examples/seq_chime.lua b/nerv/examples/seq_chime.lua
new file mode 100644
index 0000000..be723ca
--- /dev/null
+++ b/nerv/examples/seq_chime.lua
@@ -0,0 +1,185 @@
+require 'kaldi_io'
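+-- global configuration consumed by seq_trainer.lua; the feature, model,
+-- lattice and alignment paths below are site-specific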
+gconf = {lrate = 0.00001, wcost = 0, momentum = 0.0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ frm_ext = 5,
+ tr_scp = "ark,s,cs:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_smbr/train.scp ark:- |",
+ initialized_param = {"/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_init.nerv",
+ "/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_global_transf.nerv"},
+ debug = false}
+
+function make_layer_repo(param_repo)
+ local layer_repo = nerv.LayerRepo(
+ {
+ -- global transf
+ ["nerv.BiasLayer"] =
+ {
+ blayer1 = {{bias = "bias1"}, {dim_in = {440}, dim_out = {440}}},
+ blayer2 = {{bias = "bias2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ ["nerv.WindowLayer"] =
+ {
+ wlayer1 = {{window = "window1"}, {dim_in = {440}, dim_out = {440}}},
+ wlayer2 = {{window = "window2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ -- biased linearity
+ ["nerv.AffineLayer"] =
+ {
+ affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
+ {dim_in = {440}, dim_out = {2048}}},
+ affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
+ {dim_in = {2048}, dim_out = {2011}}}
+ },
+ ["nerv.SigmoidLayer"] =
+ {
+ sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
+ },
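+        -- MPE/sMBR criterion: input 1 is the network output, input 2 the
+        -- utterance key; cmd carries the options handed to Kaldi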
+ ["nerv.MPELayer"] =
+ {
+ mpe_crit = {{}, {dim_in = {2011, -1}, dim_out = {1},
+ cmd = {
+ arg = "--class-frame-counts=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/ali_train_pdf.counts --acoustic-scale=0.1 --lm-scale=1.0 --learn-rate=0.00001 --do-smbr=true --verbose=1",
+ mdl = "/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/final.mdl",
+ lat = "scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_denlats/lat.scp",
+ ali = "ark:gunzip -c /slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/ali.*.gz |"
+ }
+ }
+ }
+ },
+ ["nerv.SoftmaxLayer"] = -- softmax for decode output
+ {
+ softmax = {{}, {dim_in = {2011}, dim_out = {2011}}}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
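+            -- global_transf applies the per-frame bias/window normalization;
+            -- main is the 7-hidden-layer DNN ending in affine7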
+ global_transf = {{}, {
+ dim_in = {440}, dim_out = {440},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "blayer1[1]",
+ ["blayer1[1]"] = "wlayer1[1]",
+ ["wlayer1[1]"] = "blayer2[1]",
+ ["blayer2[1]"] = "wlayer2[1]",
+ ["wlayer2[1]"] = "<output>[1]"
+ }
+ }},
+ main = {{}, {
+ dim_in = {440}, dim_out = {2011},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "affine0[1]",
+ ["affine0[1]"] = "sigmoid0[1]",
+ ["sigmoid0[1]"] = "affine1[1]",
+ ["affine1[1]"] = "sigmoid1[1]",
+ ["sigmoid1[1]"] = "affine2[1]",
+ ["affine2[1]"] = "sigmoid2[1]",
+ ["sigmoid2[1]"] = "affine3[1]",
+ ["affine3[1]"] = "sigmoid3[1]",
+ ["sigmoid3[1]"] = "affine4[1]",
+ ["affine4[1]"] = "sigmoid4[1]",
+ ["sigmoid4[1]"] = "affine5[1]",
+ ["affine5[1]"] = "sigmoid5[1]",
+ ["sigmoid5[1]"] = "affine6[1]",
+ ["affine6[1]"] = "sigmoid6[1]",
+ ["sigmoid6[1]"] = "affine7[1]",
+ ["affine7[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
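+            -- mpe_output feeds the main network into the MPE criterion for
+            -- training; softmax_output is the plain decoding network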
+ mpe_output = {{}, {
+ dim_in = {440, -1}, dim_out = {1},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "mpe_crit[1]",
+ ["<input>[2]"] = "mpe_crit[2]",
+ ["mpe_crit[1]"] = "<output>[1]"
+ }
+ }},
+ softmax_output = {{}, {
+ dim_in = {440}, dim_out = {2011},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "softmax[1]",
+ ["softmax[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ return layer_repo
+end
+
+function get_network(layer_repo)
+ return layer_repo:get_layer("mpe_output")
+end
+
+function get_decode_network(layer_repo)
+ return layer_repo:get_layer("softmax_output")
+end
+
+function get_global_transf(layer_repo)
+ return layer_repo:get_layer("global_transf")
+end
+
+function make_readers(feature_rspecifier, layer_repo)
+ return {
+ {reader = nerv.KaldiReader(gconf,
+ {
+ id = "main_scp",
+ feature_rspecifier = feature_rspecifier,
+ frm_ext = gconf.frm_ext,
+ global_transf = layer_repo:get_layer("global_transf"),
+ mlfs = {}
+ })
+ }
+ }
+end
+
+function get_input_order()
+ return {{id = "main_scp", global_transf = true},
+ {id = "key"}}
+end
+
+function get_accuracy(layer_repo)
+ local mpe_crit = layer_repo:get_layer("mpe_crit")
+ return mpe_crit.total_correct / mpe_crit.total_frames * 100
+end
+
+function print_stat(layer_repo)
+ local mpe_crit = layer_repo:get_layer("mpe_crit")
+ nerv.info("*** training stat begin ***")
+ nerv.printf("correct:\t\t%d\n", mpe_crit.total_correct)
+ nerv.printf("frames:\t\t\t%d\n", mpe_crit.total_frames)
+ nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
+ nerv.info("*** training stat end ***")
+end
diff --git a/nerv/examples/seq_trainer.lua b/nerv/examples/seq_trainer.lua
new file mode 100644
index 0000000..df96e68
--- /dev/null
+++ b/nerv/examples/seq_trainer.lua
@@ -0,0 +1,86 @@
+function build_trainer(ifname)
+ local param_repo = nerv.ParamRepo()
+ param_repo:import(ifname, nil, gconf)
+ local layer_repo = make_layer_repo(param_repo)
+ local network = get_network(layer_repo)
+ local global_transf = get_global_transf(layer_repo)
+ local input_order = get_input_order()
+ local iterative_trainer = function (prefix, scp_file, bp)
+ local readers = make_readers(scp_file, layer_repo)
+ -- initialize the network
+ network:init(1)
+ gconf.cnt = 0
+ for ri = 1, #readers, 1 do
+ while true do
+ local data = readers[ri].reader:get_data()
+ if data == nil then
+ break
+ end
+            -- print stats periodically
+ gconf.cnt = gconf.cnt + 1
+ if gconf.cnt == 1000 then
+ print_stat(layer_repo)
+ nerv.CuMatrix.print_profile()
+ nerv.CuMatrix.clear_profile()
+ gconf.cnt = 0
+ -- break
+ end
+ local input = {}
+ -- if gconf.cnt == 1000 then break end
+ for i, e in ipairs(input_order) do
+ local id = e.id
+ if data[id] == nil then
+ nerv.error("input data %s not found", id)
+ end
+ local transformed
+ if e.global_transf then
+ local batch = gconf.cumat_type(data[id]:nrow(), data[id]:ncol())
+ batch:copy_fromh(data[id])
+ transformed = nerv.speech_utils.global_transf(batch,
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf)
+ else
+ transformed = data[id]
+ end
+ table.insert(input, transformed)
+ end
+            local err_output = {input[1]:create()}
+ network:batch_resize(input[1]:nrow())
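+            -- propagate() returns the validity flag from the MPE criterion;
+            -- skip the backward pass and update if the utterance is invalid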
+ if network:propagate(input, {{}}) == true then
+ network:back_propagate({{}}, err_output, input, {{}})
+ network:update({{}}, input, {{}})
+ end
+            -- collect garbage promptly to save GPU memory
+ collectgarbage("collect")
+ end
+ end
+ print_stat(layer_repo)
+ nerv.CuMatrix.print_profile()
+ nerv.CuMatrix.clear_profile()
+ if prefix ~= nil then
+ nerv.info("writing back...")
+ local fname = string.format("%s_tr%.3f.nerv",
+ prefix, get_accuracy(layer_repo))
+ network:get_params():export(fname, nil)
+ end
+ return get_accuracy(layer_repo)
+ end
+ return iterative_trainer
+end
+
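+-- arg[1] is the task script (e.g. seq_chime.lua); it defines gconf and the
+-- make_layer_repo/get_network/make_readers/... hooks used by build_trainer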
+dofile(arg[1])
+
+local pf0 = gconf.initialized_param
+local trainer = build_trainer(pf0)
+
+local i = 1
+nerv.info("[NN] begin iteration %d with lrate = %.6f", i, gconf.lrate)
+local accu_tr = trainer(string.format("%s_%s_iter_%d_lr%f",
+                        string.gsub(
+                            (string.gsub(pf0[1], "(.*/)(.*)", "%2")),
+                            "(.*)%..*", "%1"),
+                        os.date("%Y%m%d%H%M%S"),
+                        i, gconf.lrate), gconf.tr_scp, true)
+nerv.info("[TR] training set %d: %.3f", i, accu_tr)
+
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 00cbcfb..6c90e3e 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -60,6 +60,10 @@ function AffineLayer:init(batch_size)
self.bp:train_init()
end
+function AffineLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
function AffineLayer:update(bp_err, input, output)
if self.direct_update then
self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index 59d259c..92d98e2 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -37,6 +37,10 @@ function Recurrent:init(batch_size)
self.bp:train_init()
end
+function Recurrent:batch_resize(batch_size)
+ -- do nothing
+end
+
function Recurrent:update(bp_err, input, output)
if (self.direct_update == true) then
local ltp_hh = self.ltp_hh.trans
diff --git a/nerv/layer/bias.lua b/nerv/layer/bias.lua
index c99274d..7e9fd46 100644
--- a/nerv/layer/bias.lua
+++ b/nerv/layer/bias.lua
@@ -18,6 +18,10 @@ function BiasLayer:init()
end
end
+function BiasLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
function BiasLayer:propagate(input, output)
output[1]:copy_fromd(input[1])
output[1]:add_row(self.bias.trans, 1.0)
diff --git a/nerv/layer/combiner.lua b/nerv/layer/combiner.lua
index 7bd7617..1bcfdfb 100644
--- a/nerv/layer/combiner.lua
+++ b/nerv/layer/combiner.lua
@@ -30,6 +30,12 @@ function CombinerLayer:init(batch_size)
self.sum = self.gconf.cumat_type(batch_size, dim)
end
+function CombinerLayer:batch_resize(batch_size)
+ if self.sum:nrow() ~= batch_size then
+ self.sum = self.gconf.cumat_type(batch_size, self.dim_in[1])
+ end
+end
+
function CombinerLayer:update(bp_err, input, output)
end
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 6861b0e..b74422f 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -79,3 +79,4 @@ nerv.include('mse.lua')
nerv.include('combiner.lua')
nerv.include('affine_recurrent.lua')
nerv.include('softmax.lua')
+nerv.include('mpe.lua')
diff --git a/nerv/layer/mpe.lua b/nerv/layer/mpe.lua
new file mode 100644
index 0000000..ec8a8f3
--- /dev/null
+++ b/nerv/layer/mpe.lua
@@ -0,0 +1,52 @@
+require 'libkaldiseq'
+local MPELayer = nerv.class("nerv.MPELayer", "nerv.Layer")
+
+function MPELayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.gconf = global_conf
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.arg = layer_conf.cmd.arg
+ self.mdl = layer_conf.cmd.mdl
+ self.lat = layer_conf.cmd.lat
+ self.ali = layer_conf.cmd.ali
+ self:check_dim_len(2, -1) -- two inputs: nn output and utt key
+end
+
+function MPELayer:init(batch_size)
+ self.total_correct = 0
+ self.total_frames = 0
+ self.kaldi_mpe = nerv.KaldiMPE(self.arg, self.mdl, self.lat, self.ali)
+ if self.kaldi_mpe == nil then
+        nerv.error("kaldi arguments are expected: %s %s %s %s", self.arg,
+ self.mdl, self.lat, self.ali)
+ end
+end
+
+function MPELayer:batch_resize(batch_size)
+ -- do nothing
+end
+
+function MPELayer:update(bp_err, input, output)
+ -- no params, therefore do nothing
+end
+
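+-- check() asks Kaldi whether this utterance can be used (e.g. whether its
+-- lattice and alignment are available); the flag is cached in self.valid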
+function MPELayer:propagate(input, output)
+ self.valid = false
+ self.valid = self.kaldi_mpe:check(input[1], input[2])
+ return self.valid
+end
+
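+-- calc_diff() computes the sequence-training error signal from the host
+-- copy of the network output; frame and accuracy counters are accumulated
+-- for print_stat()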
+function MPELayer:back_propagate(bp_err, next_bp_err, input, output)
+ if self.valid ~= true then
+        nerv.error("kaldi sequence training back_propagate failed")
+ end
+ local mmat = input[1]:new_to_host()
+ next_bp_err[1]:copy_fromh(self.kaldi_mpe:calc_diff(mmat, input[2]))
+ self.total_frames = self.total_frames + self.kaldi_mpe:get_num_frames()
+ self.total_correct = self.total_correct + self.kaldi_mpe:get_utt_frame_acc()
+end
+
+function MPELayer:get_params()
+ return nerv.ParamRepo({})
+end
diff --git a/nerv/layer/mse.lua b/nerv/layer/mse.lua
index 2516998..0ee3080 100644
--- a/nerv/layer/mse.lua
+++ b/nerv/layer/mse.lua
@@ -20,6 +20,14 @@ function MSELayer:init(batch_size)
self.diff = self.mse:create()
end
+function MSELayer:batch_resize(batch_size)
+    if self.mse:nrow() ~= batch_size then
+ self.mse = self.gconf.cumat_type(batch_size, self.dim_in[1])
+ self.mse_sum = self.gconf.cumat_type(batch_size, 1)
+ self.diff = self.mse:create()
+ end
+end
+
function MSELayer:update(bp_err, input, output)
-- no params, therefore do nothing
end
diff --git a/nerv/layer/sigmoid.lua b/nerv/layer/sigmoid.lua
index dfd09eb..0a8bcdc 100644
--- a/nerv/layer/sigmoid.lua
+++ b/nerv/layer/sigmoid.lua
@@ -14,6 +14,10 @@ function SigmoidLayer:init()
end
end
+function SigmoidLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
function SigmoidLayer:update(bp_err, input, output)
-- no params, therefore do nothing
end
diff --git a/nerv/layer/softmax.lua b/nerv/layer/softmax.lua
index e979ebf..4205b66 100644
--- a/nerv/layer/softmax.lua
+++ b/nerv/layer/softmax.lua
@@ -14,6 +14,10 @@ function SoftmaxLayer:init(batch_size)
end
end
+function SoftmaxLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
function SoftmaxLayer:update(bp_err, input, output)
-- no params, therefore do nothing
end
diff --git a/nerv/layer/softmax_ce.lua b/nerv/layer/softmax_ce.lua
index f878a2f..9071e86 100644
--- a/nerv/layer/softmax_ce.lua
+++ b/nerv/layer/softmax_ce.lua
@@ -23,6 +23,13 @@ function SoftmaxCELayer:init(batch_size)
self.ce = self.softmax:create()
end
+function SoftmaxCELayer:batch_resize(batch_size)
+    if self.softmax:nrow() ~= batch_size then
+ self.softmax = self.gconf.cumat_type(batch_size, self.dim_in[1])
+ self.ce = self.softmax:create()
+ end
+end
+
function SoftmaxCELayer:update(bp_err, input, output)
-- no params, therefore do nothing
end
diff --git a/nerv/layer/window.lua b/nerv/layer/window.lua
index 4e9a3b1..8eed352 100644
--- a/nerv/layer/window.lua
+++ b/nerv/layer/window.lua
@@ -18,6 +18,10 @@ function WindowLayer:init()
end
end
+function WindowLayer:batch_resize(batch_size)
+ -- do nothing
+end
+
function WindowLayer:propagate(input, output)
output[1]:copy_fromd(input[1])
output[1]:scale_rows_by_row(self.window.trans)
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index 6cb3dc0..4319e13 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -4,6 +4,7 @@
/* FIXME: malloc failure detection */
void nerv_matrix_(data_free)(Matrix *self, Status *status) {
+    if (*self->data_ref == 0) return;
assert(*self->data_ref > 0);
if (--(*self->data_ref) == 0)
{
diff --git a/nerv/nn/layer_dag.lua b/nerv/nn/layer_dag.lua
index f69d31c..73bb77d 100644
--- a/nerv/nn/layer_dag.lua
+++ b/nerv/nn/layer_dag.lua
@@ -79,7 +79,7 @@ function DAGLayer:__init(id, global_conf, layer_conf)
end
table.insert(parsed_conn,
- {{ref_from, port_from}, {ref_to, port_to}})
+ {{ref_from, port_from}, {ref_to, port_to}})
table.insert(ref_from.next_layers, ref_to) -- add edge
ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer
end
@@ -140,8 +140,11 @@ function DAGLayer:init(batch_size)
ref_from, port_from = unpack(conn[1])
ref_to, port_to = unpack(conn[2])
_, output_dim = ref_from.layer:get_dim()
- local mid = self.gconf.cumat_type(batch_size,
- output_dim[port_from])
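+        -- ports with a non-positive dimension (the utterance-key port is
+        -- declared as -1) get a one-column placeholder matrix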
+        local dim = 1
+        if output_dim[port_from] > 0 then
+            dim = output_dim[port_from]
+        end
+ local mid = self.gconf.cumat_type(batch_size, dim)
local err_mid = mid:create()
ref_from.outputs[port_from] = mid
@@ -176,6 +179,33 @@ function DAGLayer:init(batch_size)
end
end
+function DAGLayer:batch_resize(batch_size)
+ self.gconf.batch_size = batch_size
+
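+    -- reallocate the connection matrices only when the row count actually
+    -- changes; ports with non-positive dimensions (the utterance key) keep
+    -- their placeholder matrices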
+ for i, conn in ipairs(self.parsed_conn) do
+ local _, output_dim
+ local ref_from, port_from, ref_to, port_to
+ ref_from, port_from = unpack(conn[1])
+ ref_to, port_to = unpack(conn[2])
+ _, output_dim = ref_from.layer:get_dim()
+
+ if ref_from.outputs[port_from]:nrow() ~= batch_size and output_dim[port_from] > 0 then
+ local mid = self.gconf.cumat_type(batch_size, output_dim[port_from])
+ local err_mid = mid:create()
+
+ ref_from.outputs[port_from] = mid
+ ref_to.inputs[port_to] = mid
+
+ ref_from.err_inputs[port_from] = err_mid
+ ref_to.err_outputs[port_to] = err_mid
+ end
+ end
+ for id, ref in pairs(self.layers) do
+ ref.layer:batch_resize(batch_size)
+ end
+ collectgarbage("collect")
+end
+
function DAGLayer:set_inputs(input)
for i = 1, #self.dim_in do
if input[i] == nil then
@@ -228,11 +258,13 @@ end
function DAGLayer:propagate(input, output)
self:set_inputs(input)
self:set_outputs(output)
+ local ret = false
for i = 1, #self.queue do
local ref = self.queue[i]
-- print(ref.layer.id)
- ref.layer:propagate(ref.inputs, ref.outputs)
+ ret = ref.layer:propagate(ref.inputs, ref.outputs)
end
+ return ret
end
function DAGLayer:back_propagate(bp_err, next_bp_err, input, output)