author    txh18 <[email protected]>  2015-11-06 19:57:46 +0800
committer txh18 <[email protected]>  2015-11-06 19:57:46 +0800
commit    ae4e5218cd96e3888b7eaa90412b2279d14337f3 (patch)
tree      05f030ef5b44de3c3dbf7e34f24b7279492a085b
parent    26db912e38c3446961831d17be6b4508ec508bca (diff)
first small tnn test seems to work
-rw-r--r--  nerv/examples/lmptb/lmptb/layer/select_linear.lua  |   4
-rw-r--r--  nerv/examples/lmptb/lmptb/lmseqreader.lua          |   2
-rw-r--r--  nerv/examples/lmptb/m-tests/dagl_test.lua          | 180
-rw-r--r--  nerv/examples/lmptb/m-tests/some-text              |   2
-rw-r--r--  nerv/examples/lmptb/main.lua                       |   2
-rw-r--r--  nerv/examples/lmptb/rnn/tnn.lua                    | 200
6 files changed, 171 insertions(+), 219 deletions(-)
diff --git a/nerv/examples/lmptb/lmptb/layer/select_linear.lua b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
index d4cff0b..efbaf20 100644
--- a/nerv/examples/lmptb/lmptb/layer/select_linear.lua
+++ b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
@@ -36,7 +36,7 @@ function SL:update(bp_err, input, output)
--word_vec:add(word_vec, bp_err[1][i - 1], 1, - self.gconf.lrate / self.gconf.batch_size)
-- end
--end
- self.ltp.trans:update_select_rows(bp_err[1], input[1], - self.gconf.lrate / self.gconf.batch_size, 0)
+ self.ltp.trans:update_select_rows(bp_err[1], input[1]:trans(), - self.gconf.lrate / self.gconf.batch_size, 0)
end
function SL:propagate(input, output)
@@ -47,7 +47,7 @@ function SL:propagate(input, output)
-- output[1][i]:fill(0)
-- end
--end
- output[1]:copy_rows_fromd_by_idx(self.ltp.trans, input[1])
+ output[1]:copy_rows_fromd_by_idx(self.ltp.trans, input[1]:trans())
end
function SL:back_propagate(bp_err, next_bp_err, input, output)
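
The two hunks above change SelectLinearLayer to transpose the word-index matrix before handing it to the CUDA kernels, i.e. the layer now receives the word ids as a column vector (see the TODO added to main.lua later in this commit). As a rough illustration of what the two kernel calls compute, here is a plain-Lua sketch; `emb`, `idx`, `err` and the helper names are hypothetical stand-ins, not the nerv API:

local function select_rows(emb, idx, batch_size, hidden_size)
    -- copy_rows_fromd_by_idx, conceptually: gather the embedding rows named by idx
    local out = {}
    for b = 1, batch_size do
        out[b] = {}
        local w = idx[b]              -- word id for sample b (row 0 reserved for padding)
        for h = 1, hidden_size do
            out[b][h] = emb[w][h]
        end
    end
    return out
end

local function update_select_rows(emb, idx, err, scale, batch_size, hidden_size)
    -- update_select_rows, conceptually: scatter-add the scaled error into the selected rows only
    for b = 1, batch_size do
        local w = idx[b]
        for h = 1, hidden_size do
            emb[w][h] = emb[w][h] + scale * err[b][h]
        end
    end
end

In the diff above, scale corresponds to -lrate / batch_size, so only the embedding rows of the words seen in this batch are moved against the gradient; all other rows stay untouched.
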
diff --git a/nerv/examples/lmptb/lmptb/lmseqreader.lua b/nerv/examples/lmptb/lmptb/lmseqreader.lua
index 6cbd0e9..41e3903 100644
--- a/nerv/examples/lmptb/lmptb/lmseqreader.lua
+++ b/nerv/examples/lmptb/lmptb/lmseqreader.lua
@@ -121,7 +121,7 @@ function LMReader:get_batch(feeds)
end
end
end
-
+
for j = 1, self.chunk_size, 1 do
flagsPack[j] = 0
for i = 1, self.batch_size, 1 do
diff --git a/nerv/examples/lmptb/m-tests/dagl_test.lua b/nerv/examples/lmptb/m-tests/dagl_test.lua
deleted file mode 100644
index 6bd11c8..0000000
--- a/nerv/examples/lmptb/m-tests/dagl_test.lua
+++ /dev/null
@@ -1,180 +0,0 @@
-require 'lmptb.lmvocab'
-require 'lmptb.lmfeeder'
-require 'lmptb.lmutil'
-require 'lmptb.layer.init'
-require 'lmptb.lmseqreader'
-require 'rnn.tnn'
-
---[[global function rename]]--
-printf = nerv.printf
---[[global function rename ends]]--
-
---global_conf: table
---first_time: bool
---Returns: a ParamRepo
-function prepare_parameters(global_conf, first_time)
- printf("%s preparing parameters...\n", global_conf.sche_log_pre)
-
- if (first_time) then
- ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
- ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size() + 1, global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
- ltp_ih.trans:generate(global_conf.param_random)
- ltp_ih.trans[0]:fill(0)
-
- ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
- ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
- ltp_hh.trans:generate(global_conf.param_random)
-
- ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
- ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
- ltp_ho.trans:generate(global_conf.param_random)
-
- bp_h = nerv.BiasParam("bp_h", global_conf)
- bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
- bp_h.trans:generate(global_conf.param_random)
-
- bp_o = nerv.BiasParam("bp_o", global_conf)
- bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
- bp_o.trans:generate(global_conf.param_random)
-
- local f = nerv.ChunkFile(global_conf.param_fn, 'w')
- f:write_chunk(ltp_ih)
- f:write_chunk(ltp_hh)
- f:write_chunk(ltp_ho)
- f:write_chunk(bp_h)
- f:write_chunk(bp_o)
- f:close()
- end
-
- local paramRepo = nerv.ParamRepo()
- paramRepo:import({global_conf.param_fn}, nil, global_conf)
-
- printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
-
- return paramRepo
-end
-
---global_conf: table
---Returns: nerv.LayerRepo
-function prepare_layers(global_conf, paramRepo)
- printf("%s preparing layers...\n", global_conf.sche_log_pre)
-
- local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
-
- local layers = {
- ["nerv.IndRecurrentLayer"] = {
- ["recurrentL1"] = recurrentLconfig,
- },
-
- ["nerv.SelectLinearLayer"] = {
- ["selectL1"] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}},
- },
-
- ["nerv.SigmoidLayer"] = {
- ["sigmoidL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
- },
-
- ["nerv.AffineLayer"] = {
- ["outputL"] = {{["ltp"] = "ltp_ho", ["bp"] = "bp_o"}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}}},
- },
-
- ["nerv.SoftmaxCELayer"] = {
- ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
- },
- }
-
- --[[ --we do not need those in the new rnn framework
- printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt)
- for i = 1, global_conf.bptt do
- layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig
- layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
- layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
- end
- --]]
-
- local layerRepo = nerv.LayerRepo(layers, paramRepo, global_conf)
- printf("%s preparing layers end.\n", global_conf.sche_log_pre)
- return layerRepo
-end
-
---global_conf: table
---layerRepo: nerv.LayerRepo
---Returns: a nerv.TNN
-function prepare_dagLayer(global_conf, layerRepo)
- printf("%s Initing TNN ...\n", global_conf.sche_log_pre)
-
- --input: input_w, input_w, ... input_w_now, last_activation
- local connections_t = {
- {"<input>[1]", "selectL1[1]", 0},
- {"selectL1[1]", "recurrentL1[1]", 0},
- {"recurrentL1[1]", "sigmoidL1[1]", 0},
- {"sigmoidL1[1]", "outputL[1]", 0},
- {"sigmoidL1[1]", "recurrentL1[2]", 1},
- {"outputL[1]", "softmaxL[1]", 0},
- {"<input>[2]", "softmaxL[2]", 0},
- {"softmaxL[1]", "<output>[1]", 0}
- }
-
- --[[
- printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
- for key, value in pairs(connections_t) do
- printf("\t%s->%s\n", key, value)
- end
- ]]--
-
- local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()}, ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
- ["connections"] = connections_t,
- })
- printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
- return tnn
-end
-
-local train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-local test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-
-local global_conf = {
- lrate = 1, wcost = 1e-6, momentum = 0,
- cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.CuMatrixFloat,
- nn_act_default = 0,
-
- hidden_size = 20,
- chunk_size = 5,
- batch_size = 3,
- max_iter = 18,
- param_random = function() return (math.random() / 5 - 0.1) end,
- independent = true,
-
- train_fn = train_fn,
- test_fn = test_fn,
- sche_log_pre = "[SCHEDULER]:",
- log_w_num = 10, --give a message when log_w_num words have been processed
- timer = nerv.Timer()
-}
-global_conf.work_dir = '/home/slhome/txh18/workspace/nerv/play/dagL_test'
-global_conf.param_fn = global_conf.work_dir.."/params"
-
-local vocab = nerv.LMVocab()
-global_conf["vocab"] = vocab
-global_conf.vocab:build_file(global_conf.train_fn, false)
-local paramRepo = prepare_parameters(global_conf, true)
-local layerRepo = prepare_layers(global_conf, paramRepo)
-local tnn = prepare_dagLayer(global_conf, layerRepo)
-tnn:init(global_conf.batch_size, global_conf.chunk_size)
-
-local reader = nerv.LMSeqReader(global_conf, global_conf.batch_size, global_conf.chunk_size, global_conf.vocab)
-reader:open_file(global_conf.train_fn)
-
-local batch_num = 1
-while (1) do
- local r, feeds
- r, feeds = tnn:getFeedFromReader(reader)
- if (r == false) then break end
- for j = 1, global_conf.chunk_size, 1 do
- for i = 1, global_conf.batch_size, 1 do
- printf("%s[L(%s)] ", feeds.inputs_s[j][i], feeds.labels_s[j][i]) --vocab:get_word_str(input[i][j]).id
- end
- printf("\n")
- end
- printf("\n")
-end
diff --git a/nerv/examples/lmptb/m-tests/some-text b/nerv/examples/lmptb/m-tests/some-text
index cdfbd2c..da4bea9 100644
--- a/nerv/examples/lmptb/m-tests/some-text
+++ b/nerv/examples/lmptb/m-tests/some-text
@@ -1,6 +1,6 @@
</s> aa bb cc aa bb cc aa bb cc aa bb cc aa bb cc aa </s>
</s> aa bb cc aa bb cc aa bb cc aa </s>
-</s> aa bb cc aa bb cc aa bb cc aa </s>
+</s> bb cc aa bb cc aa bb cc aa </s>
</s> aa bb cc aa </s>
</s> aa bb cc aa </s>
</s> aa bb cc aa </s>
diff --git a/nerv/examples/lmptb/main.lua b/nerv/examples/lmptb/main.lua
index 1939eda..a93c148 100644
--- a/nerv/examples/lmptb/main.lua
+++ b/nerv/examples/lmptb/main.lua
@@ -1,3 +1,5 @@
+--TODO: the select_linear now accepts a column vector, instead of a row vector
+
require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
diff --git a/nerv/examples/lmptb/rnn/tnn.lua b/nerv/examples/lmptb/rnn/tnn.lua
index 8037918..460fcc4 100644
--- a/nerv/examples/lmptb/rnn/tnn.lua
+++ b/nerv/examples/lmptb/rnn/tnn.lua
@@ -34,9 +34,11 @@ local function discover(id, layers, layer_repo)
layer = layer,
inputs_m = {}, --storage for computation, inputs_m[time][port]
inputs_b = {}, --inputs_b[time][port], whether this input has been computed
+ inputs_p_matbak = {}, --back-up buffer for computation that crosses the chunk border, inputs_p_matbak[port]
outputs_m = {},
outputs_b = {},
err_inputs_m = {},
+ err_inputs_p_matbak = {}, --back-up buffer for computation that crosses the chunk border
err_inputs_b = {},
err_outputs_m = {},
err_outputs_b = {},
@@ -57,26 +59,36 @@ nerv.TNN.FC.HAS_INPUT = 1
nerv.TNN.FC.HAS_LABEL = 2
nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --this instance has both input and label
-function TNN.makeInitialStore(st, p, dim, batch_size, chunk_size, global_conf, st_c, p_c)
+function TNN.makeInitialStore(st, p, dim, batch_size, chunk_size, global_conf, st_c, p_c, t_c)
--Return a table of matrix storage from time (1-chunk_size)..(2*chunk_size)
if (type(st) ~= "table") then
nerv.error("st should be a table")
end
- for i = 1 - chunk_size, chunk_size * 2 do
+ for i = 1 - chunk_size - 1, chunk_size * 2 + 1 do --intentionally allocate one extra step at each end; the logical range is [1-chunk_size, chunk_size*2]
if (st[i] == nil) then
st[i] = {}
end
st[i][p] = global_conf.cumat_type(batch_size, dim)
st[i][p]:fill(0)
if (st_c ~= nil) then
- if (st_c[i] == nil) then
- st_c[i] = {}
+ if (st_c[i + t_c] == nil) then
+ st_c[i + t_c] = {}
end
- st_c[i][p_c] = st[i][p]
+ st_c[i + t_c][p_c] = st[i][p]
end
end
end
+function TNN:outOfFeedRange(t) --out of chunk, or no input, for the current feed
+ if (t < 1 or t > self.chunk_size) then
+ return true
+ end
+ if (self.feeds_now.flagsPack_now[t] == 0 or self.feeds_now.flagsPack_now[t] == nil) then
+ return true
+ end
+ return false
+end
+
function TNN:__init(id, global_conf, layer_conf)
local layers = {}
local inputs_p = {} --map:port of the TDAGLayer to layer ref and port
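
The hunk above gives makeInitialStore an extra argument t_c, the time delay of the connection, and allocates one spare time step at each end of the logical window [1-chunk_size, chunk_size*2]. When a destination table st_c is supplied, each allocated matrix is aliased into it shifted by t_c, so a delayed connection reads the source's output of an earlier time step without any copy. A minimal plain-Lua sketch of that aliasing, keeping only the logical window and using hypothetical names (make_shared_store and its arguments are not the nerv API):

local function make_shared_store(src_out, dst_in, delay, chunk_size)
    for t = 1 - chunk_size, chunk_size * 2 do
        src_out[t] = src_out[t] or {}      -- one buffer per time step of the window
        dst_in[t + delay] = src_out[t]     -- alias, shifted by the connection delay
    end
end

-- Example: the recurrent edge "sigmoidL1[1] -> recurrentL1[2]" with delay 1 lets
-- recurrentL1 at time t read the sigmoid output produced at time t-1.
local sigmoid_out, recurrent_in = {}, {}
make_shared_store(sigmoid_out, recurrent_in, 1, 5)
assert(recurrent_in[3] == sigmoid_out[2])
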
@@ -109,7 +121,7 @@ function TNN:__init(id, global_conf, layer_conf)
outputs_p[port_to] = {["ref"] = ref_from, ["port"] = port_from}
ref_from.outputs_m[port_from] = {} --just a place holder
else
- conn_now = {
+ local conn_now = {
["src"] = {["ref"] = ref_from, ["port"] = port_from},
["dst"] = {["ref"] = ref_to, ["port"] = port_to},
["time"] = time_to
@@ -138,9 +150,11 @@ function TNN:init(batch_size, chunk_size)
self.chunk_size = chunk_size
for i, conn in ipairs(self.parsed_conns) do --init storage for connections inside the NN
local _, output_dim
- local ref_from, port_from, ref_to, port_to
+ local ref_from, port_from, ref_to, port_to, time
ref_from, port_from = conn.src.ref, conn.src.port
ref_to, port_to = conn.dst.ref, conn.dst.port
+ time = conn.time
+
local dim = ref_from.dim_out[port_from]
if (dim == 0) then
nerv.error("layer %s has a zero dim port", ref_from.layer.id)
@@ -148,9 +162,9 @@ function TNN:init(batch_size, chunk_size)
print("TNN initing storage", ref_from.layer.id, "->", ref_to.layer.id)
ref_to.inputs_p_matbak[port_to] = self.gconf.cumat_type(batch_size, dim)
- self.makeInitialStore(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.inputs_m, port_to)
+ self.makeInitialStore(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.inputs_m, port_to, time)
ref_from.err_inputs_p_matbak[port_from] = self.gconf.cumat_type(batch_size, dim)
- self.makeInitialStore(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.err_outputs_m, port_to)
+ self.makeInitialStore(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.err_outputs_m, port_to, time)
end
@@ -159,8 +173,8 @@ function TNN:init(batch_size, chunk_size)
for i = 1, #self.dim_out do --Init storage for output ports
local ref = self.outputs_p[i].ref
local p = self.outputs_p[i].port
- self.makeInitialStore(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.outputs_m, i)
- self.makeInitialStore(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.err_inputs_m, i)
+ self.makeInitialStore(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.outputs_m, i, 0)
+ self.makeInitialStore(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.err_inputs_m, i, 0)
end
self.inputs_m = {}
@@ -168,8 +182,8 @@ function TNN:init(batch_size, chunk_size)
for i = 1, #self.dim_in do --Init storage for input ports
local ref = self.inputs_p[i].ref
local p = self.inputs_p[i].port
- self.makeInitialStore(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.inputs_m, i)
- self.makeInitialStore(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.err_outputs_m, i)
+ self.makeInitialStore(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.inputs_m, i, 0)
+ self.makeInitialStore(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.err_outputs_m, i, 0)
end
for id, ref in pairs(self.layers) do --Calling init for child layers
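
The remaining hunk replaces DAGLayer's precomputed queue with a time-aware, dependency-driven traversal. net_propagate clears the per-port ready flags and seeds the input ports of every time step that has input; propagate_dfs then runs a layer at time t only once every incoming connection is either marked ready or falls outside the feed range, and afterwards marks and visits its successors at t plus the connection delay. net_backpropagate and backpropagate_dfs mirror this with the connection directions and time offsets reversed, and moveRightToNextMB copies the last chunk's outputs into the negative-time history slots for the next minibatch. A condensed sketch of the scheduling idea, with hypothetical node/ready/done fields rather than the actual TNN structures:

local function propagate_dfs(node, t, out_of_range)
    if out_of_range(t) or node.done[t] then
        return
    end
    for _, conn in ipairs(node.conns_in) do
        -- an input counts as satisfied if it is ready or if its source time step
        -- falls outside the current feed
        if not (node.ready[t][conn.port] or out_of_range(t - conn.delay)) then
            return
        end
    end
    node.forward(t)                        -- all inputs ready: run the layer at time t
    node.done[t] = true
    for _, conn in ipairs(node.conns_out) do
        if not out_of_range(t + conn.delay) then
            conn.dst.ready[t + conn.delay][conn.port] = true
            propagate_dfs(conn.dst, t + conn.delay, out_of_range)
        end
    end
end
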
@@ -274,45 +288,161 @@ function TNN:getFeedFromReader(reader)
return got_new, feeds_now
end
+function TNN:moveRightToNextMB() --move output history activations of 1..chunk_size to 1-chunk_size..0
+ for t = self.chunk_size, 1, -1 do
+ for id, ref in pairs(self.layers) do
+ for p = 1, #ref.dim_out do
+ ref.outputs_m[t - self.chunk_size][p]:copy_fromd(ref.outputs_m[t][p])
+ end
+ end
+ end
+end
+
function TNN:net_propagate() --propagate according to feeds_now
+ for t = 1, self.chunk_size, 1 do
+ for id, ref in pairs(self.layers) do
+ for p = 1, #ref.dim_out do
+ ref.outputs_b[t][p] = false
+ end
+ for p = 1, #ref.dim_in do
+ ref.inputs_b[t][p] = false
+ end
+ end
+ end
+
local feeds_now = self.feeds_now
- for t = 1, chunk_size do
+ for t = 1, self.chunk_size do
if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then
for i = 1, #self.dim_in do
- local ref = inputs_p[i].ref
- local p = inputs_p[i].port
+ local ref = self.inputs_p[i].ref
+ local p = self.inputs_p[i].port
ref.inputs_b[t][p] = true
+ self:propagate_dfs(ref, t)
end
- --TODO
end
end
end
-function DAGLayer:update(bp_err, input, output)
- self:set_err_inputs(bp_err)
- self:set_inputs(input)
- self:set_outputs(output)
- -- print("update")
- for id, ref in pairs(self.queue) do
- -- print(ref.layer.id)
- ref.layer:update(ref.err_inputs, ref.inputs, ref.outputs)
+--ref: the TNN_ref of a layer
+--t: the current time to propagate
+function TNN:propagate_dfs(ref, t)
+ if (self:outOfFeedRange(t)) then
+ return
+ end
+ if (ref.outputs_b[t][1] == true) then --already propagated, 1 is just a random port
+ return
+ end
+
+ --print("debug dfs", ref.layer.id, t)
+
+ local flag = true --whether all inputs are ready
+ for _, conn in pairs(ref.conns_i) do
+ local p = conn.dst.port
+ if (not (ref.inputs_b[t][p] or self:outOfFeedRange(t - conn.time))) then
+ flag = false
+ break
+ end
+ end
+ if (flag == false) then
+ return
+ end
+
+ --ok, do propagate
+ --print("debug ok, propagating");
+ ref.layer:propagate(ref.inputs_m[t], ref.outputs_m[t])
+ for i = 1, #ref.dim_out do
+ if (ref.outputs_b[t][i] == true) then
+ nerv.error("this time's outputs_b should be false")
+ end
+ ref.outputs_b[t][i] = true
+ end
+
+ --try dfs for further layers
+ for _, conn in pairs(ref.conns_o) do
+ --print("debug dfs-searching", conn.dst.ref.layer.id)
+ conn.dst.ref.inputs_b[t + conn.time][conn.dst.port] = true
+ self:propagate_dfs(conn.dst.ref, t + conn.time)
end
end
-function DAGLayer:back_propagate(bp_err, next_bp_err, input, output)
- self:set_err_outputs(next_bp_err)
- self:set_err_inputs(bp_err)
- self:set_inputs(input)
- self:set_outputs(output)
- for i = #self.queue, 1, -1 do
- local ref = self.queue[i]
- -- print(ref.layer.id)
- ref.layer:back_propagate(ref.err_inputs, ref.err_outputs, ref.inputs, ref.outputs)
+--do_update: bool, false means back-propagate only, true means update the parameters
+function TNN:net_backpropagate(do_update) --propagate according to feeds_now
+ if (do_update == nil) then
+ nerv.error("do_update should not be nil")
+ end
+ for t = 1, self.chunk_size, 1 do
+ for id, ref in pairs(self.layers) do
+ for p = 1, #ref.dim_out do
+ ref.err_inputs_b[t][p] = false
+ end
+ for p = 1, #ref.dim_in do
+ ref.err_outputs_b[t][p] = false
+ end
+ end
+ end
+
+ local feeds_now = self.feeds_now
+ for t = 1, self.chunk_size do
+ if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0) then
+ for i = 1, #self.dim_out do
+ local ref = self.outputs_p[i].ref
+ local p = self.outputs_p[i].port
+ ref.err_inputs_b[t][p] = true
+ self:backpropagate_dfs(ref, t, do_update)
+ end
+ end
+ end
+end
+
+--ref: the TNN_ref of a layer
+--t: the current time to propagate
+function TNN:backpropagate_dfs(ref, t, do_update)
+ if (self:outOfFeedRange(t)) then
+ return
+ end
+ if (ref.err_outputs_b[t][1] == true) then --already back_propagated, 1 is just a random port
+ return
+ end
+
+ --print("debug dfs", ref.layer.id, t)
+
+ local flag = true --whether all error inputs are ready
+ for _, conn in pairs(ref.conns_o) do
+ local p = conn.src.port
+ if (not (ref.err_inputs_b[t][p] or self:outOfFeedRange(t + conn.time))) then
+ flag = false
+ break
+ end
+ end
+ if (flag == false) then
+ return
+ end
+
+ --ok, do back_propagate
+ --print("debug ok, back-propagating(or updating)")
+ if (do_update == false) then
+ ref.layer:back_propagate(ref.err_inputs_m[t], ref.err_outputs_m[t], ref.inputs_m[t], ref.outputs_m[t])
+ else
+ --print(ref.err_inputs_m[t][1])
+ ref.layer:update(ref.err_inputs_m[t], ref.inputs_m[t], ref.outputs_m[t])
+ end
+ for i = 1, #ref.dim_in do
+ if (ref.err_outputs_b[t][i] == true) then
+ nerv.error("this time's outputs_b should be false")
+ end
+ ref.err_outputs_b[t][i] = true
+ end
+
+ --try dfs for further layers
+ for _, conn in pairs(ref.conns_i) do
+ --print("debug dfs-searching", conn.src.ref.layer.id)
+ conn.src.ref.err_inputs_b[t - conn.time][conn.src.port] = true
+ self:backpropagate_dfs(conn.src.ref, t - conn.time, do_update)
end
end
--Return: nerv.ParamRepo
-function DAGLayer:get_params()
+function TNN:get_params()
local param_repos = {}
for id, ref in pairs(self.queue) do
table.insert(param_repos, ref.layer:get_params())