-rw-r--r--  nerv/Makefile                        6
-rw-r--r--  nerv/examples/asr_trainer.lua       22
-rw-r--r--  nerv/examples/timit_baseline2.lua   60
-rw-r--r--  nerv/init.lua                       10
-rw-r--r--  nerv/layer/duplicate.lua            41
-rw-r--r--  nerv/layer/graph.lua               156
-rw-r--r--  nerv/layer/gru.lua                   4
-rw-r--r--  nerv/layer/identity.lua             30
-rw-r--r--  nerv/layer/init.lua                 12
-rw-r--r--  nerv/layer/lstm.lua                 52
-rw-r--r--  nerv/layer/rnn.lua                  38
-rw-r--r--  nerv/layer/sigmoid.lua               6
-rw-r--r--  nerv/nn/init.lua                     2
-rw-r--r--  nerv/nn/layer_dag.lua              352
-rw-r--r--  nerv/nn/network.lua                500
-rw-r--r--  nerv/tnn/init.lua                   47
-rw-r--r--  nerv/tnn/sutil.lua                  80
-rw-r--r--  nerv/tnn/tnn.lua                   596
18 files changed, 869 insertions, 1145 deletions
diff --git a/nerv/Makefile b/nerv/Makefile
index e8bcad2..7921bd9 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -42,9 +42,9 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/tanh.lua layer/softmax_ce.lua layer/softmax.lua \
layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua \
layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
- nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
- io/sgd_buffer.lua \
- tnn/init.lua tnn/sutil.lua tnn/tnn.lua
+ layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
+ nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua \
+ io/sgd_buffer.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
CUDA_INCLUDE := -I $(CUDA_BASE)/include/
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 5bf28bd..6bdf57c 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -20,6 +20,12 @@ local function build_trainer(ifname)
local network = get_network(layer_repo)
local global_transf = get_global_transf(layer_repo)
local input_order = get_input_order()
+
+ network = nerv.Network("nt", gconf, {network = network})
+ network:init(gconf.batch_size, 1)
+ global_transf = nerv.Network("gt", gconf, {network = global_transf})
+ global_transf:init(gconf.batch_size, 1)
+
local iterative_trainer = function (prefix, scp_file, bp, rebind_param_repo)
-- rebind the params if necessary
if rebind_param_repo then
@@ -32,10 +38,11 @@ local function build_trainer(ifname)
-- build buffer
local buffer = make_buffer(make_readers(scp_file, layer_repo))
-- initialize the network
- network:init(gconf.batch_size)
gconf.cnt = 0
err_input = {mat_type(gconf.batch_size, 1)}
err_input[1]:fill(1)
+ network:epoch_init()
+ global_transf:epoch_init()
for data in buffer.get_data, buffer do
-- print stat periodically
gconf.cnt = gconf.cnt + 1
@@ -69,10 +76,17 @@ local function build_trainer(ifname)
for i = 1, #input do
table.insert(err_output, input[i]:create())
end
- network:propagate(input, output)
+ network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
+ new_seq = {},
+ do_train = bp,
+ input = {input},
+ output = {output},
+ err_input = {err_input},
+ err_output = {err_output}})
+ network:propagate()
if bp then
- network:back_propagate(err_input, err_output, input, output)
- network:update(err_input, input, output)
+ network:back_propagate()
+ network:update()
end
-- collect garbage in-time to save GPU memory
collectgarbage("collect")
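
For reference, a minimal sketch (not part of the patch) of the per-mini-batch call sequence this file now uses, assuming a single-chunk (chunk_size = 1) feed-forward setup; network, gconf, bp, input, output, err_input and err_output are the same objects as in the surrounding training loop:

    network = nerv.Network("nt", gconf, {network = network})
    network:init(gconf.batch_size, 1)   -- allocate storage once
    network:epoch_init()                -- re-init sub-layers at the start of each epoch
    -- per mini-batch:
    network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
                             new_seq = {},
                             do_train = bp,
                             input = {input}, output = {output},
                             err_input = {err_input}, err_output = {err_output}})
    network:propagate()
    if bp then
        network:back_propagate()
        network:update()
    end
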
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index 2d144b5..d783c3d 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -61,35 +61,35 @@ function make_layer_repo(param_repo)
layer_repo:add_layers(
{
- ["nerv.DAGLayer"] =
+ ["nerv.GraphLayer"] =
{
global_transf = {
dim_in = {440}, dim_out = {440},
- sub_layers = layer_repo,
+ layer_repo = layer_repo,
connections = {
- ["<input>[1]"] = "blayer1[1]",
- ["blayer1[1]"] = "wlayer1[1]",
- ["wlayer1[1]"] = "<output>[1]"
+ {"<input>[1]", "blayer1[1]", 0},
+ {"blayer1[1]", "wlayer1[1]", 0},
+ {"wlayer1[1]", "<output>[1]", 0}
}
},
main = {
dim_in = {440}, dim_out = {1959},
- sub_layers = layer_repo,
+ layer_repo = layer_repo,
connections = {
- ["<input>[1]"] = "affine0[1]",
- ["affine0[1]"] = "sigmoid0[1]",
- ["sigmoid0[1]"] = "affine1[1]",
- ["affine1[1]"] = "sigmoid1[1]",
- ["sigmoid1[1]"] = "affine2[1]",
- ["affine2[1]"] = "sigmoid2[1]",
- ["sigmoid2[1]"] = "affine3[1]",
- ["affine3[1]"] = "sigmoid3[1]",
- ["sigmoid3[1]"] = "affine4[1]",
- ["affine4[1]"] = "sigmoid4[1]",
- ["sigmoid4[1]"] = "affine5[1]",
- ["affine5[1]"] = "sigmoid5[1]",
- ["sigmoid5[1]"] = "affine6[1]",
- ["affine6[1]"] = "<output>[1]"
+ {"<input>[1]", "affine0[1]", 0},
+ {"affine0[1]", "sigmoid0[1]", 0},
+ {"sigmoid0[1]", "affine1[1]", 0},
+ {"affine1[1]", "sigmoid1[1]", 0},
+ {"sigmoid1[1]", "affine2[1]", 0},
+ {"affine2[1]", "sigmoid2[1]", 0},
+ {"sigmoid2[1]", "affine3[1]", 0},
+ {"affine3[1]", "sigmoid3[1]", 0},
+ {"sigmoid3[1]", "affine4[1]", 0},
+ {"affine4[1]", "sigmoid4[1]", 0},
+ {"sigmoid4[1]", "affine5[1]", 0},
+ {"affine5[1]", "sigmoid5[1]", 0},
+ {"sigmoid5[1]", "affine6[1]", 0},
+ {"affine6[1]", "<output>[1]", 0}
}
}
}
@@ -97,25 +97,25 @@ function make_layer_repo(param_repo)
layer_repo:add_layers(
{
- ["nerv.DAGLayer"] =
+ ["nerv.GraphLayer"] =
{
ce_output = {
dim_in = {440, 1}, dim_out = {1},
- sub_layers = layer_repo,
+ layer_repo = layer_repo,
connections = {
- ["<input>[1]"] = "main[1]",
- ["main[1]"] = "ce_crit[1]",
- ["<input>[2]"] = "ce_crit[2]",
- ["ce_crit[1]"] = "<output>[1]"
+ {"<input>[1]", "main[1]", 0},
+ {"main[1]", "ce_crit[1]", 0},
+ {"<input>[2]", "ce_crit[2]", 0},
+ {"ce_crit[1]", "<output>[1]", 0}
}
},
softmax_output = {
dim_in = {440}, dim_out = {1959},
- sub_layers = layer_repo,
+ layer_repo = layer_repo,
connections = {
- ["<input>[1]"] = "main[1]",
- ["main[1]"] = "softmax[1]",
- ["softmax[1]"] = "<output>[1]"
+ {"<input>[1]", "main[1]", 0},
+ {"main[1]", "softmax[1]", 0},
+ {"softmax[1]", "<output>[1]", 0}
}
}
}
diff --git a/nerv/init.lua b/nerv/init.lua
index da7df29..ff944b8 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -347,10 +347,18 @@ function table.extend(tbl1, tbl2)
end
end
+function table.vector(len, fill)
+ local v = {}
+ fill = fill or 0
+ for i = 1, len do
+ table.insert(v, fill)
+ end
+ return v
+end
+
-- the following lines trigger the initialization of basic modules
nerv.include('matrix/init.lua')
nerv.include('io/init.lua')
nerv.include('layer/init.lua')
nerv.include('nn/init.lua')
-nerv.include('tnn/init.lua')
diff --git a/nerv/layer/duplicate.lua b/nerv/layer/duplicate.lua
new file mode 100644
index 0000000..137472b
--- /dev/null
+++ b/nerv/layer/duplicate.lua
@@ -0,0 +1,41 @@
+local DuplicateLayer = nerv.class('nerv.DuplicateLayer', 'nerv.Layer')
+
+function DuplicateLayer:__init(id, global_conf, layer_conf)
+ nerv.Layer.__init(self, id, global_conf, layer_conf)
+ self:check_dim_len(1, -1)
+ if #self.dim_out < 1 then
+ nerv.error('no output specified')
+ end
+ for i = 1, #self.dim_out do
+ if self.dim_out[i] ~= self.dim_in[1] then
+ nerv.error('mismatching dimensions of outputs')
+ end
+ end
+end
+
+function DuplicateLayer:init()
+end
+
+function DuplicateLayer:batch_resize()
+end
+
+function DuplicateLayer:propagate(input, output)
+ for i = 1, #self.dim_out do
+ output[i]:copy_from(input[1])
+ -- FIXME: use reference copy to speed up
+ end
+end
+
+function DuplicateLayer:back_propagate(bp_err, next_bp_err)
+ next_bp_err[1]:copy_from(bp_err[1])
+ for i = 2, #self.dim_out do
+ next_bp_err[1]:add(next_bp_err[1], bp_err[i], 1.0, 1.0)
+ end
+end
+
+function DuplicateLayer:update()
+end
+
+function DuplicateLayer:get_params()
+ return nerv.ParamRepo({}, self.loc_type)
+end
diff --git a/nerv/layer/graph.lua b/nerv/layer/graph.lua
new file mode 100644
index 0000000..5f42fca
--- /dev/null
+++ b/nerv/layer/graph.lua
@@ -0,0 +1,156 @@
+local GraphLayer = nerv.class('nerv.GraphLayer', 'nerv.Layer')
+
+function GraphLayer:__init(id, global_conf, layer_conf)
+ nerv.Layer.__init(self, id, global_conf, layer_conf)
+ self:graph_init(layer_conf.layer_repo, layer_conf.connections)
+end
+
+local function parse_id(str)
+ local id, port, _
+ _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]")
+ if id == nil or port == nil then
+ _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]")
+ if not (id == "<input>" or id == "<output>") then
+ nerv.error("wrong format of connection id")
+ end
+ end
+ port = tonumber(port)
+ return id, port
+end
+
+function GraphLayer:add_prefix(layers, connections)
+ local function ap(name)
+ return self.id .. '.' .. name
+ end
+
+ for layer_type, sublayers in pairs(layers) do
+ local tmp = {}
+ for name, layer_config in pairs(sublayers) do
+ tmp[ap(name)] = layer_config
+ end
+ layers[layer_type] = tmp
+ end
+
+ for i = 1, #connections do
+ local from, to = connections[i][1], connections[i][2]
+ if parse_id(from) ~= '<input>' then
+ connections[i][1] = ap(from)
+ end
+ if parse_id(to) ~= '<output>' then
+ connections[i][2] = ap(to)
+ end
+ end
+end
+
+function GraphLayer:discover(id, layer_repo)
+ if id == '<output>' then
+ id = '<input>'
+ end
+ local layers = self.layers
+ local ref = layers[id]
+ if ref == nil then
+ local layer = layer_repo:get_layer(id)
+ local dim_in, dim_out = layer:get_dim()
+ self.layer_num = self.layer_num + 1
+ ref = {
+ layer = layer,
+ inputs = {},
+ outputs = {},
+ dim_in = dim_in,
+ dim_out = dim_out,
+ id = self.layer_num,
+ }
+ layers[id] = ref
+ end
+ return ref
+end
+
+function GraphLayer:graph_init(layer_repo, connections)
+ local layers = {}
+ layers['<input>'] = {
+ inputs = {},
+ outputs = {},
+ dim_in = self.dim_out,
+ dim_out = self.dim_in,
+ id = 0,
+ }
+ self.layers = layers
+ self.layer_num = 0
+ self.connections = {}
+
+ -- check data dimension between connected ports
+ for _, edge in pairs(connections) do
+ local from, to, time = edge[1], edge[2], edge[3]
+ local id_from, port_from = parse_id(from)
+ local id_to, port_to = parse_id(to)
+ local ref_from = self:discover(id_from, layer_repo)
+ local ref_to = self:discover(id_to, layer_repo)
+ if ref_from.outputs[port_from] ~= nil then
+ nerv.error('%s has already been attached', from)
+ end
+ if ref_to.inputs[port_to] ~= nil then
+ nerv.error('%s has already been attached', to)
+ end
+ if ref_from.dim_out[port_from] ~= ref_to.dim_in[port_to] then
+ nerv.error('mismatching data dimension between %s and %s', from, to)
+ end
+ if ref_from.id == 0 and ref_to.id == 0 then
+ nerv.error('short-circuit connection between <input> and <output>')
+ end
+ ref_from.outputs[port_from] = true
+ ref_to.inputs[port_to] = true
+ table.insert(self.connections, {ref_from.id, port_from, ref_to.id, port_to, time})
+ end
+
+ -- check dangling ports
+ for id, ref in pairs(layers) do
+ if id ~= '<input>' then
+ for i = 1, #ref.dim_in do
+ if ref.inputs[i] == nil then
+ nerv.error('dangling input port %d of layer %s', i, id)
+ end
+ end
+ for i = 1, #ref.dim_out do
+ if ref.outputs[i] == nil then
+ nerv.error('dangling output port %d of layer %s', i, id)
+ end
+ end
+ end
+ end
+ for i = 1, #self.dim_in do
+ if layers['<input>'].outputs[i] == nil then
+ nerv.error('dangling port %d of layer <input>', i)
+ end
+ end
+ for i = 1, #self.dim_out do
+ if layers['<input>'].inputs[i] == nil then
+ nerv.error('dangling port %d of layer <output>', i)
+ end
+ end
+end
+
+function GraphLayer:set_attr(name, value)
+ self[name] = value
+ for id, ref in pairs(self.layers) do
+ if id ~= '<input>' then
+ ref.layer:set_attr(name, value)
+ end
+ end
+end
+
+function GraphLayer:get_sublayer(id)
+ if self.layers[id] == nil or id == '<input>' then
+ nerv.error('layer with id %s not found', id)
+ end
+ return self.layers[id].layer
+end
+
+function GraphLayer:get_params()
+ local param_repos = {}
+ for id, ref in pairs(self.layers) do
+ if id ~= '<input>' then
+ table.insert(param_repos, ref.layer:get_params())
+ end
+ end
+ return nerv.ParamRepo.merge(param_repos, self.loc_type)
+end
diff --git a/nerv/layer/gru.lua b/nerv/layer/gru.lua
index a590a67..71718d7 100644
--- a/nerv/layer/gru.lua
+++ b/nerv/layer/gru.lua
@@ -13,7 +13,7 @@ function GRULayer:__init(id, global_conf, layer_conf)
-- prepare a DAGLayer to hold the lstm structure
local pr = layer_conf.pr
if pr == nil then
- pr = nerv.ParamRepo(nil, self.loc_type)
+ pr = nerv.ParamRepo({}, self.loc_type)
end
local function ap(str)
@@ -102,7 +102,7 @@ end
function GRULayer:bind_params()
local pr = layer_conf.pr
if pr == nil then
- pr = nerv.ParamRepo(nil, self.loc_type)
+ pr = nerv.ParamRepo({}, self.loc_type)
end
self.lrepo:rebind(pr)
end
diff --git a/nerv/layer/identity.lua b/nerv/layer/identity.lua
new file mode 100644
index 0000000..d56337d
--- /dev/null
+++ b/nerv/layer/identity.lua
@@ -0,0 +1,30 @@
+local IdentityLayer = nerv.class('nerv.IdentityLayer', 'nerv.Layer')
+
+function IdentityLayer:__init(id, global_conf, layer_conf)
+ nerv.Layer.__init(self, id, global_conf, layer_conf)
+ self:check_dim_len(1, 1)
+ if self.dim_in[1] ~= self.dim_out[1] then
+ nerv.error('mismatching dimensions of input and output')
+ end
+end
+
+function IdentityLayer:init()
+end
+
+function IdentityLayer:batch_resize()
+end
+
+function IdentityLayer:propagate(input, output)
+ output[1]:copy_from(input[1])
+end
+
+function IdentityLayer:back_propagate(bp_err, next_bp_err)
+ next_bp_err[1]:copy_from(bp_err[1])
+end
+
+function IdentityLayer:update()
+end
+
+function IdentityLayer:get_params()
+ return nerv.ParamRepo({}, self.loc_type)
+end
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 146ad8c..475ef62 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -85,6 +85,14 @@ function Layer:get_dim()
return self.dim_in, self.dim_out
end
+function Layer:set_attr(name, value)
+ self[name] = value
+end
+
+function Layer:get_sublayer(id)
+ nerv.error('primitive layer does not have sublayers')
+end
+
function Layer:find_param(plist, lconf, gconf, p_type, p_dim)
if type(plist) == "string" then
plist = {plist}
@@ -119,6 +127,7 @@ function Layer:find_param(plist, lconf, gconf, p_type, p_dim)
return p
end
+nerv.include('graph.lua')
nerv.include('affine.lua')
nerv.include('sigmoid.lua')
nerv.include('tanh.lua')
@@ -133,6 +142,9 @@ nerv.include('lstm.lua')
nerv.include('lstm_gate.lua')
nerv.include('dropout.lua')
nerv.include('gru.lua')
+nerv.include('rnn.lua')
+nerv.include('duplicate.lua')
+nerv.include('identity.lua')
-- The following lines are for backward compatibility, and will be removed in
-- the future. The use of these names are deprecated.
diff --git a/nerv/layer/lstm.lua b/nerv/layer/lstm.lua
index d4c9212..641d5dc 100644
--- a/nerv/layer/lstm.lua
+++ b/nerv/layer/lstm.lua
@@ -8,7 +8,7 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
-- prepare a DAGLayer to hold the lstm structure
local pr = layer_conf.pr
if pr == nil then
- pr = nerv.ParamRepo(nil, self.loc_type)
+ pr = nerv.ParamRepo({}, self.loc_type)
end
local function ap(str)
@@ -18,47 +18,47 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
local dout1, dout2, dout3 = self.dim_out[1], self.dim_out[2], self.dim_out[3]
local layers = {
["nerv.CombinerLayer"] = {
- [ap("inputXDup")] = {{}, {dim_in = {din1},
+ [ap("inputXDup")] = {dim_in = {din1},
dim_out = {din1, din1, din1, din1},
- lambda = {1}}},
+ lambda = {1}},
- [ap("inputHDup")] = {{}, {dim_in = {din2},
+ [ap("inputHDup")] = {dim_in = {din2},
dim_out = {din2, din2, din2, din2},
- lambda = {1}}},
+ lambda = {1}},
- [ap("inputCDup")] = {{}, {dim_in = {din3},
+ [ap("inputCDup")] = {dim_in = {din3},
dim_out = {din3, din3, din3},
- lambda = {1}}},
+ lambda = {1}},
- [ap("mainCDup")] = {{}, {dim_in = {din3, din3},
+ [ap("mainCDup")] = {dim_in = {din3, din3},
dim_out = {din3, din3, din3},
- lambda = {1, 1}}},
+ lambda = {1, 1}},
},
["nerv.AffineLayer"] = {
- [ap("mainAffineL")] = {{}, {dim_in = {din1, din2},
+ [ap("mainAffineL")] = {dim_in = {din1, din2},
dim_out = {dout1},
- pr = pr}},
+ pr = pr},
},
["nerv.TanhLayer"] = {
- [ap("mainTanhL")] = {{}, {dim_in = {dout1}, dim_out = {dout1}}},
- [ap("outputTanhL")] = {{}, {dim_in = {dout1}, dim_out = {dout1}}},
+ [ap("mainTanhL")] = {dim_in = {dout1}, dim_out = {dout1}},
+ [ap("outputTanhL")] = {dim_in = {dout1}, dim_out = {dout1}},
},
["nerv.LSTMGateLayer"] = {
- [ap("forgetGateL")] = {{}, {dim_in = {din1, din2, din3},
- dim_out = {din3}, pr = pr}},
- [ap("inputGateL")] = {{}, {dim_in = {din1, din2, din3},
- dim_out = {din3}, pr = pr}},
- [ap("outputGateL")] = {{}, {dim_in = {din1, din2, din3},
- dim_out = {din3}, pr = pr}},
+ [ap("forgetGateL")] = {dim_in = {din1, din2, din3},
+ dim_out = {din3}, pr = pr},
+ [ap("inputGateL")] = {dim_in = {din1, din2, din3},
+ dim_out = {din3}, pr = pr},
+ [ap("outputGateL")] = {dim_in = {din1, din2, din3},
+ dim_out = {din3}, pr = pr},
},
["nerv.ElemMulLayer"] = {
- [ap("inputGMulL")] = {{}, {dim_in = {din3, din3},
- dim_out = {din3}}},
- [ap("forgetGMulL")] = {{}, {dim_in = {din3, din3},
- dim_out = {din3}}},
- [ap("outputGMulL")] = {{}, {dim_in = {din3, din3},
- dim_out = {din3}}},
+ [ap("inputGMulL")] = {dim_in = {din3, din3},
+ dim_out = {din3}},
+ [ap("forgetGMulL")] = {dim_in = {din3, din3},
+ dim_out = {din3}},
+ [ap("outputGMulL")] = {dim_in = {din3, din3},
+ dim_out = {din3}},
},
}
@@ -114,7 +114,7 @@ end
function LSTMLayer:bind_params()
local pr = layer_conf.pr
if pr == nil then
- pr = nerv.ParamRepo(nil, self.loc_type)
+ pr = nerv.ParamRepo({}, self.loc_type)
end
self.lrepo:rebind(pr)
end
diff --git a/nerv/layer/rnn.lua b/nerv/layer/rnn.lua
new file mode 100644
index 0000000..e59cf5b
--- /dev/null
+++ b/nerv/layer/rnn.lua
@@ -0,0 +1,38 @@
+local RNNLayer = nerv.class('nerv.RNNLayer', 'nerv.GraphLayer')
+
+function RNNLayer:__init(id, global_conf, layer_conf)
+ nerv.Layer.__init(self, id, global_conf, layer_conf)
+ self:check_dim_len(1, 1)
+
+ local din = layer_conf.dim_in[1]
+ local dout = layer_conf.dim_out[1]
+
+ local pr = layer_conf.pr
+ if pr == nil then
+ pr = nerv.ParamRepo({}, self.loc_type)
+ end
+
+ local layers = {
+ ['nerv.AffineLayer'] = {
+ main = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
+ },
+ ['nerv.SigmoidLayer'] = {
+ sigmoid = {dim_in = {dout}, dim_out = {dout}},
+ },
+ ['nerv.DuplicateLayer'] = {
+ dup = {dim_in = {dout}, dim_out = {dout, dout}},
+ }
+ }
+
+ local connections = {
+ {'<input>[1]', 'main[1]', 0},
+ {'main[1]', 'sigmoid[1]', 0},
+ {'sigmoid[1]', 'dup[1]', 0},
+ {'dup[1]', 'main[2]', 1},
+ {'dup[2]', '<output>[1]', 0},
+ }
+
+ self:add_prefix(layers, connections)
+ local layer_repo = nerv.LayerRepo(layers, pr, global_conf)
+ self:graph_init(layer_repo, connections)
+end
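
As a minimal usage sketch (not part of the patch), the new RNNLayer can be declared like any other layer; the id rnn1, the dimensions, and the pr and gconf objects below are illustrative placeholders:

    local layers = {
        ['nerv.RNNLayer'] = {
            rnn1 = {dim_in = {440}, dim_out = {300}, pr = pr},
        },
    }
    local repo = nerv.LayerRepo(layers, pr, gconf)
    -- rnn1 internally expands to affine + sigmoid + duplicate sub-layers with a
    -- one-step delayed edge, as built in RNNLayer:__init above.
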
diff --git a/nerv/layer/sigmoid.lua b/nerv/layer/sigmoid.lua
index a9f9749..5974ffc 100644
--- a/nerv/layer/sigmoid.lua
+++ b/nerv/layer/sigmoid.lua
@@ -3,6 +3,9 @@ local SigmoidLayer = nerv.class("nerv.SigmoidLayer", "nerv.Layer")
function SigmoidLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
self:check_dim_len(1, 1)
+ if self.dim_in[1] ~= self.dim_out[1] then
+ nerv.error("mismatching dimensions of input and output")
+ end
end
function SigmoidLayer:bind_params()
@@ -10,9 +13,6 @@ function SigmoidLayer:bind_params()
end
function SigmoidLayer:init()
- if self.dim_in[1] ~= self.dim_out[1] then
- nerv.error("mismatching dimensions of input and output")
- end
end
function SigmoidLayer:batch_resize(batch_size)
diff --git a/nerv/nn/init.lua b/nerv/nn/init.lua
index cbaf52b..1037d05 100644
--- a/nerv/nn/init.lua
+++ b/nerv/nn/init.lua
@@ -1,3 +1,3 @@
nerv.include('layer_repo.lua')
nerv.include('param_repo.lua')
-nerv.include('layer_dag.lua')
+nerv.include('network.lua')
diff --git a/nerv/nn/layer_dag.lua b/nerv/nn/layer_dag.lua
deleted file mode 100644
index f999752..0000000
--- a/nerv/nn/layer_dag.lua
+++ /dev/null
@@ -1,352 +0,0 @@
-local DAGLayer = nerv.class("nerv.DAGLayer", "nerv.Layer")
-
-local function parse_id(str)
- local id, port, _
- _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]")
- if id == nil or port == nil then
- _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]")
- if not (id == "<input>" or id == "<output>") then
- nerv.error("wrong format of connection id")
- end
- end
- port = tonumber(port)
- return id, port
-end
-
-local function discover(id, layers, layer_repo)
- local ref = layers[id]
- if id == "<input>" or id == "<output>" then
- return nil
- end
- if ref == nil then
- local layer = layer_repo:get_layer(id)
- local dim_in, dim_out = layer:get_dim()
- ref = {
- layer = layer,
- inputs = {},
- outputs = {},
- err_inputs = {},
- err_outputs = {},
- next_layers = {},
- input_len = #dim_in,
- output_len = #dim_out,
- in_deg = 0,
- visited = false
- }
- layers[id] = ref
- end
- return ref
-end
-
-local function touch_list_by_idx(list, idx)
- if list[idx] == nil then
- list[idx] = {}
- end
-end
-
-function DAGLayer:__init(id, global_conf, layer_conf)
- local layers = {}
- local inputs = {}
- local outputs = {}
- local dim_in = layer_conf.dim_in
- local dim_out = layer_conf.dim_out
- local parsed_conn = {}
- for from, to in pairs(layer_conf.connections) do
- local id_from, port_from = parse_id(from)
- local id_to, port_to = parse_id(to)
- local ref_from = discover(id_from, layers, layer_conf.sub_layers)
- local ref_to = discover(id_to, layers, layer_conf.sub_layers)
- local input_dim, output_dim, _
- if ref_from then
- touch_list_by_idx(ref_from.outputs, 1)
- if ref_from.outputs[1][port_from] ~= nil then
- nerv.error("%s has already been attached", from)
- end
- end
- if ref_to then
- touch_list_by_idx(ref_to.inputs, 1)
- if ref_to.inputs[1][port_to] ~= nil then
- nerv.error("%s has already been attached", to)
- end
- end
- if id_from == "<input>" then
- input_dim, _ = ref_to.layer:get_dim()
- if dim_in[port_from] ~= input_dim[port_to] then
- nerv.error("mismatching data dimension between %s and %s", from, to)
- end
- inputs[port_from] = {ref_to, port_to}
- ref_to.inputs[1][port_to] = inputs -- just a place holder
- elseif id_to == "<output>" then
- _, output_dim = ref_from.layer:get_dim()
- if output_dim[port_from] ~= dim_out[port_to] then
- nerv.error("mismatching data dimension between %s and %s", from, to)
- end
- outputs[port_to] = {ref_from, port_from}
- ref_from.outputs[1][port_from] = outputs -- just a place holder
- else
- _, output_dim = ref_from.layer:get_dim()
- input_dim, _ = ref_to.layer:get_dim()
- if output_dim[port_from] ~= input_dim[port_to] then
- nerv.error("mismatching data dimension between %s and %s", from, to)
- end
-
- table.insert(parsed_conn,
- {{ref_from, port_from}, {ref_to, port_to}})
- table.insert(ref_from.next_layers, ref_to) -- add edge
- ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer
- end
- end
-
- -- topology sort
- local queue = {}
- local l = 1
- local r = 1
- for id, ref in pairs(layers) do
- if ref.in_deg == 0 then
- table.insert(queue, ref)
- nerv.info("adding source layer: %s", id)
- r = r + 1
- end
- end
- if l == r then
- nerv.error("loop detected")
- end
- while l < r do
- local cur = queue[l]
- cur.visited = true
- l = l + 1
- for _, nl in pairs(cur.next_layers) do
- nl.in_deg = nl.in_deg - 1
- if nl.in_deg == 0 then
- table.insert(queue, nl)
- r = r + 1
- end
- end
- end
- for i = 1, #queue do
- nerv.info("enqueued layer: %s %s", queue[i].layer, queue[i].layer.id)
- end
-
- for id, ref in pairs(layers) do
- -- check wether the graph is connected
- if ref.visited == false then
- nerv.warning("layer %s is ignored", id)
- end
- end
-
- nerv.Layer.__init(self, id, global_conf, layer_conf)
- self.layers = layers
- self.inputs = inputs
- self.outputs = outputs
- self.parsed_conn = parsed_conn
- self.queue = queue
-end
-
-function DAGLayer:bind_params()
- -- do nothing (instead of rebinding params for each layer)
-end
-
-function DAGLayer:init(batch_size, chunk_size)
- if chunk_size == nil then
- chunk_size = 1
- end
- for i, conn in ipairs(self.parsed_conn) do
- local _, output_dim
- local ref_from, port_from, ref_to, port_to
- ref_from, port_from = unpack(conn[1])
- ref_to, port_to = unpack(conn[2])
- _, output_dim = ref_from.layer:get_dim()
- local dim = 1
- if output_dim[port_from] > 0 then
- dim = output_dim[port_from]
- end
-
- for t = 1, chunk_size do
- local mid = self.mat_type(batch_size, dim)
- local err_mid = mid:create()
- touch_list_by_idx(ref_to.inputs, t)
- touch_list_by_idx(ref_from.outputs, t)
- touch_list_by_idx(ref_from.err_inputs, t)
- touch_list_by_idx(ref_to.err_outputs, t)
-
- ref_from.outputs[t][port_from] = mid
- ref_to.inputs[t][port_to] = mid
-
- ref_from.err_inputs[t][port_from] = err_mid
- ref_to.err_outputs[t][port_to] = err_mid
- end
- end
- for id, ref in pairs(self.layers) do
- for i = 1, ref.input_len do
- if ref.inputs[1][i] == nil then
- nerv.error("dangling input port %d of layer %s", i, id)
- end
- end
- for i = 1, ref.output_len do
- if ref.outputs[1][i] == nil then
- nerv.error("dangling output port %d of layer %s", i, id)
- end
- end
- -- initialize sub layers
- ref.layer:init(batch_size, chunk_size)
- end
- for i = 1, #self.dim_in do
- if self.inputs[i] == nil then
- nerv.error("dangling port %d of layer <input>", i)
- end
- end
- for i = 1, #self.dim_out do
- if self.outputs[i] == nil then
- nerv.error("dangling port %d of layer <output>", i)
- end
- end
-end
-
-function DAGLayer:batch_resize(batch_size, chunk_size)
- if chunk_size == nil then
- chunk_size = 1
- end
-
- for i, conn in ipairs(self.parsed_conn) do
- local _, output_dim
- local ref_from, port_from, ref_to, port_to
- ref_from, port_from = unpack(conn[1])
- ref_to, port_to = unpack(conn[2])
- _, output_dim = ref_from.layer:get_dim()
-
- if ref_from.outputs[1][port_from]:nrow() ~= batch_size
- and output_dim[port_from] > 0 then
- for t = 1, chunk_size do
- local mid = self.mat_type(batch_size, output_dim[port_from])
- local err_mid = mid:create()
-
- ref_from.outputs[t][port_from] = mid
- ref_to.inputs[t][port_to] = mid
-
- ref_from.err_inputs[t][port_from] = err_mid
- ref_to.err_outputs[t][port_to] = err_mid
- end
- end
- end
- for id, ref in pairs(self.layers) do
- ref.layer:batch_resize(batch_size, chunk_size)
- end
- collectgarbage("collect")
-end
-
-function DAGLayer:set_inputs(input, t)
- for i = 1, #self.dim_in do
- if input[i] == nil then
- nerv.error("some input is not provided");
- end
- local layer = self.inputs[i][1]
- local port = self.inputs[i][2]
- touch_list_by_idx(layer.inputs, t)
- layer.inputs[t][port] = input[i]
- end
-end
-
-function DAGLayer:set_outputs(output, t)
- for i = 1, #self.dim_out do
- if output[i] == nil then
- nerv.error("some output is not provided");
- end
- local layer = self.outputs[i][1]
- local port = self.outputs[i][2]
- touch_list_by_idx(layer.outputs, t)
- layer.outputs[t][port] = output[i]
- end
-end
-
-function DAGLayer:set_err_inputs(bp_err, t)
- for i = 1, #self.dim_out do
- local layer = self.outputs[i][1]
- local port = self.outputs[i][2]
- touch_list_by_idx(layer.err_inputs, t)
- layer.err_inputs[t][port] = bp_err[i]
- end
-end
-
-function DAGLayer:set_err_outputs(next_bp_err, t)
- for i = 1, #self.dim_in do
- local layer = self.inputs[i][1]
- local port = self.inputs[i][2]
- touch_list_by_idx(layer.err_outputs, t)
- layer.err_outputs[t][port] = next_bp_err[i]
- end
-end
-
-function DAGLayer:update(bp_err, input, output, t)
- if t == nil then
- t = 1
- end
- self:set_err_inputs(bp_err, t)
- self:set_inputs(input, t)
- self:set_outputs(output, t)
- for id, ref in pairs(self.queue) do
- ref.layer:update(ref.err_inputs[t], ref.inputs[t], ref.outputs[t], t)
- end
-end
-
-function DAGLayer:propagate(input, output, t)
- if t == nil then
- t = 1
- end
- self:set_inputs(input, t)
- self:set_outputs(output, t)
- local ret = false
- for i = 1, #self.queue do
- local ref = self.queue[i]
- ret = ref.layer:propagate(ref.inputs[t], ref.outputs[t], t)
- end
- return ret
-end
-
-function DAGLayer:back_propagate(bp_err, next_bp_err, input, output, t)
- if t == nil then
- t = 1
- end
- self:set_err_outputs(next_bp_err, t)
- self:set_err_inputs(bp_err, t)
- self:set_inputs(input, t)
- self:set_outputs(output, t)
- for i = #self.queue, 1, -1 do
- local ref = self.queue[i]
- ref.layer:back_propagate(ref.err_inputs[t], ref.err_outputs[t], ref.inputs[t], ref.outputs[t], t)
- end
-end
-
-function DAGLayer:get_params()
- local param_repos = {}
- for id, ref in pairs(self.queue) do
- table.insert(param_repos, ref.layer:get_params())
- end
- return nerv.ParamRepo.merge(param_repos, self.loc_type)
-end
-
-DAGLayer.PORT_TYPES = {
- INPUT = {},
- OUTPUT = {},
- ERR_INPUT = {},
- ERR_OUTPUT = {}
-}
-
-function DAGLayer:get_intermediate(id, port_type)
- if id == "<input>" or id == "<output>" then
- nerv.error("an actual real layer id is expected")
- end
- local layer = self.layers[id]
- if layer == nil then
- nerv.error("layer id %s not found", id)
- end
- if port_type == DAGLayer.PORT_TYPES.INPUT then
- return layer.inputs
- elseif port_type == DAGLayer.PORT_TYPES.OUTPUT then
- return layer.outputs
- elseif port_type == DAGLayer.PORT_TYPES.ERR_INPUT then
- return layer.err_inputs
- elseif port_type == DAGLayer.PORT_TYPES.ERR_OUTPUT then
- return layer.err_outputs
- end
- nerv.error("unrecognized port type")
-end
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
new file mode 100644
index 0000000..2cb83ce
--- /dev/null
+++ b/nerv/nn/network.lua
@@ -0,0 +1,500 @@
+local network = nerv.class('nerv.Network')
+
+function network:__init(id, global_conf, network_conf)
+ self.id = id
+ self.network = network_conf.network
+ self.dim_in = self.network.dim_in
+ self.dim_out = self.network.dim_out
+ self.gconf = global_conf
+ if self.gconf.use_cpu then
+ self.mat_type = self.gconf.mmat_type
+ else
+ self.mat_type = self.gconf.cumat_type
+ end
+ self.clip = network_conf.clip
+ self.nn_act_default = network_conf.nn_act_default
+ if self.nn_act_default == nil then
+ self.nn_act_default = 0
+ end
+ self.layers = {}
+ self.input_conn = {}
+ self.output_conn = {}
+ self.socket = self:compile(self.network)
+ for i = 1, #self.dim_in do
+ local edge = self.socket.inputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if self.input_conn[id][port] ~= nil then
+ nerv.error('duplicate edge')
+ end
+ self.input_conn[id][port] = {0, i, time}
+ end
+ for i = 1, #self.dim_out do
+ local edge = self.socket.outputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if self.output_conn[id][port] ~= nil then
+ nerv.error('duplicate edge')
+ end
+ self.output_conn[id][port] = {0, i, time}
+ end
+ self.delay = 0
+ for i = 1, #self.layers do
+ local dim_in, _ = self.layers[i]:get_dim()
+ for j = 1, #dim_in do
+ local time = self.input_conn[i][j][3]
+ if math.abs(time) > self.delay then
+ self.delay = math.abs(time)
+ end
+ end
+ end
+end
+
+function network:compile(layer)
+ local socket = {inputs = {}, outputs = {}}
+ if not nerv.is_type(layer, 'nerv.GraphLayer') then
+ table.insert(self.layers, layer)
+ local id = #self.layers
+ self.input_conn[id] = {}
+ self.output_conn[id] = {}
+ local dim_in, dim_out = layer:get_dim()
+ for i = 1, #dim_in do
+ socket.inputs[i] = {id, i, 0}
+ end
+ for i = 1, #dim_out do
+ socket.outputs[i] = {id, i, 0}
+ end
+ else
+ local sublayer_socket = {}
+ for id, sublayer in pairs(layer.layers) do
+ if id ~= '<input>' then
+ sublayer_socket[sublayer.id] = self:compile(sublayer.layer)
+ end
+ end
+ for _, edge in pairs(layer.connections) do
+ -- id = 0 means <input> or <output>
+ local id_from, port_from = edge[1], edge[2]
+ local id_to, port_to = edge[3], edge[4]
+ local time = edge[5]
+ if id_from == 0 then
+ if socket.inputs[port_from] ~= nil then
+ nerv.error('duplicate input socket')
+ end
+ local input = sublayer_socket[id_to].inputs[port_to]
+ local id, port, t = input[1], input[2], input[3] + time
+ socket.inputs[port_from] = {id, port, t}
+ else
+ local output = sublayer_socket[id_from].outputs[port_from]
+ local id, port, t = output[1], output[2], output[3] + time
+ if id_to == 0 then
+ if socket.outputs[port_to] ~= nil then
+ nerv.error('duplicate output socket')
+ end
+ socket.outputs[port_to] = {id, port, t}
+ else
+ local input = sublayer_socket[id_to].inputs[port_to]
+ local id1, port1, t1 = input[1], input[2], input[3]
+ if self.input_conn[id1][port1] ~= nil or self.output_conn[id][port] ~= nil then
+ nerv.error('duplicate edge')
+ end
+ self.input_conn[id1][port1] = {id, port, t + t1}
+ self.output_conn[id][port] = {id1, port1, t + t1}
+ end
+ end
+ end
+ end
+ return socket
+end
+
+function network:init(batch_size, chunk_size)
+ self.batch_size = batch_size
+ self.chunk_size = chunk_size
+
+ self:topsort()
+
+ self:make_initial_store()
+ collectgarbage('collect')
+end
+
+function network:epoch_init()
+ for i = 1, #self.layers do
+ self.layers[i]:init(self.batch_size, self.chunk_size)
+ end
+end
+
+function network:topsort()
+ nerv.info('network topology sort')
+ local degree = {}
+ for t = 1, self.chunk_size do
+ degree[t] = {}
+ for i = 1, #self.layers do
+ degree[t][i] = 0
+ end
+ end
+
+ for t = 1, self.chunk_size do
+ for i = 1, #self.layers do
+ local _, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_out do
+ if self.output_conn[i][j] ~= nil then
+ local edge = self.output_conn[i][j]
+ local id, time = edge[1], edge[3] + t
+ if time >= 1 and time <= self.chunk_size and id ~= 0 then
+ degree[time][id] = degree[time][id] + 1
+ end
+ end
+ end
+ end
+ end
+
+ self.queue = {}
+ local l = 1
+ local r = 0
+ for t = 1, self.chunk_size do
+ for i = 1, #self.layers do
+ if degree[t][i] == 0 then
+ r = r + 1
+ self.queue[r] = {chunk = t, id = i}
+ end
+ end
+ end
+ while l <= r do
+ local t, i = self.queue[l].chunk, self.queue[l].id
+ l = l + 1
+ local _, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_out do
+ if self.output_conn[i][j] ~= nil then
+ local edge = self.output_conn[i][j]
+ local id, time = edge[1], edge[3] + t
+ if time >= 1 and time <= self.chunk_size and id ~= 0 then
+ degree[time][id] = degree[time][id] - 1
+ if degree[time][id] == 0 then
+ r = r + 1
+ self.queue[r] = {chunk = time, id = id}
+ end
+ end
+ end
+ end
+ end
+
+ if r ~= self.chunk_size * #self.layers then
+ nerv.error('loop detected')
+ end
+end
+
+function network:make_initial_store()
+ nerv.info('network initing storage')
+
+ -- allocate memory
+ local memory = {}
+ local err_memory = {}
+ for t = 1 - self.delay, self.chunk_size + self.delay do
+ memory[t] = {}
+ err_memory[t] = {}
+ for i = 1, #self.layers do
+ memory[t][i] = {}
+ err_memory[t][i] = {}
+ local dim_in, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_in do
+ err_memory[t][i][j] = self.mat_type(self.batch_size, dim_in[j])
+ err_memory[t][i][j]:fill(0)
+ end
+ for j = 1, #dim_out do
+ memory[t][i][j] = self.mat_type(self.batch_size, dim_out[j])
+ memory[t][i][j]:fill(self.nn_act_default)
+ end
+ end
+ -- memory[t][0] stores network input
+ memory[t][0] = {}
+ for j = 1, #self.dim_in do
+ memory[t][0][j] = self.mat_type(self.batch_size, self.dim_in[j])
+ memory[t][0][j]:fill(self.nn_act_default)
+ end
+ -- err_memory[t][0] stores network err_input
+ err_memory[t][0] = {}
+ for j = 1, #self.dim_out do
+ err_memory[t][0][j] = self.mat_type(self.batch_size, self.dim_out[j])
+ err_memory[t][0][j]:fill(0)
+ end
+ end
+
+ -- connect memory and reference
+ self.input = {}
+ self.output = {}
+ self.err_input = {}
+ self.err_output = {}
+ for t = 1, self.chunk_size do
+ self.input[t] = {}
+ self.output[t] = {}
+ self.err_input[t] = {}
+ self.err_output[t] = {}
+ for i = 1, #self.layers do
+ self.input[t][i] = {}
+ self.output[t][i] = {}
+ self.err_input[t][i] = {}
+ self.err_output[t][i] = {}
+ local dim_in, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_in do
+ local edge = self.input_conn[i][j]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if id ~= 0 or t - time < 1 or t - time > self.chunk_size then
+ self.input[t][i][j] = memory[t - time][id][port]
+ end
+ if id ~= 0 then
+ self.err_output[t][i][j] = err_memory[t][i][j]
+ end
+ end
+ for j = 1, #dim_out do
+ local edge = self.output_conn[i][j]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if id ~= 0 then
+ self.output[t][i][j] = memory[t][i][j]
+ end
+ if id ~= 0 or t + time < 1 or t + time > self.chunk_size then
+ self.err_input[t][i][j] = err_memory[t + time][id][port]
+ end
+ end
+ end
+ end
+
+ -- check dangling reference
+ for t = 1, self.chunk_size do
+ for i = 1, #self.dim_in do
+ local edge = self.socket.inputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if t + time >= 1 and t + time <= self.chunk_size then
+ if self.input[t + time][id][port] ~= nil then
+ nerv.error('input reference not nil')
+ end
+ self.input[t + time][id][port] = true -- just a place holder
+ if self.err_output[t + time][id][port] ~= nil then
+ nerv.error('err_output reference not nil')
+ end
+ self.err_output[t + time][id][port] = true -- just a place holder
+ end
+ end
+ for i = 1, #self.dim_out do
+ local edge = self.socket.outputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if t - time >= 1 and t - time <= self.chunk_size then
+ if self.output[t - time][id][port] ~= nil then
+ nerv.error('output reference not nil')
+ end
+ self.output[t - time][id][port] = true -- just a place holder
+ if self.err_input[t - time][id][port] ~= nil then
+ nerv.error('err_input reference not nil')
+ end
+ self.err_input[t - time][id][port] = true -- just a place holder
+ end
+ end
+ end
+ for t = 1, self.chunk_size do
+ for i = 1, #self.layers do
+ local dim_in, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_in do
+ if self.input[t][i][j] == nil then
+ nerv.error('input reference dangling')
+ end
+ if self.err_output[t][i][j] == nil then
+ nerv.error('err_output reference dangling')
+ end
+ end
+ for j = 1, #dim_out do
+ if self.output[t][i][j] == nil then
+ nerv.error('output reference dangling')
+ end
+ if self.err_input[t][i][j] == nil then
+ nerv.error('err_input reference dangling')
+ end
+ end
+ end
+ end
+
+ -- allocate reference for legacy of previous mini-batch
+ self.legacy = {}
+ for t = 1 - self.delay, 0 do
+ self.legacy[t] = {}
+ for i = 1, #self.layers do
+ self.legacy[t][i] = {}
+ local _, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_out do
+ self.legacy[t][i][j] = memory[t][i][j]
+ end
+ end
+ end
+end
+
+function network:set_input(input)
+ for t = 1, self.chunk_size do
+ for i = 1, #self.dim_in do
+ local edge = self.socket.inputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if t + time >= 1 and t + time <= self.chunk_size then
+ self.input[t + time][id][port] = input[t][i]
+ end
+ end
+ end
+end
+
+function network:set_output(output)
+ for t = 1, self.chunk_size do
+ for i = 1, #self.dim_out do
+ local edge = self.socket.outputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if t - time >= 1 and t - time <= self.chunk_size then
+ self.output[t - time][id][port] = output[t][i]
+ end
+ end
+ end
+end
+
+function network:set_err_input(err_input)
+ for t = 1, self.chunk_size do
+ for i = 1, #self.dim_out do
+ local edge = self.socket.outputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if t - time >= 1 and t - time <= self.chunk_size then
+ self.err_input[t - time][id][port] = err_input[t][i]
+ end
+ end
+ end
+end
+
+function network:set_err_output(err_output)
+ for t = 1, self.chunk_size do
+ for i = 1, #self.dim_in do
+ local edge = self.socket.inputs[i]
+ local id, port, time = edge[1], edge[2], edge[3]
+ if t + time >= 1 and t + time <= self.chunk_size then
+ self.err_output[t + time][id][port] = err_output[t][i]
+ end
+ end
+ end
+end
+
+--[[
+ [info] is a table that carries the information of the current mini-batch. It must contain these fields:
+ [input], [output] : arrays of matrices that store the network input and output
+ [seq_length] : a table containing the length of every sequence
+ [new_seq] : a table containing the batch indices of new sequences
+ [do_train] : a boolean indicating whether to train
+ if [do_train] is true, these fields must also be present:
+ [err_input], [err_output] : arrays of matrices that store the network err_input and err_output
+--]]
+function network:mini_batch_init(info)
+ self.info = info
+ self:set_input(self.info.input)
+ self:set_output(self.info.output)
+
+ -- calculate border
+ self.max_length = 0
+ self.border = {}
+ for i = 1, self.chunk_size do
+ self.border[i] = {}
+ end
+ for i = 1, self.batch_size do
+ if self.info.seq_length[i] > self.max_length then
+ self.max_length = self.info.seq_length[i]
+ end
+ for t = 1, self.delay do
+ local chunk = self.info.seq_length[i] + t
+ if chunk > self.chunk_size then
+ break
+ end
+ table.insert(self.border[chunk], i)
+ end
+ end
+
+ -- copy legacy
+ for t = 1 - self.delay, 0 do
+ for i = 1, #self.layers do
+ local _, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_out do
+ if t + self.chunk_size >= 1 and self.output_conn[i][j][1] ~= 0 then
+ self.legacy[t][i][j]:copy_from(self.output[t + self.chunk_size][i][j])
+ end
+ for k = 1, #self.info.new_seq do
+ local batch = self.info.new_seq[k]
+ self.legacy[t][i][j][batch - 1]:fill(self.nn_act_default)
+ end
+ end
+ end
+ end
+
+ if self.info.do_train then
+ self:set_err_input(self.info.err_input)
+ self:set_err_output(self.info.err_output)
+
+ -- flush border gradient
+ for t = self.max_length + 1, self.max_length + self.delay do
+ if t > self.chunk_size then
+ break
+ end
+ for i = 1, #self.layers do
+ local dim_in, _ = self.layers[i]:get_dim()
+ for j = 1, #dim_in do
+ self.err_output[t][i][j]:fill(0)
+ end
+ end
+ end
+ end
+end
+
+function network:propagate()
+ for i = 1, #self.queue do
+ local t, id = self.queue[i].chunk, self.queue[i].id
+ if t <= self.max_length then
+ self.layers[id]:propagate(self.input[t][id], self.output[t][id], t)
+ end
+ -- flush border activation
+ for j = 1, #self.border[t] do
+ local batch = self.border[t][j]
+ local _, dim_out = self.layers[id]:get_dim()
+ for k = 1, #dim_out do
+ self.output[t][id][k][batch - 1]:fill(self.nn_act_default)
+ end
+ end
+ end
+end
+
+function network:back_propagate()
+ for i = #self.queue, 1, -1 do
+ local t, id = self.queue[i].chunk, self.queue[i].id
+ if t <= self.max_length then
+ -- flush border gradient
+ for j = 1, #self.border[t] do
+ local batch = self.border[t][j]
+ local _, dim_out = self.layers[id]:get_dim()
+ for k = 1, #dim_out do
+ self.err_input[t][id][k][batch - 1]:fill(0)
+ end
+ end
+ self.layers[id]:back_propagate(self.err_input[t][id], self.err_output[t][id], self.input[t][id], self.output[t][id], t)
+ if self.clip ~= nil then
+ local dim_in, _ = self.layers[id]:get_dim()
+ for j = 1, #dim_in do
+ self.err_output[t][id][j]:clip(-self.clip, self.clip)
+ end
+ end
+ end
+ end
+end
+
+function network:update()
+ for i = 1, #self.queue do
+ local t, id = self.queue[i].chunk, self.queue[i].id
+ if t <= self.max_length then
+ self.layers[id]:update(self.err_input[t][id], self.input[t][id], self.output[t][id], t)
+ end
+ end
+end
+
+function network:set_attr(name, value)
+ self.network:set_attr(name, value)
+end
+
+function network:get_sublayer(id)
+ return self.network:get_sublayer(id)
+end
+
+function network:get_params()
+ return self.network:get_params()
+end
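
A minimal driving sketch (not part of the patch) for nerv.Network with chunk_size > 1 (truncated BPTT); the wrapped GraphLayer rnn, gconf, and the dimensions are illustrative assumptions, and the matrices in the info table are indexed chunk-first ([t][i]), matching set_input()/set_output() above:

    local chunk_size = 5
    local din, dout = 440, 1959                -- illustrative port dimensions
    local net = nerv.Network("rnn_net", gconf, {network = rnn, clip = 10})
    net:init(gconf.batch_size, chunk_size)
    net:epoch_init()

    local input, output, err_input, err_output = {}, {}, {}, {}
    for t = 1, chunk_size do
        input[t] = {gconf.cumat_type(gconf.batch_size, din)}   -- one matrix per input port
        output[t] = {gconf.cumat_type(gconf.batch_size, dout)}
        err_input[t] = {gconf.cumat_type(gconf.batch_size, dout)}
        err_output[t] = {input[t][1]:create()}
    end

    net:mini_batch_init({seq_length = table.vector(gconf.batch_size, chunk_size),
                         new_seq = {},        -- 1-based batch indices that start a new sequence
                         do_train = true,
                         input = input, output = output,
                         err_input = err_input, err_output = err_output})
    net:propagate()
    net:back_propagate()
    net:update()
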
diff --git a/nerv/tnn/init.lua b/nerv/tnn/init.lua
deleted file mode 100644
index 44ce26b..0000000
--- a/nerv/tnn/init.lua
+++ /dev/null
@@ -1,47 +0,0 @@
-local LayerT = nerv.class('nerv.LayerT')
-
-function LayerT:__init(id, global_conf, layer_conf)
- nerv.error_method_not_implemented()
-end
-
-function LayerT:init(batch_size, chunk_size)
- nerv.error_method_not_implemented()
-end
-
-function LayerT:update(bp_err, input, output, t)
- nerv.error_method_not_implemented()
-end
-
-function LayerT:propagate(input, output, t)
- nerv.error_method_not_implemented()
-end
-
-function LayerT:back_propagate(bp_err, next_bp_err, input, output, t)
- nerv.error_method_not_implemented()
-end
-
-function LayerT:check_dim_len(len_in, len_out)
- local expected_in = #self.dim_in
- local expected_out = #self.dim_out
- if len_in > 0 and expected_in ~= len_in then
- nerv.error("layer %s expects %d inputs, %d given",
- self.id, len_in, expected_in)
- end
- if len_out > 0 and expected_out ~= len_out then
- nerv.error("layer %s expects %d outputs, %d given",
- self.id, len_out, expected_out)
- end
-end
-
-LayerT.find_param = nerv.Layer.find_param
-
-function LayerT:get_params()
- nerv.error_method_not_implemented()
-end
-
-function LayerT:get_dim()
- return self.dim_in, self.dim_out
-end
-
-nerv.include('sutil.lua')
-nerv.include('tnn.lua')
diff --git a/nerv/tnn/sutil.lua b/nerv/tnn/sutil.lua
deleted file mode 100644
index 6a968b7..0000000
--- a/nerv/tnn/sutil.lua
+++ /dev/null
@@ -1,80 +0,0 @@
-local Util = nerv.class("nerv.SUtil") --Scheduler Utility
-
-function Util.simple_split(inputstr, sep)
- if sep == nil then
- sep = "%s"
- end
- local t={} ; i=1
- for str in string.gmatch(inputstr, "([^"..sep.."]+)") do
- t[i] = str
- i = i + 1
- end
- return t
-end
-
-function Util.parse_schedule(str)
- --parse a string like "1.2*10:1.5" to a list of numbers
- local sch = {}
- local s = Util.simple_split(str, ':')
- for i = 1, #s do
- local p = Util.simple_split(s[i], "%*")
- if #p ~= 2 and #p ~= 1 then
- nerv.error("nerv.SUtil:parse_schedule error, unit(%s) not proper, has %d components.", s[i], #p)
- end
- if p[2] == nil then
- p[2] = "1"
- end
- p[1] = tonumber(p[1])
- p[2] = tonumber(p[2])
- for j = 1, p[2] do
- table.insert(sch, p[1])
- end
- end
-
- --for i = 1, #sch do
- -- print(sch[i])
- --end
- return sch
-end
-
-function Util.sche_get(s, it)
- --get s[it]
- if s == nil then
- nerv.info("Util.sche_get: warning, scheule is nil, returning zero...")
- return 0
- end
- if #s >= it then
- return s[it]
- else
- nerv.info("Util.sche_get: warning, it(%d) > #schedule(%d), returning the last one of schedule(%f)...", it, #s, s[#s])
- return s[#s]
- end
-end
-
-function Util.parse_commands_set(str)
- local coms = {}
- local s = Util.simple_split(str, ':,')
- for i = 1 ,#s do
- if coms[s[i]] == 1 then
- nerv.warning("nerv.SUtil.parse_commands_set command(%s) appered more than once in command_set(%s)", s[i], str)
- end
- coms[s[i]] = 1
- end
- return coms
-end
-
-function Util.log_redirect(fn)
- nerv.log_fh = assert(io.open(fn, "w"))
- nerv.info("CAUTION[LOG_REDIRECT], all nerv.printf/info/warning/error calls will be double-written to %s", fn)
- nerv.printf =
- function (fmt, ...)
- io.write(nerv.sprintf(fmt, ...))
- nerv.log_fh:write(nerv.sprintf(fmt, ...))
- nerv.log_fh:flush()
- end
- nerv.error =
- function (fmt, ...)
- nerv.log_fh:write(nerv.sprintf("[nerv] internal error:" .. fmt .. "\n", ...))
- error(nerv.sprintf("[nerv] internal error: " .. fmt .. "\n", ...))
- end
-end
diff --git a/nerv/tnn/tnn.lua b/nerv/tnn/tnn.lua
deleted file mode 100644
index d527fe6..0000000
--- a/nerv/tnn/tnn.lua
+++ /dev/null
@@ -1,596 +0,0 @@
-local TNN = nerv.class("nerv.TNN")
-
-local function parse_id(str)
- --used to parse layerid[portid],time
- local id, port, time, _
- _, _, id, port, time = string.find(str, "([a-zA-Z0-9_]+)%[([0-9]+)%][,]*([0-9]*)")
- if id == nil or port == nil then
- _, _, id, port, time = string.find(str, "(.+)%[([0-9]+)%][,]*([0-9]*)")
- if not (id == "<input>" or id == "<output>") then
- nerv.error("wrong format of connection id")
- end
- end
- --print(str, id, port, time)
- port = tonumber(port)
- if (time == nil) then
- time = 0
- else
- time = tonumber(time)
- end
- --now time don't need to be parsed
- return id, port
-end
-
-local function discover(id, layers, layer_repo)
- local ref = layers[id]
- if id == "<input>" or id == "<output>" then
- return nil
- end
- if ref == nil then
- local layer = layer_repo:get_layer(id)
- local dim_in, dim_out = layer:get_dim()
- ref = {
- layer = layer,
- id = layer.id,
- inputs_m = {}, --storage for computation, inputs_m[time][port]
- inputs_b = {}, --inputs_g[time][port], whether this input can been computed
- inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation, inputs_p_matbak[port]
- outputs_m = {},
- outputs_b = {},
- err_inputs_m = {},
- err_inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation
- err_inputs_b = {},
- err_outputs_m = {},
- err_outputs_b = {},
- i_conns_p = {}, --list of inputing connections
- o_conns_p = {}, --list of outputing connections
- dim_in = dim_in, --list of dimensions of ports
- dim_out = dim_out,
- }
- layers[id] = ref
- end
- return ref
-end
-
-nerv.TNN.FC = {} --flag const
-nerv.TNN.FC.SEQ_START = 4
-nerv.TNN.FC.SEQ_END = 8
-nerv.TNN.FC.HAS_INPUT = 1
-nerv.TNN.FC.HAS_LABEL = 2
-nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --This instance have both input and label
-
-function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, extend_t, global_conf, st_c, p_c, t_c)
- --Return a table of matrix storage from time (1-extend_t)..(chunk_size+extend_t)
- if (type(st) ~= "table") then
- nerv.error("st should be a table")
- end
- for i = 1 - extend_t - 2, chunk_size + extend_t + 2 do --intentionally allocated more time
- if (st[i] == nil) then
- st[i] = {}
- end
- st[i][p] = global_conf.cumat_type(batch_size, dim)
- st[i][p]:fill(0)
- if (st_c ~= nil) then
- if (st_c[i + t_c] == nil) then
- st_c[i + t_c] = {}
- end
- st_c[i + t_c][p_c] = st[i][p]
- end
- end
- collectgarbage("collect") --free the old one to save memory
-end
-
-function TNN:out_of_feedrange(t) --out of chunk, or no input, for the current feed
- if (t < 1 or t > self.chunk_size) then
- return true
- end
- if (self.feeds_now.flagsPack_now[t] == 0 or self.feeds_now.flagsPack_now[t] == nil) then
- return true
- end
- return false
-end
-
-function TNN:__init(id, global_conf, layer_conf)
- self.clip_t = layer_conf.clip_t
- if self.clip_t == nil then
- self.clip_t = 0
- end
- if self.clip_t > 0 then
- nerv.info("tnn(%s) will clip gradient across time with %f...", id, self.clip_t)
- end
-
- self.extend_t = layer_conf.extend_t --TNN will allocate storage of time for 1-extend_t .. chunk_size+extend_t
- if self.extend_t == nil then
- self.extend_t = 5
- end
- nerv.info("tnn(%s) will extend storage beyond MB border for time steps %d...", id, self.extend_t)
-
- local layers = {}
- local inputs_p = {} --map:port of the TNN to layer ref and port
- local outputs_p = {}
- local dim_in = layer_conf.dim_in
- local dim_out = layer_conf.dim_out
- local parsed_conns = {}
- local _
-
- for id, _ in pairs(layer_conf.sub_layers.layers) do --caution: with this line, some layer not connected will be included
- discover(id, layers, layer_conf.sub_layers)
- end
-
- for _, ll in pairs(layer_conf.connections) do
- local id_from, port_from = parse_id(ll[1])
- local id_to, port_to = parse_id(ll[2])
- local time_to = ll[3]
-
- print(id_from, id_to, time_to)
-
- local ref_from = discover(id_from, layers, layer_conf.sub_layers)
- local ref_to = discover(id_to, layers, layer_conf.sub_layers)
-
- if (id_from == "<input>") then
- if (dim_in[port_from] ~= ref_to.dim_in[port_to] or time_to ~= 0) then
- nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3])
- end
- inputs_p[port_from] = {["ref"] = ref_to, ["port"] = port_to}
- ref_to.inputs_m[port_to] = {} --just a place holder
- elseif (id_to == "<output>") then
- if (dim_out[port_to] ~= ref_from.dim_out[port_from] or time_to ~= 0) then
- nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3])
- end
- outputs_p[port_to] = {["ref"] = ref_from, ["port"] = port_from}
- ref_from.outputs_m[port_from] = {} --just a place holder
- else
- local conn_now = {
- ["src"] = {["ref"] = ref_from, ["port"] = port_from},
- ["dst"] = {["ref"] = ref_to, ["port"] = port_to},
- ["time"] = time_to
- }
- if (ref_to.dim_in[port_to] ~= ref_from.dim_out[port_from]) then
- nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3])
- end
- table.insert(parsed_conns, conn_now)
- ref_to.i_conns_p[conn_now.dst.port] = conn_now
- ref_from.o_conns_p[conn_now.src.port] = conn_now
- end
- end
-
- for id, ref in pairs(layers) do
- print(id, "#dim_in:", #ref.dim_in, "#dim_out:", #ref.dim_out, "#i_conns_p:", #ref.i_conns_p, "#o_conns_p", #ref.o_conns_p)
- end
-
- self.layers = layers
- self.inputs_p = inputs_p
- self.outputs_p = outputs_p
- self.id = id
- self.dim_in = dim_in
- self.dim_out = dim_out
- self.parsed_conns = parsed_conns
- self.gconf = global_conf
-end
-
-function TNN:init(batch_size, chunk_size)
- self.batch_size = batch_size
- self.chunk_size = chunk_size
- for i, conn in ipairs(self.parsed_conns) do --init storage for connections inside the NN
- local _, output_dim
- local ref_from, port_from, ref_to, port_to, time
- ref_from, port_from = conn.src.ref, conn.src.port
- ref_to, port_to = conn.dst.ref, conn.dst.port
- time = conn.time
-
- local dim = ref_from.dim_out[port_from]
- if (dim == 0) then
- nerv.error("layer %s has a zero dim port", ref_from.layer.id)
- end
-
- nerv.info("TNN initing storage %s->%s", ref_from.layer.id, ref_to.layer.id)
- ref_to.inputs_matbak_p[port_to] = self.gconf.cumat_type(batch_size, dim)
- self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.inputs_m, port_to, time)
- ref_from.err_inputs_matbak_p[port_from] = self.gconf.cumat_type(batch_size, dim)
- self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.err_outputs_m, port_to, time)
- end
-
- self.outputs_m = {}
- self.err_inputs_m = {}
- for i = 1, #self.dim_out do --Init storage for output ports
- local ref = self.outputs_p[i].ref
- local p = self.outputs_p[i].port
- self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.outputs_m, i, 0)
- self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_inputs_m, i, 0)
- end
-
- self.inputs_m = {}
- self.err_outputs_m = {}
- for i = 1, #self.dim_in do --Init storage for input ports
- local ref = self.inputs_p[i].ref
- local p = self.inputs_p[i].port
- self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.inputs_m, i, 0)
- self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_outputs_m, i, 0)
- end
-
- for id, ref in pairs(self.layers) do --Calling init for child layers
- for i = 1, #ref.dim_in do
- if (ref.inputs_m[i] == nil or ref.err_outputs_m[i] == nil) then
- nerv.error("dangling input port %d of layer %s", i, id)
- end
- end
- for i = 1, #ref.dim_out do
- if (ref.outputs_m[i] == nil or ref.err_inputs_m[i] == nil) then
- nerv.error("dangling output port %d of layer %s", i, id)
- end
- end
- -- initialize sub layers
- nerv.info("TNN initing sub-layer %s", ref.id)
- ref.layer:init(batch_size, chunk_size)
- collectgarbage("collect")
- end
-
- local flags_now = {}
- local flagsPack_now = {}
- for i = 1, chunk_size do
- flags_now[i] = {}
- flagsPack_now[i] = 0
- end
-
- self.feeds_now = {} --feeds is for the reader to fill
- self.feeds_now.inputs_m = self.inputs_m
- self.feeds_now.flags_now = flags_now
- self.feeds_now.flagsPack_now = flagsPack_now
-
- self:flush_all()
-end
-
---[[
-function DAGLayer:batch_resize(batch_size)
- self.gconf.batch_size = batch_size
-
- for i, conn in ipairs(self.parsed_conn) do
- local _, output_dim
- local ref_from, port_from, ref_to, port_to
- ref_from, port_from = unpack(conn[1])
- ref_to, port_to = unpack(conn[2])
- _, output_dim = ref_from.layer:get_dim()
-
- if ref_from.outputs[port_from]:nrow() ~= batch_size and output_dim[port_from] > 0 then
- local mid = self.gconf.cumat_type(batch_size, output_dim[port_from])
- local err_mid = mid:create()
-
- ref_from.outputs[port_from] = mid
- ref_to.inputs[port_to] = mid
-
- ref_from.err_inputs[port_from] = err_mid
- ref_to.err_outputs[port_to] = err_mid
- end
- end
- for id, ref in pairs(self.layers) do
- ref.layer:batch_resize(batch_size)
- end
- collectgarbage("collect")
-end
-]]--
-
-function TNN:flush_all() --flush all history and activation
- local _, ref
- for _, ref in pairs(self.layers) do
- for i = 1, #ref.dim_in do
- for t = 1 - self.extend_t, self.chunk_size + self.extend_t do
- ref.inputs_m[t][i]:fill(self.gconf.nn_act_default)
- if (ref.inputs_b[t] == nil) then
- ref.inputs_b[t] = {}
- end
- ref.inputs_b[t][i] = false
- ref.err_outputs_m[t][i]:fill(0)
- if (ref.err_outputs_b[t] == nil) then
- ref.err_outputs_b[t] = {}
- end
- ref.err_outputs_b[t][i] = false
- end
- end
- for i = 1, #ref.dim_out do
- for t = 1 - self.extend_t, self.chunk_size + self.extend_t do
- ref.outputs_m[t][i]:fill(self.gconf.nn_act_default)
- if (ref.outputs_b[t] == nil) then
- ref.outputs_b[t] = {}
- end
- ref.outputs_b[t][i] = false
- ref.err_inputs_m[t][i]:fill(0)
- if (ref.err_inputs_b[t] == nil) then
- ref.err_inputs_b[t] = {}
- end
- ref.err_inputs_b[t][i] = false
- end
- end
- end
-end
-
---reader: some reader
---Returns: bool, whether has new feed
---Returns: feeds, a table that will be filled with the reader's feeds
-function TNN:getfeed_from_reader(reader)
- local feeds_now = self.feeds_now
- local got_new = reader:get_batch(feeds_now)
- return got_new, feeds_now
-end
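The feeds table handed to the reader here is the one assembled in init above (inputs_m, flags_now, flagsPack_now), filled in place. A minimal reader stub compatible with that contract might look as follows; this is an illustrative sketch only, and DummyReader and its fields are hypothetical, not part of nerv.

local DummyReader = {}
DummyReader.__index = DummyReader

function DummyReader.new(chunk_size, batch_size, dim_in)
    return setmetatable({chunk_size = chunk_size,
                         batch_size = batch_size,
                         dim_in = dim_in}, DummyReader)
end

--fill feeds.inputs_m[t][i] and the per-stream flags, then report whether
--a new mini-batch was produced
function DummyReader:get_batch(feeds)
    for t = 1, self.chunk_size do
        for i = 1, #self.dim_in do
            feeds.inputs_m[t][i]:fill(0) --real frames would be copied in here
        end
        for s = 1, self.batch_size do
            feeds.flags_now[t][s] = nerv.TNN.FC.HAS_INPUT
        end
        feeds.flagsPack_now[t] = nerv.TNN.FC.HAS_INPUT
    end
    return true --false would signal that the data is exhausted
end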
-
-function TNN:move_right_to_nextmb(list_t) --copy output activations from the tail of the chunk into the history slots 1-extend_t..0
- if list_t == nil then
- list_t = {}
- for i = self.extend_t, 1, -1 do
- list_t[i] = 1 - i
- end
- end
- for i = 1, #list_t do
- local t = list_t[i]
- if t < 1 - self.extend_t or t > 0 then
- nerv.error("MB move range error")
- end
- for id, ref in pairs(self.layers) do
- for p = 1, #ref.dim_out do
- ref.outputs_m[t][p]:copy_fromd(ref.outputs_m[t + self.chunk_size][p])
- end
- end
- end
-end
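The index arithmetic of the default shift is easier to see in isolation. A small standalone sketch, with illustrative values extend_t = 2 and chunk_size = 5, reproducing the default list_t built above:

local extend_t, chunk_size = 2, 5 --illustrative values only

local list_t = {}
for i = extend_t, 1, -1 do
    list_t[i] = 1 - i
end
--list_t is now {0, -1}: each history slot t <= 0 receives the activation
--from t + chunk_size, so the tail of the finished chunk becomes the
--cross-chunk history read by connections with a positive time delay
for _, t in ipairs(list_t) do
    print(string.format("outputs_m[%d] <- outputs_m[%d]", t, t + chunk_size))
end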
-
-function TNN:net_propagate() --propagate according to feeds_now
- for t = 1, self.chunk_size, 1 do
- for id, ref in pairs(self.layers) do
- for p = 1, #ref.dim_out do
- ref.outputs_b[t][p] = false
- end
- for p = 1, #ref.dim_in do
- ref.inputs_b[t][p] = false
- end
- end
- end
-
- local feeds_now = self.feeds_now
- for t = 1, self.chunk_size do --some layers may not have inputs at times 1..chunk_size
- for id, ref in pairs(self.layers) do
- if #ref.dim_in > 0 then --some layers exist only to hold parameters
- self:propagate_dfs(ref, t)
- end
- end
- end
- for t = 1, self.chunk_size do
- if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then
- for i = 1, #self.dim_in do
- local ref = self.inputs_p[i].ref
- local p = self.inputs_p[i].port
- ref.inputs_b[t][p] = true
- self:propagate_dfs(ref, t)
- end
- end
- end
-
- local flag_out = true
- for t = 1, self.chunk_size do --check whether every output has been computed
- if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0) then
- for i = 1, #self.dim_out do
- local ref = self.outputs_p[i].ref
- if (ref.outputs_b[t][1] ~= true) then
- flag_out = false
- break
- end
- end
- end
- end
-
- if (flag_out == false) then
- nerv.error("some thing wrong, some labeled output is not propagated")
- end
-end
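The per-timestep checks above test bits of flagsPack_now[t], which packs the flags of every stream in the mini-batch into one word. A sketch of that packing and of the tests used here, assuming the luabitop bit module and illustrative flag values (the real constants live in nerv.TNN.FC):

local bit = require 'bit' --luabitop, as used by the TNN code

--illustrative values; the actual constants are defined in nerv.TNN.FC
local FC = {HAS_INPUT = 1, HAS_LABEL = 2, SEQ_START = 4, SEQ_END = 8}

--pack the per-stream flags of one timestep into a single word
local function pack_flags(flags_t)
    local packed = 0
    for _, f in ipairs(flags_t) do
        packed = bit.bor(packed, f)
    end
    return packed
end

local flags_t = {bit.bor(FC.HAS_INPUT, FC.SEQ_START), FC.HAS_INPUT}
local packed = pack_flags(flags_t)
print(bit.band(packed, FC.HAS_INPUT) > 0)                      --true: at least one stream has input
print(bit.band(packed, bit.bor(FC.SEQ_START, FC.SEQ_END)) > 0) --true: some stream crosses a sequence border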
-
---ref: the TNN_ref of a layer
---t: the current time to propagate
-function TNN:propagate_dfs(ref, t)
- if (self:out_of_feedrange(t)) then
- return
- end
- if (ref.outputs_b[t][1] == true) then --already propagated; port 1 is checked as a representative since all output ports are set together
- return
- end
-
- --print("debug dfs", ref.layer.id, t)
-
- local flag = true --whether all inputs are ready
- for _, conn in pairs(ref.i_conns_p) do
- local p = conn.dst.port
- if (not (ref.inputs_b[t][p] or self:out_of_feedrange(t - conn.time))) then
- flag = false
- break
- end
- end
- if (flag == false) then
- return
- end
-
- --ok, do propagate
- --print("debug ok, propagating");
- --Moving the mini-batch changes the bordering history, so it is wiser to flush the input activations
- if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border history
- for i = 1, self.batch_size do
- local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START)
- local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END)
- if (seq_start > 0 or seq_end > 0) then
- for p, conn in pairs(ref.i_conns_p) do
- if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to default
- ref.inputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default)
- end
- end
- end
- end
- end
- self.gconf.timer:tic("tnn_actual_layer_propagate")
- ref.layer:propagate(ref.inputs_m[t], ref.outputs_m[t], t) --propagate!
- self.gconf.timer:toc("tnn_actual_layer_propagate")
- --[[
- if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --restore cross-border history
- for i = 1, self.batch_size do
- local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START)
- local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END)
- if (seq_start > 0 or seq_end > 0) then
- for p, conn in pairs(ref.o_conns_p) do
- if ((ref.o_conns_p[p].time > 0 and seq_end > 0) or (ref.o_conns_p[p].time < 0 and seq_start > 0)) then
- ref.outputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default)
- end
- end
- end
- end
- end
- ]]--
- --mark this layer's outputs as computed
- for i = 1, #ref.dim_out do
- if (ref.outputs_b[t][i] == true) then
- nerv.error("this time's outputs_b should be false")
- end
- ref.outputs_b[t][i] = true
- end
-
- --try dfs for further layers
- for _, conn in pairs(ref.o_conns_p) do
- --print("debug dfs-searching", conn.dst.ref.layer.id)
- conn.dst.ref.inputs_b[t + conn.time][conn.dst.port] = true
- self:propagate_dfs(conn.dst.ref, t + conn.time)
- end
-end
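The readiness rule buried in the loop above is what turns the depth-first search into a topological traversal of the time-unrolled graph: a layer instance (ref, t) may fire once every incoming connection has either delivered its input at time t or points outside the feed range. A condensed standalone restatement of that rule, as a hypothetical helper that is not part of tnn.lua:

local function ready_to_propagate(self, ref, t)
    for _, conn in pairs(ref.i_conns_p) do
        local p = conn.dst.port
        --an input port blocks propagation only if it is still unfilled and
        --its source lies inside the feed range
        if not (ref.inputs_b[t][p] or self:out_of_feedrange(t - conn.time)) then
            return false
        end
    end
    return true
end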
-
---do_update: bool, false to back-propagate the errors, true to update the parameters
-function TNN:net_backpropagate(do_update) --back-propagate according to feeds_now
- if do_update == nil then
- nerv.error("do_update should not be nil")
- end
- for t = 1, self.chunk_size, 1 do
- for id, ref in pairs(self.layers) do
- for p = 1, #ref.dim_out do
- ref.err_inputs_b[t][p] = false
- end
- for p = 1, #ref.dim_in do
- ref.err_outputs_b[t][p] = false
- end
- end
- end
-
- local feeds_now = self.feeds_now
- for t = 1, self.chunk_size do --some layers may not have outputs at times 1..chunk_size
- for id, ref in pairs(self.layers) do
- if #ref.dim_out > 0 then --some layers exist only to hold parameters
- self:backpropagate_dfs(ref, t, do_update)
- end
- end
- end
- for t = 1, self.chunk_size do
- if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0 then
- for i = 1, #self.dim_out do
- local ref = self.outputs_p[i].ref
- local p = self.outputs_p[i].port
- ref.err_inputs_b[t][p] = true
- self:backpropagate_dfs(ref, t, do_update)
- end
- end
- end
-
- local flag_out = true
- for t = 1, self.chunk_size do --check whether every input has been back-propagated
- if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0 then
- for i = 1, #self.dim_in do
- local ref = self.inputs_p[i].ref
- if ref.err_outputs_b[t][1] ~= true then
- flag_out = false
- break
- end
- end
- end
- end
- if (flag_out == false) then
- nerv.error("some thing wrong, some input is not back_propagated")
- end
-end
-
---ref: the TNN_ref of a layer
---t: the current time to back-propagate
-function TNN:backpropagate_dfs(ref, t, do_update)
- if do_update == nil then
- nerv.error("got a nil do_update")
- end
- if self:out_of_feedrange(t) then
- return
- end
- if ref.err_outputs_b[t][1] == true then --already back-propagated; port 1 is checked as a representative since all ports are set together
- return
- end
-
- --print("debug dfs", ref.layer.id, t)
-
- local flag = true --whether all error inputs are ready
- for _, conn in pairs(ref.o_conns_p) do
- local p = conn.src.port
- if (not (ref.err_inputs_b[t][p] or self:out_of_feedrange(t + conn.time))) then
- flag = false
- break
- end
- end
- if (flag == false) then
- return
- end
-
- --ok, do back_propagate
- --print("debug ok, back-propagating(or updating)")
- if (do_update == false) then
- self.gconf.timer:tic("tnn_actual_layer_backpropagate")
- ref.layer:back_propagate(ref.err_inputs_m[t], ref.err_outputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t)
- self.gconf.timer:toc("tnn_actual_layer_backpropagate")
- if self.clip_t > 0 then
- for _, conn in pairs(ref.i_conns_p) do
- local p = conn.dst.port --port for ref
- if conn.time ~= 0 then
- --print("debug clip_t tnn", ref.id, "port:", p, "clip:", self.clip_t)
- ref.err_outputs_m[t][p]:clip(-self.clip_t, self.clip_t)
- end
- end
- end
- else
- --print(ref.err_inputs_m[t][1])
- self.gconf.timer:tic("tnn_actual_layer_update")
- ref.layer:update(ref.err_inputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t)
- self.gconf.timer:toc("tnn_actual_layer_update")
- end
-
- if (do_update == false and bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border errors
- for i = 1, self.batch_size do
- local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START)
- local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END)
- if (seq_start > 0 or seq_end > 0) then
- for p, conn in pairs(ref.i_conns_p) do
- if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to zero
- ref.err_outputs_m[t][p][i - 1]:fill(0)
- end
- end
- end
- end
- end
-
- for i = 1, #ref.dim_in do
- if (ref.err_outputs_b[t][i] == true) then
- nerv.error("this time's outputs_b should be false")
- end
- ref.err_outputs_b[t][i] = true
- end
-
- --try dfs for further layers
- for _, conn in pairs(ref.i_conns_p) do
- --print("debug dfs-searching", conn.src.ref.layer.id)
- conn.src.ref.err_inputs_b[t - conn.time][conn.src.port] = true
- self:backpropagate_dfs(conn.src.ref, t - conn.time, do_update)
- end
-end
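Gradient clipping above is applied only to error signals on delayed (time ~= 0) connections. The matrix method :clip(-clip_t, clip_t) is assumed here to be an element-wise clamp; a plain-Lua sketch of that semantics on a number array:

--element-wise clamp, the behaviour assumed of err_outputs_m[t][p]:clip(low, high)
local function clip(values, low, high)
    for i = 1, #values do
        if values[i] < low then
            values[i] = low
        elseif values[i] > high then
            values[i] = high
        end
    end
    return values
end

--e.g. with clip_t = 5: {-7.5, 0.3, 12} becomes {-5, 0.3, 5}
print(table.concat(clip({-7.5, 0.3, 12}, -5, 5), ", "))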
-
---Return: nerv.ParamRepo
-function TNN:get_params()
- local param_repos = {}
- for id, ref in pairs(self.layers) do
- table.insert(param_repos, ref.layer:get_params())
- end
- return nerv.ParamRepo.merge(param_repos)
-end
-