| field | value | date |
|---|---|---|
| author | Determinant <[email protected]> | 2016-03-11 17:33:35 +0800 |
| committer | Determinant <[email protected]> | 2016-03-11 17:33:35 +0800 |
| commit | a54332ce81129e81fbb1d041ec41aa5955868c5e (patch) | |
| tree | cf5c43f1ddad7bc2430ea8191f943b0783e5fc2c /nerv | |
| parent | e6d28de460dfd06d696d369119247179c7a7525d (diff) | |
adapt asr_trainer.lua to new architecture
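
The adaptation replaces the trainer's direct DAGLayer-style calls with the unified nerv.Network front end: the compiled graph is wrapped once, storage is allocated with `init`, and each mini-batch hands its buffers over through `mini_batch_init` before the now parameterless `propagate`/`back_propagate`/`update` calls. A condensed sketch of the new call sequence, taken from the asr_trainer.lua hunks below (`gconf`, `layer_repo`, `input`, `output`, `err_input`, `err_output` and `bp` are locals of the surrounding trainer code):

```lua
-- Condensed from the asr_trainer.lua hunks in this commit; the surrounding
-- locals (gconf, layer_repo, input, output, err_input, err_output, bp) come
-- from the trainer itself.
local network = nerv.Network("nt", gconf, {network = get_network(layer_repo)})
network:init(gconf.batch_size, 1)       -- allocate storage once (chunk_size = 1)

network:epoch_init()                    -- re-initialize sub-layers at the start of a pass
network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
                         new_seq    = {},
                         do_train   = bp,
                         input      = {input},
                         output     = {output},
                         err_input  = {err_input},
                         err_output = {err_output}})
network:propagate()
if bp then                              -- bp: whether this pass back-propagates
    network:back_propagate()
    network:update()
end
```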
Diffstat (limited to 'nerv')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nerv/Makefile | 5 |
| -rw-r--r-- | nerv/examples/asr_trainer.lua | 22 |
| -rw-r--r-- | nerv/examples/timit_baseline2.lua | 60 |
| -rw-r--r-- | nerv/init.lua | 10 |
| -rw-r--r-- | nerv/layer/sigmoid.lua | 6 |
| -rw-r--r-- | nerv/nn/init.lua | 1 |
| -rw-r--r-- | nerv/nn/layer_dag.lua | 352 |
| -rw-r--r-- | nerv/nn/network.lua | 14 |
| -rw-r--r-- | nerv/tnn/init.lua | 47 |
| -rw-r--r-- | nerv/tnn/sutil.lua | 80 |
| -rw-r--r-- | nerv/tnn/tnn.lua | 596 |
11 files changed, 70 insertions, 1123 deletions
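
Most of the 1123 deleted lines are nn/layer_dag.lua and the tnn/ modules, whose roles are taken over by nerv.GraphLayer and nerv.Network. For the example configs the visible change is how connections are declared; the sketch below contrasts the two styles using the global_transf block from the timit_baseline2.lua hunk (`layer_repo` is the repo built earlier in `make_layer_repo`, and the trailing 0 is the connection's time offset, the same field the removed TNN parser read as `ll[3]`):

```lua
-- Old style: nerv.DAGLayer with a map from source port to destination port.
global_transf = {
    dim_in = {440}, dim_out = {440},
    sub_layers = layer_repo,
    connections = {
        ["<input>[1]"] = "blayer1[1]",
        ["blayer1[1]"] = "wlayer1[1]",
        ["wlayer1[1]"] = "<output>[1]"
    }
}

-- New style: nerv.GraphLayer with {from, to, time} triples; time 0 means a
-- same-frame (feed-forward) link.
global_transf = {
    dim_in = {440}, dim_out = {440},
    layer_repo = layer_repo,
    connections = {
        {"<input>[1]", "blayer1[1]", 0},
        {"blayer1[1]", "wlayer1[1]", 0},
        {"wlayer1[1]", "<output>[1]", 0}
    }
}
```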
```diff
diff --git a/nerv/Makefile b/nerv/Makefile
index 421eda0..7921bd9 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -43,9 +43,8 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
 	layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua \
 	layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
 	layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
-	nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua nn/network.lua \
-	io/sgd_buffer.lua \
-	tnn/init.lua tnn/sutil.lua tnn/tnn.lua
+	nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua \
+	io/sgd_buffer.lua
 
 INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 5bf28bd..6bdf57c 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -20,6 +20,12 @@ local function build_trainer(ifname)
     local network = get_network(layer_repo)
     local global_transf = get_global_transf(layer_repo)
     local input_order = get_input_order()
+
+    network = nerv.Network("nt", gconf, {network = network})
+    network:init(gconf.batch_size, 1)
+    global_transf = nerv.Network("gt", gconf, {network = global_transf})
+    global_transf:init(gconf.batch_size, 1)
+
     local iterative_trainer = function (prefix, scp_file, bp, rebind_param_repo)
         -- rebind the params if necessary
         if rebind_param_repo then
@@ -32,10 +38,11 @@ local function build_trainer(ifname)
         -- build buffer
         local buffer = make_buffer(make_readers(scp_file, layer_repo))
         -- initialize the network
-        network:init(gconf.batch_size)
         gconf.cnt = 0
         err_input = {mat_type(gconf.batch_size, 1)}
         err_input[1]:fill(1)
+        network:epoch_init()
+        global_transf:epoch_init()
         for data in buffer.get_data, buffer do
             -- prine stat periodically
             gconf.cnt = gconf.cnt + 1
@@ -69,10 +76,17 @@
             for i = 1, #input do
                 table.insert(err_output, input[i]:create())
             end
-            network:propagate(input, output)
+            network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
+                                     new_seq = {},
+                                     do_train = bp,
+                                     input = {input},
+                                     output = {output},
+                                     err_input = {err_input},
+                                     err_output = {err_output}})
+            network:propagate()
             if bp then
-                network:back_propagate(err_input, err_output, input, output)
-                network:update(err_input, input, output)
+                network:back_propagate()
+                network:update()
             end
             -- collect garbage in-time to save GPU memory
             collectgarbage("collect")
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index 2d144b5..d783c3d 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -61,35 +61,35 @@ function make_layer_repo(param_repo)
 
     layer_repo:add_layers(
     {
-        ["nerv.DAGLayer"] =
+        ["nerv.GraphLayer"] =
         {
             global_transf = {
                 dim_in = {440}, dim_out = {440},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "blayer1[1]",
-                    ["blayer1[1]"] = "wlayer1[1]",
-                    ["wlayer1[1]"] = "<output>[1]"
+                    {"<input>[1]", "blayer1[1]", 0},
+                    {"blayer1[1]", "wlayer1[1]", 0},
+                    {"wlayer1[1]", "<output>[1]", 0}
                 }
             },
             main = {
                 dim_in = {440}, dim_out = {1959},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "affine0[1]",
-                    ["affine0[1]"] = "sigmoid0[1]",
-                    ["sigmoid0[1]"] = "affine1[1]",
-                    ["affine1[1]"] = "sigmoid1[1]",
-                    ["sigmoid1[1]"] = "affine2[1]",
-                    ["affine2[1]"] = "sigmoid2[1]",
-                    ["sigmoid2[1]"] = "affine3[1]",
-                    ["affine3[1]"] = "sigmoid3[1]",
-                    ["sigmoid3[1]"] = "affine4[1]",
-                    ["affine4[1]"] = "sigmoid4[1]",
-                    ["sigmoid4[1]"] = "affine5[1]",
-                    ["affine5[1]"] = "sigmoid5[1]",
-                    ["sigmoid5[1]"] = "affine6[1]",
-                    ["affine6[1]"] = "<output>[1]"
+                    {"<input>[1]", "affine0[1]", 0},
+                    {"affine0[1]", "sigmoid0[1]", 0},
+                    {"sigmoid0[1]", "affine1[1]", 0},
+                    {"affine1[1]", "sigmoid1[1]", 0},
+                    {"sigmoid1[1]", "affine2[1]", 0},
+                    {"affine2[1]", "sigmoid2[1]", 0},
+                    {"sigmoid2[1]", "affine3[1]", 0},
+                    {"affine3[1]", "sigmoid3[1]", 0},
+                    {"sigmoid3[1]", "affine4[1]", 0},
+                    {"affine4[1]", "sigmoid4[1]", 0},
+                    {"sigmoid4[1]", "affine5[1]", 0},
+                    {"affine5[1]", "sigmoid5[1]", 0},
+                    {"sigmoid5[1]", "affine6[1]", 0},
+                    {"affine6[1]", "<output>[1]", 0}
                 }
             }
         }
@@ -97,25 +97,25 @@ function make_layer_repo(param_repo)
 
     layer_repo:add_layers(
     {
-        ["nerv.DAGLayer"] =
+        ["nerv.GraphLayer"] =
         {
             ce_output = {
                 dim_in = {440, 1}, dim_out = {1},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "main[1]",
-                    ["main[1]"] = "ce_crit[1]",
-                    ["<input>[2]"] = "ce_crit[2]",
-                    ["ce_crit[1]"] = "<output>[1]"
+                    {"<input>[1]", "main[1]", 0},
+                    {"main[1]", "ce_crit[1]", 0},
+                    {"<input>[2]", "ce_crit[2]", 0},
+                    {"ce_crit[1]", "<output>[1]", 0}
                 }
             },
             softmax_output = {
                 dim_in = {440}, dim_out = {1959},
-                sub_layers = layer_repo,
+                layer_repo = layer_repo,
                 connections = {
-                    ["<input>[1]"] = "main[1]",
-                    ["main[1]"] = "softmax[1]",
-                    ["softmax[1]"] = "<output>[1]"
+                    {"<input>[1]", "main[1]", 0},
+                    {"main[1]", "softmax[1]", 0},
+                    {"softmax[1]", "<output>[1]", 0}
                 }
             }
         }
diff --git a/nerv/init.lua b/nerv/init.lua
index da7df29..ff944b8 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -347,10 +347,18 @@ function table.extend(tbl1, tbl2)
     end
 end
 
+function table.vector(len, fill)
+    local v = {}
+    fill = fill or 0
+    for i = 1, len do
+        table.insert(v, fill)
+    end
+    return v
+end
+
 -- the following lines trigger the initialization of basic modules
 nerv.include('matrix/init.lua')
 nerv.include('io/init.lua')
 nerv.include('layer/init.lua')
 nerv.include('nn/init.lua')
-nerv.include('tnn/init.lua')
diff --git a/nerv/layer/sigmoid.lua b/nerv/layer/sigmoid.lua
index a9f9749..5974ffc 100644
--- a/nerv/layer/sigmoid.lua
+++ b/nerv/layer/sigmoid.lua
@@ -3,6 +3,9 @@ local SigmoidLayer = nerv.class("nerv.SigmoidLayer", "nerv.Layer")
 function SigmoidLayer:__init(id, global_conf, layer_conf)
     nerv.Layer.__init(self, id, global_conf, layer_conf)
     self:check_dim_len(1, 1)
+    if self.dim_in[1] ~= self.dim_out[1] then
+        nerv.error("mismatching dimensions of input and output")
+    end
 end
 
 function SigmoidLayer:bind_params()
@@ -10,9 +13,6 @@ function SigmoidLayer:bind_params()
 end
 
 function SigmoidLayer:init()
-    if self.dim_in[1] ~= self.dim_out[1] then
-        nerv.error("mismatching dimensions of input and output")
-    end
 end
 
 function SigmoidLayer:batch_resize(batch_size)
diff --git a/nerv/nn/init.lua b/nerv/nn/init.lua
index c32ea09..1037d05 100644
--- a/nerv/nn/init.lua
+++ b/nerv/nn/init.lua
@@ -1,4 +1,3 @@
 nerv.include('layer_repo.lua')
 nerv.include('param_repo.lua')
-nerv.include('layer_dag.lua')
 nerv.include('network.lua')
```
diff --git a/nerv/nn/layer_dag.lua b/nerv/nn/layer_dag.lua deleted file mode 100644 index f999752..0000000 --- a/nerv/nn/layer_dag.lua +++ /dev/null @@ -1,352 +0,0 @@ -local DAGLayer = nerv.class("nerv.DAGLayer", "nerv.Layer") - -local function parse_id(str) - local id, port, _ - _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]") - if id == nil or port == nil then - _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]") - if not (id == "<input>" or id ==
"<output>") then - nerv.error("wrong format of connection id") - end - end - port = tonumber(port) - return id, port -end - -local function discover(id, layers, layer_repo) - local ref = layers[id] - if id == "<input>" or id == "<output>" then - return nil - end - if ref == nil then - local layer = layer_repo:get_layer(id) - local dim_in, dim_out = layer:get_dim() - ref = { - layer = layer, - inputs = {}, - outputs = {}, - err_inputs = {}, - err_outputs = {}, - next_layers = {}, - input_len = #dim_in, - output_len = #dim_out, - in_deg = 0, - visited = false - } - layers[id] = ref - end - return ref -end - -local function touch_list_by_idx(list, idx) - if list[idx] == nil then - list[idx] = {} - end -end - -function DAGLayer:__init(id, global_conf, layer_conf) - local layers = {} - local inputs = {} - local outputs = {} - local dim_in = layer_conf.dim_in - local dim_out = layer_conf.dim_out - local parsed_conn = {} - for from, to in pairs(layer_conf.connections) do - local id_from, port_from = parse_id(from) - local id_to, port_to = parse_id(to) - local ref_from = discover(id_from, layers, layer_conf.sub_layers) - local ref_to = discover(id_to, layers, layer_conf.sub_layers) - local input_dim, output_dim, _ - if ref_from then - touch_list_by_idx(ref_from.outputs, 1) - if ref_from.outputs[1][port_from] ~= nil then - nerv.error("%s has already been attached", from) - end - end - if ref_to then - touch_list_by_idx(ref_to.inputs, 1) - if ref_to.inputs[1][port_to] ~= nil then - nerv.error("%s has already been attached", to) - end - end - if id_from == "<input>" then - input_dim, _ = ref_to.layer:get_dim() - if dim_in[port_from] ~= input_dim[port_to] then - nerv.error("mismatching data dimension between %s and %s", from, to) - end - inputs[port_from] = {ref_to, port_to} - ref_to.inputs[1][port_to] = inputs -- just a place holder - elseif id_to == "<output>" then - _, output_dim = ref_from.layer:get_dim() - if output_dim[port_from] ~= dim_out[port_to] then - nerv.error("mismatching data dimension between %s and %s", from, to) - end - outputs[port_to] = {ref_from, port_from} - ref_from.outputs[1][port_from] = outputs -- just a place holder - else - _, output_dim = ref_from.layer:get_dim() - input_dim, _ = ref_to.layer:get_dim() - if output_dim[port_from] ~= input_dim[port_to] then - nerv.error("mismatching data dimension between %s and %s", from, to) - end - - table.insert(parsed_conn, - {{ref_from, port_from}, {ref_to, port_to}}) - table.insert(ref_from.next_layers, ref_to) -- add edge - ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer - end - end - - -- topology sort - local queue = {} - local l = 1 - local r = 1 - for id, ref in pairs(layers) do - if ref.in_deg == 0 then - table.insert(queue, ref) - nerv.info("adding source layer: %s", id) - r = r + 1 - end - end - if l == r then - nerv.error("loop detected") - end - while l < r do - local cur = queue[l] - cur.visited = true - l = l + 1 - for _, nl in pairs(cur.next_layers) do - nl.in_deg = nl.in_deg - 1 - if nl.in_deg == 0 then - table.insert(queue, nl) - r = r + 1 - end - end - end - for i = 1, #queue do - nerv.info("enqueued layer: %s %s", queue[i].layer, queue[i].layer.id) - end - - for id, ref in pairs(layers) do - -- check wether the graph is connected - if ref.visited == false then - nerv.warning("layer %s is ignored", id) - end - end - - nerv.Layer.__init(self, id, global_conf, layer_conf) - self.layers = layers - self.inputs = inputs - self.outputs = outputs - self.parsed_conn = parsed_conn - self.queue = 
queue -end - -function DAGLayer:bind_params() - -- do nothing (instead of rebinding params for each layer) -end - -function DAGLayer:init(batch_size, chunk_size) - if chunk_size == nil then - chunk_size = 1 - end - for i, conn in ipairs(self.parsed_conn) do - local _, output_dim - local ref_from, port_from, ref_to, port_to - ref_from, port_from = unpack(conn[1]) - ref_to, port_to = unpack(conn[2]) - _, output_dim = ref_from.layer:get_dim() - local dim = 1 - if output_dim[port_from] > 0 then - dim = output_dim[port_from] - end - - for t = 1, chunk_size do - local mid = self.mat_type(batch_size, dim) - local err_mid = mid:create() - touch_list_by_idx(ref_to.inputs, t) - touch_list_by_idx(ref_from.outputs, t) - touch_list_by_idx(ref_from.err_inputs, t) - touch_list_by_idx(ref_to.err_outputs, t) - - ref_from.outputs[t][port_from] = mid - ref_to.inputs[t][port_to] = mid - - ref_from.err_inputs[t][port_from] = err_mid - ref_to.err_outputs[t][port_to] = err_mid - end - end - for id, ref in pairs(self.layers) do - for i = 1, ref.input_len do - if ref.inputs[1][i] == nil then - nerv.error("dangling input port %d of layer %s", i, id) - end - end - for i = 1, ref.output_len do - if ref.outputs[1][i] == nil then - nerv.error("dangling output port %d of layer %s", i, id) - end - end - -- initialize sub layers - ref.layer:init(batch_size, chunk_size) - end - for i = 1, #self.dim_in do - if self.inputs[i] == nil then - nerv.error("dangling port %d of layer <input>", i) - end - end - for i = 1, #self.dim_out do - if self.outputs[i] == nil then - nerv.error("dangling port %d of layer <output>", i) - end - end -end - -function DAGLayer:batch_resize(batch_size, chunk_size) - if chunk_size == nil then - chunk_size = 1 - end - - for i, conn in ipairs(self.parsed_conn) do - local _, output_dim - local ref_from, port_from, ref_to, port_to - ref_from, port_from = unpack(conn[1]) - ref_to, port_to = unpack(conn[2]) - _, output_dim = ref_from.layer:get_dim() - - if ref_from.outputs[1][port_from]:nrow() ~= batch_size - and output_dim[port_from] > 0 then - for t = 1, chunk_size do - local mid = self.mat_type(batch_size, output_dim[port_from]) - local err_mid = mid:create() - - ref_from.outputs[t][port_from] = mid - ref_to.inputs[t][port_to] = mid - - ref_from.err_inputs[t][port_from] = err_mid - ref_to.err_outputs[t][port_to] = err_mid - end - end - end - for id, ref in pairs(self.layers) do - ref.layer:batch_resize(batch_size, chunk_size) - end - collectgarbage("collect") -end - -function DAGLayer:set_inputs(input, t) - for i = 1, #self.dim_in do - if input[i] == nil then - nerv.error("some input is not provided"); - end - local layer = self.inputs[i][1] - local port = self.inputs[i][2] - touch_list_by_idx(layer.inputs, t) - layer.inputs[t][port] = input[i] - end -end - -function DAGLayer:set_outputs(output, t) - for i = 1, #self.dim_out do - if output[i] == nil then - nerv.error("some output is not provided"); - end - local layer = self.outputs[i][1] - local port = self.outputs[i][2] - touch_list_by_idx(layer.outputs, t) - layer.outputs[t][port] = output[i] - end -end - -function DAGLayer:set_err_inputs(bp_err, t) - for i = 1, #self.dim_out do - local layer = self.outputs[i][1] - local port = self.outputs[i][2] - touch_list_by_idx(layer.err_inputs, t) - layer.err_inputs[t][port] = bp_err[i] - end -end - -function DAGLayer:set_err_outputs(next_bp_err, t) - for i = 1, #self.dim_in do - local layer = self.inputs[i][1] - local port = self.inputs[i][2] - touch_list_by_idx(layer.err_outputs, t) - 
layer.err_outputs[t][port] = next_bp_err[i] - end -end - -function DAGLayer:update(bp_err, input, output, t) - if t == nil then - t = 1 - end - self:set_err_inputs(bp_err, t) - self:set_inputs(input, t) - self:set_outputs(output, t) - for id, ref in pairs(self.queue) do - ref.layer:update(ref.err_inputs[t], ref.inputs[t], ref.outputs[t], t) - end -end - -function DAGLayer:propagate(input, output, t) - if t == nil then - t = 1 - end - self:set_inputs(input, t) - self:set_outputs(output, t) - local ret = false - for i = 1, #self.queue do - local ref = self.queue[i] - ret = ref.layer:propagate(ref.inputs[t], ref.outputs[t], t) - end - return ret -end - -function DAGLayer:back_propagate(bp_err, next_bp_err, input, output, t) - if t == nil then - t = 1 - end - self:set_err_outputs(next_bp_err, t) - self:set_err_inputs(bp_err, t) - self:set_inputs(input, t) - self:set_outputs(output, t) - for i = #self.queue, 1, -1 do - local ref = self.queue[i] - ref.layer:back_propagate(ref.err_inputs[t], ref.err_outputs[t], ref.inputs[t], ref.outputs[t], t) - end -end - -function DAGLayer:get_params() - local param_repos = {} - for id, ref in pairs(self.queue) do - table.insert(param_repos, ref.layer:get_params()) - end - return nerv.ParamRepo.merge(param_repos, self.loc_type) -end - -DAGLayer.PORT_TYPES = { - INPUT = {}, - OUTPUT = {}, - ERR_INPUT = {}, - ERR_OUTPUT = {} -} - -function DAGLayer:get_intermediate(id, port_type) - if id == "<input>" or id == "<output>" then - nerv.error("an actual real layer id is expected") - end - local layer = self.layers[id] - if layer == nil then - nerv.error("layer id %s not found", id) - end - if port_type == DAGLayer.PORT_TYPES.INPUT then - return layer.inputs - elseif port_type == DAGLayer.PORT_TYPES.OUTPUT then - return layer.outputs - elseif port_type == DAGLayer.PORT_TYPES.ERR_INPUT then - return layer.err_inputs - elseif port_type == DAGLayer.PORT_TYPES.ERR_OUTPUT then - return layer.err_outputs - end - nerv.error("unrecognized port type") -end diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua index 35e11e3..2cb83ce 100644 --- a/nerv/nn/network.lua +++ b/nerv/nn/network.lua @@ -109,12 +109,14 @@ function network:init(batch_size, chunk_size) self.chunk_size = chunk_size self:topsort() - + self:make_initial_store() collectgarbage('collect') +end +function network:epoch_init() for i = 1, #self.layers do - self.layers[i]:init(batch_size, chunk_size) + self.layers[i]:init(self.batch_size, self.chunk_size) end end @@ -123,7 +125,7 @@ function network:topsort() local degree = {} for t = 1, self.chunk_size do degree[t] = {} - for i = 1, #self.layers do + for i = 1, #self.layers do degree[t][i] = 0 end end @@ -154,7 +156,7 @@ function network:topsort() end end end - while l<=r do + while l <= r do local t, i = self.queue[l].chunk, self.queue[l].id l = l + 1 local _, dim_out = self.layers[i]:get_dim() @@ -214,7 +216,7 @@ function network:make_initial_store() end end - -- connect memory and reference + -- connect memory and reference self.input = {} self.output = {} self.err_input = {} @@ -420,7 +422,7 @@ function network:mini_batch_init(info) if self.info.do_train then self:set_err_input(self.info.err_input) self:set_err_output(self.info.err_output) - + -- flush border gradient for t = self.max_length + 1, self.max_length + self.delay do if t > self.chunk_size then diff --git a/nerv/tnn/init.lua b/nerv/tnn/init.lua deleted file mode 100644 index 44ce26b..0000000 --- a/nerv/tnn/init.lua +++ /dev/null @@ -1,47 +0,0 @@ -local LayerT = nerv.class('nerv.LayerT') - -function 
LayerT:__init(id, global_conf, layer_conf) - nerv.error_method_not_implemented() -end - -function LayerT:init(batch_size, chunk_size) - nerv.error_method_not_implemented() -end - -function LayerT:update(bp_err, input, output, t) - nerv.error_method_not_implemented() -end - -function LayerT:propagate(input, output, t) - nerv.error_method_not_implemented() -end - -function LayerT:back_propagate(bp_err, next_bp_err, input, output, t) - nerv.error_method_not_implemented() -end - -function LayerT:check_dim_len(len_in, len_out) - local expected_in = #self.dim_in - local expected_out = #self.dim_out - if len_in > 0 and expected_in ~= len_in then - nerv.error("layer %s expects %d inputs, %d given", - self.id, len_in, expected_in) - end - if len_out > 0 and expected_out ~= len_out then - nerv.error("layer %s expects %d outputs, %d given", - self.id, len_out, expected_out) - end -end - -LayerT.find_param = nerv.Layer.find_param - -function LayerT:get_params() - nerv.error_method_not_implemented() -end - -function LayerT:get_dim() - return self.dim_in, self.dim_out -end - -nerv.include('sutil.lua') -nerv.include('tnn.lua') diff --git a/nerv/tnn/sutil.lua b/nerv/tnn/sutil.lua deleted file mode 100644 index 6a968b7..0000000 --- a/nerv/tnn/sutil.lua +++ /dev/null @@ -1,80 +0,0 @@ -local Util = nerv.class("nerv.SUtil") --Scheduler Utility - -function Util.simple_split(inputstr, sep) - if sep == nil then - sep = "%s" - end - local t={} ; i=1 - for str in string.gmatch(inputstr, "([^"..sep.."]+)") do - t[i] = str - i = i + 1 - end - return t -end - -function Util.parse_schedule(str) - --parse a string like "1.2*10:1.5" to a list of numbers - local sch = {} - local s = Util.simple_split(str, ':') - for i = 1, #s do - local p = Util.simple_split(s[i], "%*") - if #p ~= 2 and #p ~= 1 then - nerv.error("nerv.SUtil:parse_schedule error, unit(%s) not proper, has %d components.", s[i], #p) - end - if p[2] == nil then - p[2] = "1" - end - p[1] = tonumber(p[1]) - p[2] = tonumber(p[2]) - for j = 1, p[2] do - table.insert(sch, p[1]) - end - end - - --for i = 1, #sch do - -- print(sch[i]) - --end - return sch -end - -function Util.sche_get(s, it) - --get s[it] - if s == nil then - nerv.info("Util.sche_get: warning, scheule is nil, returning zero...") - return 0 - end - if #s >= it then - return s[it] - else - nerv.info("Util.sche_get: warning, it(%d) > #schedule(%d), returning the last one of schedule(%f)...", it, #s, s[#s]) - return s[#s] - end -end - -function Util.parse_commands_set(str) - local coms = {} - local s = Util.simple_split(str, ':,') - for i = 1 ,#s do - if coms[s[i]] == 1 then - nerv.warning("nerv.SUtil.parse_commands_set command(%s) appered more than once in command_set(%s)", s[i], str) - end - coms[s[i]] = 1 - end - return coms -end - -function Util.log_redirect(fn) - nerv.log_fh = assert(io.open(fn, "w")) - nerv.info("CAUTION[LOG_REDIRECT], all nerv.printf/info/warning/error calls will be double-written to %s", fn) - nerv.printf = - function (fmt, ...) - io.write(nerv.sprintf(fmt, ...)) - nerv.log_fh:write(nerv.sprintf(fmt, ...)) - nerv.log_fh:flush() - end - nerv.error = - function (fmt, ...) - nerv.log_fh:write(nerv.sprintf("[nerv] internal error:" .. fmt .. "\n", ...)) - error(nerv.sprintf("[nerv] internal error: " .. fmt .. 
"\n", ...)) - end -end diff --git a/nerv/tnn/tnn.lua b/nerv/tnn/tnn.lua deleted file mode 100644 index d527fe6..0000000 --- a/nerv/tnn/tnn.lua +++ /dev/null @@ -1,596 +0,0 @@ -local TNN = nerv.class("nerv.TNN") - -local function parse_id(str) - --used to parse layerid[portid],time - local id, port, time, _ - _, _, id, port, time = string.find(str, "([a-zA-Z0-9_]+)%[([0-9]+)%][,]*([0-9]*)") - if id == nil or port == nil then - _, _, id, port, time = string.find(str, "(.+)%[([0-9]+)%][,]*([0-9]*)") - if not (id == "<input>" or id == "<output>") then - nerv.error("wrong format of connection id") - end - end - --print(str, id, port, time) - port = tonumber(port) - if (time == nil) then - time = 0 - else - time = tonumber(time) - end - --now time don't need to be parsed - return id, port -end - -local function discover(id, layers, layer_repo) - local ref = layers[id] - if id == "<input>" or id == "<output>" then - return nil - end - if ref == nil then - local layer = layer_repo:get_layer(id) - local dim_in, dim_out = layer:get_dim() - ref = { - layer = layer, - id = layer.id, - inputs_m = {}, --storage for computation, inputs_m[time][port] - inputs_b = {}, --inputs_g[time][port], whether this input can been computed - inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation, inputs_p_matbak[port] - outputs_m = {}, - outputs_b = {}, - err_inputs_m = {}, - err_inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation - err_inputs_b = {}, - err_outputs_m = {}, - err_outputs_b = {}, - i_conns_p = {}, --list of inputing connections - o_conns_p = {}, --list of outputing connections - dim_in = dim_in, --list of dimensions of ports - dim_out = dim_out, - } - layers[id] = ref - end - return ref -end - -nerv.TNN.FC = {} --flag const -nerv.TNN.FC.SEQ_START = 4 -nerv.TNN.FC.SEQ_END = 8 -nerv.TNN.FC.HAS_INPUT = 1 -nerv.TNN.FC.HAS_LABEL = 2 -nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --This instance have both input and label - -function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, extend_t, global_conf, st_c, p_c, t_c) - --Return a table of matrix storage from time (1-extend_t)..(chunk_size+extend_t) - if (type(st) ~= "table") then - nerv.error("st should be a table") - end - for i = 1 - extend_t - 2, chunk_size + extend_t + 2 do --intentionally allocated more time - if (st[i] == nil) then - st[i] = {} - end - st[i][p] = global_conf.cumat_type(batch_size, dim) - st[i][p]:fill(0) - if (st_c ~= nil) then - if (st_c[i + t_c] == nil) then - st_c[i + t_c] = {} - end - st_c[i + t_c][p_c] = st[i][p] - end - end - collectgarbage("collect") --free the old one to save memory -end - -function TNN:out_of_feedrange(t) --out of chunk, or no input, for the current feed - if (t < 1 or t > self.chunk_size) then - return true - end - if (self.feeds_now.flagsPack_now[t] == 0 or self.feeds_now.flagsPack_now[t] == nil) then - return true - end - return false -end - -function TNN:__init(id, global_conf, layer_conf) - self.clip_t = layer_conf.clip_t - if self.clip_t == nil then - self.clip_t = 0 - end - if self.clip_t > 0 then - nerv.info("tnn(%s) will clip gradient across time with %f...", id, self.clip_t) - end - - self.extend_t = layer_conf.extend_t --TNN will allocate storage of time for 1-extend_t .. 
chunk_size+extend_t - if self.extend_t == nil then - self.extend_t = 5 - end - nerv.info("tnn(%s) will extend storage beyond MB border for time steps %d...", id, self.extend_t) - - local layers = {} - local inputs_p = {} --map:port of the TNN to layer ref and port - local outputs_p = {} - local dim_in = layer_conf.dim_in - local dim_out = layer_conf.dim_out - local parsed_conns = {} - local _ - - for id, _ in pairs(layer_conf.sub_layers.layers) do --caution: with this line, some layer not connected will be included - discover(id, layers, layer_conf.sub_layers) - end - - for _, ll in pairs(layer_conf.connections) do - local id_from, port_from = parse_id(ll[1]) - local id_to, port_to = parse_id(ll[2]) - local time_to = ll[3] - - print(id_from, id_to, time_to) - - local ref_from = discover(id_from, layers, layer_conf.sub_layers) - local ref_to = discover(id_to, layers, layer_conf.sub_layers) - - if (id_from == "<input>") then - if (dim_in[port_from] ~= ref_to.dim_in[port_to] or time_to ~= 0) then - nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3]) - end - inputs_p[port_from] = {["ref"] = ref_to, ["port"] = port_to} - ref_to.inputs_m[port_to] = {} --just a place holder - elseif (id_to == "<output>") then - if (dim_out[port_to] ~= ref_from.dim_out[port_from] or time_to ~= 0) then - nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3]) - end - outputs_p[port_to] = {["ref"] = ref_from, ["port"] = port_from} - ref_from.outputs_m[port_from] = {} --just a place holder - else - local conn_now = { - ["src"] = {["ref"] = ref_from, ["port"] = port_from}, - ["dst"] = {["ref"] = ref_to, ["port"] = port_to}, - ["time"] = time_to - } - if (ref_to.dim_in[port_to] ~= ref_from.dim_out[port_from]) then - nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3]) - end - table.insert(parsed_conns, conn_now) - ref_to.i_conns_p[conn_now.dst.port] = conn_now - ref_from.o_conns_p[conn_now.src.port] = conn_now - end - end - - for id, ref in pairs(layers) do - print(id, "#dim_in:", #ref.dim_in, "#dim_out:", #ref.dim_out, "#i_conns_p:", #ref.i_conns_p, "#o_conns_p", #ref.o_conns_p) - end - - self.layers = layers - self.inputs_p = inputs_p - self.outputs_p = outputs_p - self.id = id - self.dim_in = dim_in - self.dim_out = dim_out - self.parsed_conns = parsed_conns - self.gconf = global_conf -end - -function TNN:init(batch_size, chunk_size) - self.batch_size = batch_size - self.chunk_size = chunk_size - for i, conn in ipairs(self.parsed_conns) do --init storage for connections inside the NN - local _, output_dim - local ref_from, port_from, ref_to, port_to, time - ref_from, port_from = conn.src.ref, conn.src.port - ref_to, port_to = conn.dst.ref, conn.dst.port - time = conn.time - - local dim = ref_from.dim_out[port_from] - if (dim == 0) then - nerv.error("layer %s has a zero dim port", ref_from.layer.id) - end - - nerv.info("TNN initing storage %s->%s", ref_from.layer.id, ref_to.layer.id) - ref_to.inputs_matbak_p[port_to] = self.gconf.cumat_type(batch_size, dim) - self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.inputs_m, port_to, time) - ref_from.err_inputs_matbak_p[port_from] = self.gconf.cumat_type(batch_size, dim) - self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.err_outputs_m, port_to, time) - end - - self.outputs_m = {} - self.err_inputs_m = {} - for i = 1, #self.dim_out do --Init storage for output ports - 
local ref = self.outputs_p[i].ref - local p = self.outputs_p[i].port - self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.outputs_m, i, 0) - self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_inputs_m, i, 0) - end - - self.inputs_m = {} - self.err_outputs_m = {} - for i = 1, #self.dim_in do --Init storage for input ports - local ref = self.inputs_p[i].ref - local p = self.inputs_p[i].port - self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.inputs_m, i, 0) - self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_outputs_m, i, 0) - end - - for id, ref in pairs(self.layers) do --Calling init for child layers - for i = 1, #ref.dim_in do - if (ref.inputs_m[i] == nil or ref.err_outputs_m[i] == nil) then - nerv.error("dangling input port %d of layer %s", i, id) - end - end - for i = 1, #ref.dim_out do - if (ref.outputs_m[i] == nil or ref.err_inputs_m[i] == nil) then - nerv.error("dangling output port %d of layer %s", i, id) - end - end - -- initialize sub layers - nerv.info("TNN initing sub-layer %s", ref.id) - ref.layer:init(batch_size, chunk_size) - collectgarbage("collect") - end - - local flags_now = {} - local flagsPack_now = {} - for i = 1, chunk_size do - flags_now[i] = {} - flagsPack_now[i] = 0 - end - - self.feeds_now = {} --feeds is for the reader to fill - self.feeds_now.inputs_m = self.inputs_m - self.feeds_now.flags_now = flags_now - self.feeds_now.flagsPack_now = flagsPack_now - - self:flush_all() -end - ---[[ -function DAGLayer:batch_resize(batch_size) - self.gconf.batch_size = batch_size - - for i, conn in ipairs(self.parsed_conn) do - local _, output_dim - local ref_from, port_from, ref_to, port_to - ref_from, port_from = unpack(conn[1]) - ref_to, port_to = unpack(conn[2]) - _, output_dim = ref_from.layer:get_dim() - - if ref_from.outputs[port_from]:nrow() ~= batch_size and output_dim[port_from] > 0 then - local mid = self.gconf.cumat_type(batch_size, output_dim[port_from]) - local err_mid = mid:create() - - ref_from.outputs[port_from] = mid - ref_to.inputs[port_to] = mid - - ref_from.err_inputs[port_from] = err_mid - ref_to.err_outputs[port_to] = err_mid - end - end - for id, ref in pairs(self.layers) do - ref.layer:batch_resize(batch_size) - end - collectgarbage("collect") -end -]]-- - -function TNN:flush_all() --flush all history and activation - local _, ref - for _, ref in pairs(self.layers) do - for i = 1, #ref.dim_in do - for t = 1 - self.extend_t, self.chunk_size + self.extend_t do - ref.inputs_m[t][i]:fill(self.gconf.nn_act_default) - if (ref.inputs_b[t] == nil) then - ref.inputs_b[t] = {} - end - ref.inputs_b[t][i] = false - ref.err_outputs_m[t][i]:fill(0) - if (ref.err_outputs_b[t] == nil) then - ref.err_outputs_b[t] = {} - end - ref.err_outputs_b[t][i] = false - end - end - for i = 1, #ref.dim_out do - for t = 1 - self.extend_t, self.chunk_size + self.extend_t do - ref.outputs_m[t][i]:fill(self.gconf.nn_act_default) - if (ref.outputs_b[t] == nil) then - ref.outputs_b[t] = {} - end - ref.outputs_b[t][i] = false - ref.err_inputs_m[t][i]:fill(0) - if (ref.err_inputs_b[t] == nil) then - ref.err_inputs_b[t] = {} - end - ref.err_inputs_b[t][i] = false - end - end - end -end - ---reader: some reader ---Returns: bool, whether has new feed ---Returns: feeds, a table that will be filled with the reader's feeds 
-function TNN:getfeed_from_reader(reader) - local feeds_now = self.feeds_now - local got_new = reader:get_batch(feeds_now) - return got_new, feeds_now -end - -function TNN:move_right_to_nextmb(list_t) --move output history activations of 1..chunk_size to 1-chunk_size..0 - if list_t == nil then - list_t = {} - for i = self.extend_t, 1, -1 do - list_t[i] = 1 - i - end - end - for i = 1, #list_t do - t = list_t[i] - if t < 1 - self.extend_t or t > 0 then - nerv.error("MB move range error") - end - for id, ref in pairs(self.layers) do - for p = 1, #ref.dim_out do - ref.outputs_m[t][p]:copy_fromd(ref.outputs_m[t + self.chunk_size][p]) - end - end - end -end - -function TNN:net_propagate() --propagate according to feeds_now - for t = 1, self.chunk_size, 1 do - for id, ref in pairs(self.layers) do - for p = 1, #ref.dim_out do - ref.outputs_b[t][p] = false - end - for p = 1, #ref.dim_in do - ref.inputs_b[t][p] = false - end - end - end - - local feeds_now = self.feeds_now - for t = 1, self.chunk_size do --some layer maybe do not have inputs from time 1..chunk_size - for id, ref in pairs(self.layers) do - if #ref.dim_in > 0 then --some layer is just there(only to save some parameter) - self:propagate_dfs(ref, t) - end - end - end - for t = 1, self.chunk_size do - if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then - for i = 1, #self.dim_in do - local ref = self.inputs_p[i].ref - local p = self.inputs_p[i].port - ref.inputs_b[t][p] = true - self:propagate_dfs(ref, t) - end - end - end - - local flag_out = true - for t = 1, self.chunk_size do --check whether every output has been computed - if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0) then - for i = 1, #self.dim_out do - local ref = self.outputs_p[i].ref - if (ref.outputs_b[t][1] ~= true) then - flag_out = false - break - end - end - end - end - - if (flag_out == false) then - nerv.error("some thing wrong, some labeled output is not propagated") - end -end - ---ref: the TNN_ref of a layer ---t: the current time to propagate -function TNN:propagate_dfs(ref, t) - if (self:out_of_feedrange(t)) then - return - end - if (ref.outputs_b[t][1] == true) then --already propagated, 1 is just a random port - return - end - - --print("debug dfs", ref.layer.id, t) - - local flag = true --whether have all inputs - for _, conn in pairs(ref.i_conns_p) do - local p = conn.dst.port - if (not (ref.inputs_b[t][p] or self:out_of_feedrange(t - conn.time))) then - flag = false - break - end - end - if (flag == false) then - return - end - - --ok, do propagate - --print("debug ok, propagating"); - --The MB moving will cause bordering history to be changed, so it is more wise to flush the input activation - if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border history - for i = 1, self.batch_size do - local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) - local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) - if (seq_start > 0 or seq_end > 0) then - for p, conn in pairs(ref.i_conns_p) do - if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to default - ref.inputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default) - end - end - end - end - end - self.gconf.timer:tic("tnn_actual_layer_propagate") - ref.layer:propagate(ref.inputs_m[t], ref.outputs_m[t], t) --propagate! 
- self.gconf.timer:toc("tnn_actual_layer_propagate") - --[[ - if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --restore cross-border history - for i = 1, self.batch_size do - local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) - local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) - if (seq_start > 0 or seq_end > 0) then - for p, conn in pairs(ref.o_conns_p) do - if ((ref.o_conns_p[p].time > 0 and seq_end > 0) or (ref.o_conns_p[p].time < 0 and seq_start > 0)) then - ref.outputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default) - end - end - end - end - end - ]]-- - --set input flag for future layers - for i = 1, #ref.dim_out do - if (ref.outputs_b[t][i] == true) then - nerv.error("this time's outputs_b should be false") - end - ref.outputs_b[t][i] = true - end - - --try dfs for further layers - for _, conn in pairs(ref.o_conns_p) do - --print("debug dfs-searching", conn.dst.ref.layer.id) - conn.dst.ref.inputs_b[t + conn.time][conn.dst.port] = true - self:propagate_dfs(conn.dst.ref, t + conn.time) - end -end - ---do_update: bool, whether we are doing back-propagate or updating the parameters -function TNN:net_backpropagate(do_update) --propagate according to feeds_now - if do_update == nil then - nerv.error("do_update should not be nil") - end - for t = 1, self.chunk_size, 1 do - for id, ref in pairs(self.layers) do - for p = 1, #ref.dim_out do - ref.err_inputs_b[t][p] = false - end - for p = 1, #ref.dim_in do - ref.err_outputs_b[t][p] = false - end - end - end - - local feeds_now = self.feeds_now - for t = 1, self.chunk_size do --some layer maybe do not have outputs from time 1..chunk_size - for id, ref in pairs(self.layers) do - if #ref.dim_out > 0 then --some layer is just there(only to save some parameter) - self:backpropagate_dfs(ref, t, do_update) - end - end - end - for t = 1, self.chunk_size do - if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0 then - for i = 1, #self.dim_out do - local ref = self.outputs_p[i].ref - local p = self.outputs_p[i].port - ref.err_inputs_b[t][p] = true - self:backpropagate_dfs(ref, t, do_update) - end - end - end - - local flag_out = true - for t = 1, self.chunk_size do --check whether every output has been computed - if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0 then - for i = 1, #self.dim_in do - local ref = self.inputs_p[i].ref - if ref.err_outputs_b[t][1] ~= true then - flag_out = false - break - end - end - end - end - if (flag_out == false) then - nerv.error("some thing wrong, some input is not back_propagated") - end -end - ---ref: the TNN_ref of a layer ---t: the current time to propagate -function TNN:backpropagate_dfs(ref, t, do_update) - if do_update == nil then - nerv.error("got a nil do_update") - end - if self:out_of_feedrange(t) then - return - end - if ref.err_outputs_b[t][1] == true then --already back_propagated, 1 is just a random port - return - end - - --print("debug dfs", ref.layer.id, t) - - local flag = true --whether have all inputs - for _, conn in pairs(ref.o_conns_p) do - local p = conn.src.port - if (not (ref.err_inputs_b[t][p] or self:out_of_feedrange(t + conn.time))) then - flag = false - break - end - end - if (flag == false) then - return - end - - --ok, do back_propagate - --print("debug ok, back-propagating(or updating)") - if (do_update == false) then - self.gconf.timer:tic("tnn_actual_layer_backpropagate") - ref.layer:back_propagate(ref.err_inputs_m[t], 
ref.err_outputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t) - self.gconf.timer:toc("tnn_actual_layer_backpropagate") - if self.clip_t > 0 then - for _, conn in pairs(ref.i_conns_p) do - local p = conn.dst.port --port for ref - if conn.time ~= 0 then - --print("debug clip_t tnn", ref.id, "port:", p, "clip:", self.clip_t) - ref.err_outputs_m[t][p]:clip(-self.clip_t, self.clip_t) - end - end - end - else - --print(ref.err_inputs_m[t][1]) - self.gconf.timer:tic("tnn_actual_layer_update") - ref.layer:update(ref.err_inputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t) - self.gconf.timer:toc("tnn_actual_layer_update") - end - - if (do_update == false and bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border errors - for i = 1, self.batch_size do - local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) - local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) - if (seq_start > 0 or seq_end > 0) then - for p, conn in pairs(ref.i_conns_p) do - if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to zero - ref.err_outputs_m[t][p][i - 1]:fill(0) - end - end - end - end - end - - for i = 1, #ref.dim_in do - if (ref.err_outputs_b[t][i] == true) then - nerv.error("this time's outputs_b should be false") - end - ref.err_outputs_b[t][i] = true - end - - --try dfs for further layers - for _, conn in pairs(ref.i_conns_p) do - --print("debug dfs-searching", conn.src.ref.layer.id) - conn.src.ref.err_inputs_b[t - conn.time][conn.src.port] = true - self:backpropagate_dfs(conn.src.ref, t - conn.time, do_update) - end -end - ---Return: nerv.ParamRepo -function TNN:get_params() - local param_repos = {} - for id, ref in pairs(self.layers) do - table.insert(param_repos, ref.layer:get_params()) - end - return nerv.ParamRepo.merge(param_repos) -end - |
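
Two smaller pieces of the new architecture are easy to miss above: nn/network.lua splits the one-off graph compilation (`network:init`, which now does the topological sort and buffer allocation) from the per-epoch sub-layer initialization (`network:epoch_init`), and nerv/init.lua gains a `table.vector` helper used to build the constant `seq_length` list. A minimal usage sketch under those assumptions; `n_epochs` and the mini-batch feeding loop are hypothetical placeholders for the trainer code shown earlier:

```lua
local network = nerv.Network("nt", gconf, {network = get_network(layer_repo)})
network:init(gconf.batch_size, 1)     -- once: topological sort + storage allocation

for epoch = 1, n_epochs do            -- n_epochs: hypothetical loop bound
    network:epoch_init()              -- per pass: calls init() on every sub-layer
    -- ... feed mini-batches via mini_batch_init/propagate as in asr_trainer.lua ...
end

-- table.vector(len, fill) builds a constant list; fill defaults to 0.
local zeros = table.vector(4)                    -- {0, 0, 0, 0}
local lens  = table.vector(gconf.batch_size, 1)  -- every stream has length 1
```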