From bba82cac04474b8177ab45d41543bc993801a4e0 Mon Sep 17 00:00:00 2001
From: txh18
Date: Thu, 3 Dec 2015 12:48:49 +0800
Subject: moved tnn to main nerv dir and added it to Makefile

---
 nerv/Makefile | 7 +-
 nerv/examples/lmptb/lm_trainer.lua | 2 +-
 nerv/examples/lmptb/lmptb/lmseqreader.lua | 2 +-
 nerv/examples/lmptb/lstmlm_ptb_main.lua | 6 +-
 nerv/examples/lmptb/m-tests/sutil_test.lua | 2 +-
 nerv/examples/lmptb/rnnlm_ptb_main.lua | 2 +-
 nerv/examples/lmptb/tnn/init.lua | 51 --
 nerv/examples/lmptb/tnn/layer_dag_t.lua | 386 ----------------
 nerv/examples/lmptb/tnn/layers/elem_mul.lua | 38 --
 nerv/examples/lmptb/tnn/layers/gate_fff.lua | 71 ---
 nerv/examples/lmptb/tnn/layersT/dropout_t.lua | 71 ---
 nerv/examples/lmptb/tnn/layersT/lstm_t.lua | 125 -----
 nerv/examples/lmptb/tnn/layersT/softmax_ce_t.lua | 93 ----
 nerv/examples/lmptb/tnn/sutil.lua | 52 ---
 nerv/examples/lmptb/tnn/tnn.lua | 565 -----------------------
 nerv/init.lua | 1 +
 nerv/layer/elem_mul.lua | 38 ++
 nerv/layer/gate_fff.lua | 71 +++
 nerv/layer/init.lua | 2 +
 nerv/tnn/init.lua | 49 ++
 nerv/tnn/layer_dag_t.lua | 386 ++++++++++++++++
 nerv/tnn/layersT/dropout_t.lua | 71 +++
 nerv/tnn/layersT/lstm_t.lua | 125 +++++
 nerv/tnn/layersT/softmax_ce_t.lua | 93 ++++
 nerv/tnn/sutil.lua | 52 +++
 nerv/tnn/tnn.lua | 565 +++++++++++++++++++++++
 26 files changed, 1465 insertions(+), 1461 deletions(-)
 delete mode 100644 nerv/examples/lmptb/tnn/init.lua
 delete mode 100644 nerv/examples/lmptb/tnn/layer_dag_t.lua
 delete mode 100644 nerv/examples/lmptb/tnn/layers/elem_mul.lua
 delete mode 100644 nerv/examples/lmptb/tnn/layers/gate_fff.lua
 delete mode 100644 nerv/examples/lmptb/tnn/layersT/dropout_t.lua
 delete mode 100644 nerv/examples/lmptb/tnn/layersT/lstm_t.lua
 delete mode 100644 nerv/examples/lmptb/tnn/layersT/softmax_ce_t.lua
 delete mode 100644 nerv/examples/lmptb/tnn/sutil.lua
 delete mode 100644 nerv/examples/lmptb/tnn/tnn.lua
 create mode 100644 nerv/layer/elem_mul.lua
 create mode 100644 nerv/layer/gate_fff.lua
 create mode 100644 nerv/tnn/init.lua
 create mode 100644 nerv/tnn/layer_dag_t.lua
 create mode 100644 nerv/tnn/layersT/dropout_t.lua
 create mode 100644 nerv/tnn/layersT/lstm_t.lua
 create mode 100644 nerv/tnn/layersT/softmax_ce_t.lua
 create mode 100644 nerv/tnn/sutil.lua
 create mode 100644 nerv/tnn/tnn.lua

diff --git a/nerv/Makefile b/nerv/Makefile
index df6ce98..5c329f9 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -7,7 +7,7 @@ INC_PATH := $(LUA_BINDIR)/../include/nerv
 LUA_DIR = $(INST_LUADIR)/nerv
 OBJ_DIR := $(BUILD_DIR)/objs
 ISUBDIR := io matrix luaT
-SUBDIR := matrix io layer examples nn lib/io lib/luaT lib/matrix
+SUBDIR := matrix io layer examples nn lib/io lib/luaT lib/matrix tnn/layersT
 
 INC_SUBDIR := $(addprefix $(INC_PATH)/,$(ISUBDIR))
 OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
@@ -32,8 +32,11 @@ LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT
 LUA_LIBS := matrix/init.lua io/init.lua init.lua \
        layer/init.lua layer/affine.lua layer/sigmoid.lua layer/tanh.lua layer/softmax_ce.lua layer/softmax.lua \
        layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua \
+       layer/elem_mul.lua layer/gate_fff.lua \
        nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
-       io/sgd_buffer.lua
+       io/sgd_buffer.lua \
+       tnn/init.lua tnn/layer_dag_t.lua tnn/sutil.lua tnn/tnn.lua \
+       tnn/layersT/dropout_t.lua tnn/layersT/lstm_t.lua tnn/layersT/softmax_ce_t.lua
 INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
 #CUDA_BASE := /usr/local/cuda-7.0
diff --git
a/nerv/examples/lmptb/lm_trainer.lua b/nerv/examples/lmptb/lm_trainer.lua index a203cc6..e5384b1 100644 --- a/nerv/examples/lmptb/lm_trainer.lua +++ b/nerv/examples/lmptb/lm_trainer.lua @@ -2,7 +2,7 @@ require 'lmptb.lmvocab' require 'lmptb.lmfeeder' require 'lmptb.lmutil' require 'lmptb.layer.init' -require 'tnn.init' +--require 'tnn.init' require 'lmptb.lmseqreader' local LMTrainer = nerv.class('nerv.LMTrainer') diff --git a/nerv/examples/lmptb/lmptb/lmseqreader.lua b/nerv/examples/lmptb/lmptb/lmseqreader.lua index ff07415..ead8d4c 100644 --- a/nerv/examples/lmptb/lmptb/lmseqreader.lua +++ b/nerv/examples/lmptb/lmptb/lmseqreader.lua @@ -1,5 +1,5 @@ require 'lmptb.lmvocab' -require 'tnn.init' +--require 'tnn.init' local LMReader = nerv.class("nerv.LMSeqReader") diff --git a/nerv/examples/lmptb/lstmlm_ptb_main.lua b/nerv/examples/lmptb/lstmlm_ptb_main.lua index 53a7bd5..4123378 100644 --- a/nerv/examples/lmptb/lstmlm_ptb_main.lua +++ b/nerv/examples/lmptb/lstmlm_ptb_main.lua @@ -2,7 +2,7 @@ require 'lmptb.lmvocab' require 'lmptb.lmfeeder' require 'lmptb.lmutil' require 'lmptb.layer.init' -require 'tnn.init' +--require 'tnn.init' require 'lmptb.lmseqreader' require 'lm_trainer' @@ -197,10 +197,10 @@ global_conf = { hidden_size = 300, --set to 400 for a stable good test PPL chunk_size = 15, batch_size = 10, - max_iter = 35, + max_iter = 45, decay_iter = 10, param_random = function() return (math.random() / 5 - 0.1) end, - dropout_str = "0.5*15:0", + dropout_str = "0.5", train_fn = train_fn, valid_fn = valid_fn, diff --git a/nerv/examples/lmptb/m-tests/sutil_test.lua b/nerv/examples/lmptb/m-tests/sutil_test.lua index c2425c2..3f9bf9e 100644 --- a/nerv/examples/lmptb/m-tests/sutil_test.lua +++ b/nerv/examples/lmptb/m-tests/sutil_test.lua @@ -1,4 +1,4 @@ -require "tnn.init" +--require "tnn.init" ss = "0.1*1:2" nerv.SUtil.parse_schedule(ss) diff --git a/nerv/examples/lmptb/rnnlm_ptb_main.lua b/nerv/examples/lmptb/rnnlm_ptb_main.lua index 35b2e08..ca62023 100644 --- a/nerv/examples/lmptb/rnnlm_ptb_main.lua +++ b/nerv/examples/lmptb/rnnlm_ptb_main.lua @@ -2,7 +2,7 @@ require 'lmptb.lmvocab' require 'lmptb.lmfeeder' require 'lmptb.lmutil' require 'lmptb.layer.init' -require 'tnn.init' +--require 'tnn.init' require 'lmptb.lmseqreader' require 'lm_trainer' diff --git a/nerv/examples/lmptb/tnn/init.lua b/nerv/examples/lmptb/tnn/init.lua deleted file mode 100644 index 66ea4ed..0000000 --- a/nerv/examples/lmptb/tnn/init.lua +++ /dev/null @@ -1,51 +0,0 @@ -local LayerT = nerv.class('nerv.LayerT') - -function LayerT:__init(id, global_conf, layer_conf) - nerv.error_method_not_implemented() -end - -function LayerT:init(batch_size, chunk_size) - nerv.error_method_not_implemented() -end - -function LayerT:update(bp_err, input, output, t) - nerv.error_method_not_implemented() -end - -function LayerT:propagate(input, output, t) - nerv.error_method_not_implemented() -end - -function LayerT:back_propagate(bp_err, next_bp_err, input, output, t) - nerv.error_method_not_implemented() -end - -function LayerT:check_dim_len(len_in, len_out) - local expected_in = #self.dim_in - local expected_out = #self.dim_out - if len_in > 0 and expected_in ~= len_in then - nerv.error("layer %s expects %d inputs, %d given", - self.id, len_in, expected_in) - end - if len_out > 0 and expected_out ~= len_out then - nerv.error("layer %s expects %d outputs, %d given", - self.id, len_out, expected_out) - end -end - -function LayerT:get_params() - nerv.error_method_not_implemented() -end - -function LayerT:get_dim() - return self.dim_in, 
self.dim_out -end - -nerv.include('sutil.lua') -nerv.include('tnn.lua') -nerv.include('layersT/softmax_ce_t.lua') -nerv.include('layersT/lstm_t.lua') -nerv.include('layersT/dropout_t.lua') -nerv.include('layers/elem_mul.lua') -nerv.include('layers/gate_fff.lua') -nerv.include('layer_dag_t.lua') diff --git a/nerv/examples/lmptb/tnn/layer_dag_t.lua b/nerv/examples/lmptb/tnn/layer_dag_t.lua deleted file mode 100644 index e3a9316..0000000 --- a/nerv/examples/lmptb/tnn/layer_dag_t.lua +++ /dev/null @@ -1,386 +0,0 @@ -local DAGLayerT = nerv.class("nerv.DAGLayerT", "nerv.LayerT") - -local function parse_id(str) - local id, port, _ - _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]") - if id == nil or port == nil then - _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]") - if not (id == "" or id == "") then - nerv.error("wrong format of connection id") - end - end - port = tonumber(port) - return id, port -end - -local function discover(id, layers, layer_repo) - local ref = layers[id] - if id == "" or id == "" then - return nil - end - if ref == nil then - local layer = layer_repo:get_layer(id) - local dim_in, dim_out = layer:get_dim() - ref = { - id = layer.id, - layer = layer, - inputs = {}, - outputs = {}, - err_inputs = {}, - err_outputs = {}, - next_layers = {}, - input_len = #dim_in, - output_len = #dim_out, - in_deg = 0, - visited = false - } - layers[id] = ref - end - return ref -end - -function DAGLayerT:__init(id, global_conf, layer_conf) - local layers = {} - local inputs = {} - local outputs = {} - local dim_in = layer_conf.dim_in - local dim_out = layer_conf.dim_out - local parsed_conn = {} - for from, to in pairs(layer_conf.connections) do - local id_from, port_from = parse_id(from) - local id_to, port_to = parse_id(to) - local ref_from = discover(id_from, layers, layer_conf.sub_layers) - local ref_to = discover(id_to, layers, layer_conf.sub_layers) - local input_dim, output_dim, _ - if id_from == "" then - input_dim, _ = ref_to.layer:get_dim() - if dim_in[port_from] ~= input_dim[port_to] then - nerv.error("mismatching data dimension between %s and %s", from, to) - end - inputs[port_from] = {ref_to, port_to} - if ref_to.inputs[1] == nil then - ref_to.inputs[1] = {} - end - if ref_to.inputs[1][port_to] ~= nil then - nerv.error("port(%d) for layer(%s) already attached", port_to, to) - end - ref_to.inputs[1][port_to] = inputs -- just a place holder - elseif id_to == "" then - _, output_dim = ref_from.layer:get_dim() - if output_dim[port_from] ~= dim_out[port_to] then - nerv.error("mismatching data dimension between %s and %s", from, to) - end - outputs[port_to] = {ref_from, port_from} - if ref_from.outputs[1] == nil then - ref_from.outputs[1] = {} - end - if ref_from.outputs[1][port_from] ~= nil then - nerv.error("port(%d) for layer(%s) already attached", port_from, from) - end - ref_from.outputs[1] = {} - ref_from.outputs[1][port_from] = outputs -- just a place holder - else - _, output_dim = ref_from.layer:get_dim() - input_dim, _ = ref_to.layer:get_dim() - if output_dim[port_from] ~= input_dim[port_to] then - nerv.error("mismatching data dimension between %s and %s", from, to) - end - - table.insert(parsed_conn, - {{ref_from, port_from}, {ref_to, port_to}}) - table.insert(ref_from.next_layers, ref_to) -- add edge - ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer - end - end - - -- topology sort - local queue = {} - local l = 1 - local r = 1 - for id, ref in pairs(layers) do - if ref.in_deg == 0 then - table.insert(queue, ref) - 
nerv.info("adding source layer: %s", id) - r = r + 1 - end - end - if l == r then - nerv.error("loop detected") - end - while l < r do - local cur = queue[l] - cur.visited = true - l = l + 1 - for _, nl in pairs(cur.next_layers) do - nl.in_deg = nl.in_deg - 1 - if nl.in_deg == 0 then - table.insert(queue, nl) - r = r + 1 - end - end - end - for i = 1, #queue do - nerv.info("enqueued layer: %s %s", queue[i].layer, queue[i].layer.id) - end - - for id, ref in pairs(layers) do - -- check wether the graph is connected - if ref.visited == false then - nerv.warning("layer %s is ignored", id) - end - end - - self.layers = layers - self.inputs = inputs - self.outputs = outputs - self.id = id - self.dim_in = dim_in - self.dim_out = dim_out - self.parsed_conn = parsed_conn - self.queue = queue - self.gconf = global_conf -end - -function DAGLayerT:init(batch_size, chunk_size) - nerv.info("initing DAGLayerT %s...\n", self.id) - if chunk_size == nil then - chunk_size = 1 - nerv.info("(Initing DAGLayerT) chunk_size is nil, setting it to default 1\n") - end - - self.chunk_size = chunk_size - - for i, conn in ipairs(self.parsed_conn) do - local _, output_dim - local ref_from, port_from, ref_to, port_to - ref_from, port_from = unpack(conn[1]) - ref_to, port_to = unpack(conn[2]) - _, output_dim = ref_from.layer:get_dim() - local dim = 1 - if output_dim[port_from] > 0 then - dim = output_dim[port_from] - end - - for t = 1, chunk_size do - local mid = self.gconf.cumat_type(batch_size, dim) - local err_mid = mid:create() - - if ref_from.outputs[t] == nil then - ref_from.outputs[t] = {} - end - if ref_to.inputs[t] == nil then - ref_to.inputs[t] = {} - end - if ref_to.err_outputs[t] == nil then - ref_to.err_outputs[t] = {} - end - if ref_from.err_inputs[t] == nil then - ref_from.err_inputs[t] = {} - end - - ref_from.outputs[t][port_from] = mid - ref_to.inputs[t][port_to] = mid - - ref_from.err_inputs[t][port_from] = err_mid - ref_to.err_outputs[t][port_to] = err_mid - end - end - for id, ref in pairs(self.layers) do - for i = 1, ref.input_len do - if ref.inputs[1][i] == nil then --peek at time 1 - nerv.error("dangling input port %d of layer %s", i, id) - end - end - for i = 1, ref.output_len do - if ref.outputs[1][i] == nil then --peek at time 1 - nerv.error("dangling output port %d of layer %s", i, id) - end - end - -- initialize sub layers - ref.layer:init(batch_size, chunk_size) - end - for i = 1, #self.dim_in do - if self.inputs[i] == nil then - nerv.error("dangling port %d of layer ", i) - end - end - for i = 1, #self.dim_out do - if self.outputs[i] == nil then - nerv.error("dangling port %d of layer ", i) - end - end -end - -function DAGLayerT:batch_resize(batch_size, chunk_size) - if chunk_size == nil then - chunk_size = 1 - end - if batch_size ~= self.gconf.batch_size - or chunk_size ~= self.gconf.chunk_size then - nerv.printf("warn: in DAGLayerT:batch_resize, the batch_size ~= gconf.batch_size, or chunk_size ~= gconf.chunk_size") - end - self.gconf.batch_size = batch_size - self.gconf.chunk_size = chunk_size - - for i, conn in ipairs(self.parsed_conn) do - local _, output_dim - local ref_from, port_from, ref_to, port_to - ref_from, port_from = unpack(conn[1]) - ref_to, port_to = unpack(conn[2]) - _, output_dim = ref_from.layer:get_dim() - - for t = 1, chunk_size do - if ref_from.outputs[t] == nil then - ref_from.outputs[t] = {} - end - if ref_to.inputs[t] == nil then - ref_to.inputs[t] = {} - end - if ref_from.err_outputs[t] == nil then - ref_from.err_outputs[t] = {} - end - if ref_from.err_inputs[t] == 
nil then - ref_from.err_inputs[t] = {} - end - - local mid = self.gconf.cumat_type(batch_size, dim) - local err_mid = mid:create() - - ref_from.outputs[t][port_from] = mid - ref_to.inputs[t][port_to] = mid - - ref_from.err_inputs[t][port_from] = err_mid - ref_to.err_outputs[t][port_to] = err_mid - end - end - for id, ref in pairs(self.layers) do - ref.layer:batch_resize(batch_size, chunk_size) - end - collectgarbage("collect") -end - -function DAGLayerT:set_inputs(input, t) - for i = 1, #self.dim_in do - if input[i] == nil then - nerv.error("some input is not provided"); - end - local layer = self.inputs[i][1] - local port = self.inputs[i][2] - if layer.inputs[t] == nil then - layer.inputs[t] = {} - end - layer.inputs[t][port] = input[i] - end -end - -function DAGLayerT:set_outputs(output, t) - for i = 1, #self.dim_out do - if output[i] == nil then - nerv.error("some output is not provided"); - end - local layer = self.outputs[i][1] - local port = self.outputs[i][2] - if layer.outputs[t] == nil then - layer.outputs[t] = {} - end - layer.outputs[t][port] = output[i] - end -end - -function DAGLayerT:set_err_inputs(bp_err, t) - for i = 1, #self.dim_out do - local layer = self.outputs[i][1] - local port = self.outputs[i][2] - if layer.err_inputs[t] == nil then - layer.err_inputs[t] = {} - end - layer.err_inputs[t][port] = bp_err[i] - end -end - -function DAGLayerT:set_err_outputs(next_bp_err, t) - for i = 1, #self.dim_in do - local layer = self.inputs[i][1] - local port = self.inputs[i][2] - if layer.err_outputs[t] == nil then - layer.err_outputs[t] = {} - end - layer.err_outputs[t][port] = next_bp_err[i] - end -end - -function DAGLayerT:update(bp_err, input, output, t) - if t == nil then - t = 1 - end - self:set_err_inputs(bp_err, t) - self:set_inputs(input, t) - self:set_outputs(output, t) - for id, ref in pairs(self.queue) do - ref.layer:update(ref.err_inputs[t], ref.inputs[t], ref.outputs[t], t) - end -end - -function DAGLayerT:propagate(input, output, t) - if t == nil then - t = 1 - end - self:set_inputs(input, t) - self:set_outputs(output, t) - local ret = false - for i = 1, #self.queue do - local ref = self.queue[i] - --print("debug DAGLAyerT:propagate", ref.id, t) - ret = ref.layer:propagate(ref.inputs[t], ref.outputs[t], t) - end - return ret -end - -function DAGLayerT:back_propagate(bp_err, next_bp_err, input, output, t) - if t == nil then - t = 1 - end - self:set_err_outputs(next_bp_err, t) - self:set_err_inputs(bp_err, t) - self:set_inputs(input, t) - self:set_outputs(output, t) - for i = #self.queue, 1, -1 do - local ref = self.queue[i] - ref.layer:back_propagate(ref.err_inputs[t], ref.err_outputs[t], ref.inputs[t], ref.outputs[t], t) - end -end - -function DAGLayerT:get_params() - local param_repos = {} - for id, ref in pairs(self.queue) do - table.insert(param_repos, ref.layer:get_params()) - end - return nerv.ParamRepo.merge(param_repos) -end - -DAGLayerT.PORT_TYPES = { - INPUT = {}, - OUTPUT = {}, - ERR_INPUT = {}, - ERR_OUTPUT = {} -} - -function DAGLayerT:get_intermediate(id, port_type) - if id == "" or id == "" then - nerv.error("an actual real layer id is expected") - end - local layer = self.layers[id] - if layer == nil then - nerv.error("layer id %s not found", id) - end - if port_type == DAGLayerT.PORT_TYPES.INPUT then - return layer.inputs - elseif port_type == DAGLayerT.PORT_TYPES.OUTPUT then - return layer.outputs - elseif port_type == DAGLayerT.PORT_TYPES.ERR_INPUT then - return layer.err_inputs - elseif port_type == DAGLayerT.PORT_TYPES.ERR_OUTPUT then - return 
layer.err_outputs - end - nerv.error("unrecognized port type") -end diff --git a/nerv/examples/lmptb/tnn/layers/elem_mul.lua b/nerv/examples/lmptb/tnn/layers/elem_mul.lua deleted file mode 100644 index c809d3e..0000000 --- a/nerv/examples/lmptb/tnn/layers/elem_mul.lua +++ /dev/null @@ -1,38 +0,0 @@ -local ElemMulLayer = nerv.class('nerv.ElemMulLayer', 'nerv.Layer') - -function ElemMulLayer:__init(id, global_conf, layer_conf) - self.id = id - self.dim_in = layer_conf.dim_in - self.dim_out = layer_conf.dim_out - self.gconf = global_conf - - self:check_dim_len(2, 1) -- Element-multiply input[1] and input[2] -end - -function ElemMulLayer:init(batch_size) - if self.dim_in[1] ~= self.dim_in[2] or - self.dim_in[1] ~= self.dim_out[1] then - nerv.error("dim_in and dim_out mismatch for ElemMulLayer") - end -end - -function ElemMulLayer:batch_resize(batch_size) - --do nothing -end - -function ElemMulLayer:propagate(input, output) - output[1]:mul_elem(input[1], input[2]) -end - -function ElemMulLayer:back_propagate(bp_err, next_bp_err, input, output) - next_bp_err[1]:mul_elem(bp_err[1], input[2]) - next_bp_err[2]:mul_elem(bp_err[1], input[1]) -end - -function ElemMulLayer:update(bp_err, input, output) - --do nothing -end - -function ElemMulLayer:get_params() - return nerv.ParamRepo({}) -end diff --git a/nerv/examples/lmptb/tnn/layers/gate_fff.lua b/nerv/examples/lmptb/tnn/layers/gate_fff.lua deleted file mode 100644 index 751dde1..0000000 --- a/nerv/examples/lmptb/tnn/layers/gate_fff.lua +++ /dev/null @@ -1,71 +0,0 @@ -local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer') - -function GateFFFLayer:__init(id, global_conf, layer_conf) - self.id = id - self.dim_in = layer_conf.dim_in - self.dim_out = layer_conf.dim_out - self.gconf = global_conf - - self.ltp1 = self:find_param("ltp1", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp - self.ltp2 = self:find_param("ltp2", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp - self.ltp3 = self:find_param("ltp3", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[3], self.dim_out[1]}) --layer_conf.ltp - self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp - - self:check_dim_len(3, 1) -- exactly one input and one output -end - -function GateFFFLayer:init(batch_size) - if self.ltp1.trans:ncol() ~= self.bp.trans:ncol() or - self.ltp2.trans:ncol() ~= self.bp.trans:ncol() or - self.ltp3.trans:ncol() ~= self.bp.trans:ncol() then - nerv.error("mismatching dimensions of linear transform and bias paramter") - end - if self.dim_in[1] ~= self.ltp1.trans:nrow() or - self.dim_in[2] ~= self.ltp2.trans:nrow() or - self.dim_in[3] ~= self.ltp3.trans:nrow() then - nerv.error("mismatching dimensions of linear transform parameter and input") - end - if self.dim_out[1] ~= self.ltp1.trans:ncol() then - nerv.error("mismatching dimensions of linear transform parameter and output") - end - self.ltp1:train_init() - self.ltp2:train_init() - self.ltp3:train_init() - self.bp:train_init() - self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1]) -end - -function GateFFFLayer:batch_resize(batch_size) - if self.err_m:nrow() ~= batch_size then - self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1]) - end -end - -function GateFFFLayer:propagate(input, output) - -- apply linear transform - output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N') - output[1]:mul(input[2], self.ltp2.trans, 
1.0, 1.0, 'N', 'N') - output[1]:mul(input[3], self.ltp3.trans, 1.0, 1.0, 'N', 'N') - -- add bias - output[1]:add_row(self.bp.trans, 1.0) - output[1]:sigmoid(output[1]) -end - -function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output) - self.err_bakm:sigmoid_grad(bp_err[1], output[1]) - next_bp_err[1]:mul(self.err_bakm, self.ltp1.trans, 1.0, 0.0, 'N', 'T') - next_bp_err[2]:mul(self.err_bakm, self.ltp2.trans, 1.0, 0.0, 'N', 'T') - next_bp_err[3]:mul(self.err_bakm, self.ltp3.trans, 1.0, 0.0, 'N', 'T') -end - -function GateFFFLayer:update(bp_err, input, output) - self.err_bakm:sigmoid_grad(bp_err[1], output[1]) - self.ltp1:update_by_err_input(self.err_bakm, input[1]) - self.ltp2:update_by_err_input(self.err_bakm, input[2]) - self.ltp3:update_by_err_input(self.err_bakm, input[3]) - self.bp:update_by_gradient(self.err_bakm:colsum()) -end - -function GateFFFLayer:get_params() - return nerv.ParamRepo({self.ltp1, self.ltp2, self.ltp3, self.bp}) -end diff --git a/nerv/examples/lmptb/tnn/layersT/dropout_t.lua b/nerv/examples/lmptb/tnn/layersT/dropout_t.lua deleted file mode 100644 index 4351285..0000000 --- a/nerv/examples/lmptb/tnn/layersT/dropout_t.lua +++ /dev/null @@ -1,71 +0,0 @@ -local Dropout = nerv.class("nerv.DropoutLayerT", "nerv.LayerT") - -function Dropout:__init(id, global_conf, layer_conf) - self.id = id - self.gconf = global_conf - self.dim_in = layer_conf.dim_in - self.dim_out = layer_conf.dim_out - self:check_dim_len(1, 1) -- two inputs: nn output and label -end - -function Dropout:init(batch_size, chunk_size) - if self.dim_in[1] ~= self.dim_out[1] then - nerv.error("mismatching dimensions of input and output") - end - if chunk_size == nil then - chunk_size = 1 - end - self.mask_t = {} - for t = 1, chunk_size do - self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) - end -end - -function Dropout:batch_resize(batch_size, chunk_size) - if chunk_size == nil then - chunk_size = 1 - end - for t = 1, chunk_size do - if self.mask_t[t] == nil or self.mask_t[t]:nrow() ~= batch_size then - self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) - end - end -end - -function Dropout:propagate(input, output, t) - if t == nil then - t = 1 - end - if self.gconf.dropout_rate == nil then - nerv.info("DropoutLayerT:propagate warning, global_conf.dropout_rate is nil, setting it zero") - self.gconf.dropout_rate = 0 - end - - if self.gconf.dropout_rate == 0 then - output[1]:copy_fromd(input[1]) - else - self.mask_t[t]:rand_uniform() - --since we will lose a portion of the actvations, we multiply the activations by 1/(1-dr) to compensate - self.mask_t[t]:thres_mask(self.mask_t[t], self.gconf.dropout_rate, 0, 1 / (1.0 - self.gconf.dropout_rate)) - output[1]:mul_elem(input[1], self.mask_t[t]) - end -end - -function Dropout:update(bp_err, input, output, t) - -- no params, therefore do nothing -end - -function Dropout:back_propagate(bp_err, next_bp_err, input, output, t) - if t == nil then - t = 1 - end - if self.gconf.dropout_rate == 0 then - next_bp_err[1]:copy_fromd(bp_err[1]) - else - next_bp_err[1]:mul_elem(bp_err[1], self.mask_t[t]) - end -end - -function Dropout:get_params() - return nerv.ParamRepo({}) -end diff --git a/nerv/examples/lmptb/tnn/layersT/lstm_t.lua b/nerv/examples/lmptb/tnn/layersT/lstm_t.lua deleted file mode 100644 index ded6058..0000000 --- a/nerv/examples/lmptb/tnn/layersT/lstm_t.lua +++ /dev/null @@ -1,125 +0,0 @@ -local LSTMLayerT = nerv.class('nerv.LSTMLayerT', 'nerv.LayerT') - -function LSTMLayerT:__init(id, global_conf, layer_conf) - 
--input1:x input2:h input3:c - self.id = id - self.dim_in = layer_conf.dim_in - self.dim_out = layer_conf.dim_out - self.gconf = global_conf - - --prepare a DAGLayerT to hold the lstm structure - local pr = layer_conf.pr - if pr == nil then - pr = nerv.ParamRepo() - end - - local function ap(str) - return self.id .. '.' .. str - end - - local layers = { - ["nerv.CombinerLayer"] = { - [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]}, - ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}}, - [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]}, - ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}}, - [ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]}, - ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}}, - [ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]}, - ["lambda"] = {1, 1}}}, - }, - ["nerv.AffineLayer"] = { - [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]}, - ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}}, - }, - ["nerv.TanhLayer"] = { - [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}}, - [ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}}, - }, - ["nerv.GateFFFLayer"] = { - [ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]}, - ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}}, - [ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]}, - ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}}, - [ap("outputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]}, - ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}}, - - }, - ["nerv.ElemMulLayer"] = { - [ap("inputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}}, - [ap("forgetGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}}, - [ap("outputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}}, - }, - } - - local layerRepo = nerv.LayerRepo(layers, pr, global_conf) - - local connections_t = { - ["[1]"] = ap("inputXDup[1]"), - ["[2]"] = ap("inputHDup[1]"), - ["[3]"] = ap("inputCDup[1]"), - - [ap("inputXDup[1]")] = ap("mainAffineL[1]"), - [ap("inputHDup[1]")] = ap("mainAffineL[2]"), - [ap("inputCDup[1]")] = ap("mainAffineL[3]"), - [ap("mainAffineL[1]")] = ap("mainTanhL[1]"), - - [ap("inputXDup[2]")] = ap("inputGateL[1]"), - [ap("inputHDup[2]")] = ap("inputGateL[2]"), - [ap("inputCDup[2]")] = ap("inputGateL[3]"), - - [ap("inputXDup[3]")] = ap("forgetGateL[1]"), - [ap("inputHDup[3]")] = ap("forgetGateL[2]"), - [ap("inputCDup[3]")] = ap("forgetGateL[3]"), - - [ap("mainTanhL[1]")] = ap("inputGMulL[1]"), - [ap("inputGateL[1]")] = ap("inputGMulL[2]"), - - [ap("inputCDup[4]")] = ap("forgetGMulL[1]"), - [ap("forgetGateL[1]")] = ap("forgetGMulL[2]"), - - [ap("inputGMulL[1]")] = ap("mainCDup[1]"), - [ap("forgetGMulL[1]")] = ap("mainCDup[2]"), - - [ap("inputXDup[4]")] = ap("outputGateL[1]"), - [ap("inputHDup[4]")] = ap("outputGateL[2]"), - [ap("mainCDup[3]")] = ap("outputGateL[3]"), - - [ap("mainCDup[2]")] = "[2]", - [ap("mainCDup[1]")] = ap("outputTanhL[1]"), - - [ap("outputTanhL[1]")] = ap("outputGMulL[1]"), - [ap("outputGateL[1]")] = ap("outputGMulL[2]"), - - [ap("outputGMulL[1]")] 
= "[1]", - } - self.dagL = nerv.DAGLayerT(self.id, global_conf, - {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo, - ["connections"] = connections_t}) - - self:check_dim_len(3, 2) -- x, h, c and h, c -end - -function LSTMLayerT:init(batch_size, chunk_size) - self.dagL:init(batch_size, chunk_size) -end - -function LSTMLayerT:batch_resize(batch_size, chunk_size) - self.dagL:batch_resize(batch_size, chunk_size) -end - -function LSTMLayerT:update(bp_err, input, output, t) - self.dagL:update(bp_err, input, output, t) -end - -function LSTMLayerT:propagate(input, output, t) - self.dagL:propagate(input, output, t) -end - -function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output, t) - self.dagL:back_propagate(bp_err, next_bp_err, input, output, t) -end - -function LSTMLayerT:get_params() - return self.dagL:get_params() -end diff --git a/nerv/examples/lmptb/tnn/layersT/softmax_ce_t.lua b/nerv/examples/lmptb/tnn/layersT/softmax_ce_t.lua deleted file mode 100644 index a9ce975..0000000 --- a/nerv/examples/lmptb/tnn/layersT/softmax_ce_t.lua +++ /dev/null @@ -1,93 +0,0 @@ -local SoftmaxCELayer = nerv.class("nerv.SoftmaxCELayerT", "nerv.LayerT") - -function SoftmaxCELayer:__init(id, global_conf, layer_conf) - self.id = id - self.gconf = global_conf - self.dim_in = layer_conf.dim_in - self.dim_out = layer_conf.dim_out - self.compressed = layer_conf.compressed - if self.compressed == nil then - self.compressed = false - end - self:check_dim_len(2, -1) -- two inputs: nn output and label -end - -function SoftmaxCELayer:init(batch_size, chunk_size) - if not self.compressed and (self.dim_in[1] ~= self.dim_in[2]) then - nerv.error("mismatching dimensions of previous network output and labels") - end - if chunk_size == nil then - chunk_size = 1 - end - self.total_ce = 0.0 - self.total_correct = 0 - self.total_frames = 0 - self.softmax_t = {} - self.ce_t = {} - for t = 1, chunk_size do - self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) - self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) - end -end - -function SoftmaxCELayer:batch_resize(batch_size, chunk_size) - if chunk_size == nil then - chunk_size = 1 - end - for t = 1, chunk_size do - if self.softmax_t[t]:nrow() ~= batch_size then - self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) - self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) - end - end -end - -function SoftmaxCELayer:update(bp_err, input, output, t) - -- no params, therefore do nothing -end - -function SoftmaxCELayer:propagate(input, output, t) - if t == nil then - t = 1 - end - local softmax = self.softmax_t[t] - local ce = self.ce_t[t] - local classified = softmax:softmax(input[1]) - local label = input[2] - ce:log_elem(softmax) - if self.compressed then - label = label:decompress(input[1]:ncol()) - end - ce:mul_elem(ce, label) - ce = ce:rowsum() - if output[1] ~= nil then - output[1]:copy_fromd(ce) - end - -- add total ce - self.total_ce = self.total_ce - ce:colsum()[0][0] - self.total_frames = self.total_frames + softmax:nrow() - -- TODO: add colsame for uncompressed label - if self.compressed then - self.total_correct = self.total_correct + classified:colsame(input[2])[0][0] - end -end - -function SoftmaxCELayer:back_propagate(bp_err, next_bp_err, input, output, t) - -- softmax output - label - if t == nil then - t = 1 - end - local label = input[2] - if self.compressed then - label = label:decompress(input[1]:ncol()) - end - local nbe = next_bp_err[1] - nbe:add(self.softmax_t[t], 
label, 1.0, -1.0) - if bp_err[1] ~= nil then - nbe:scale_rows_by_col(bp_err[1]) - end -end - -function SoftmaxCELayer:get_params() - return nerv.ParamRepo({}) -end diff --git a/nerv/examples/lmptb/tnn/sutil.lua b/nerv/examples/lmptb/tnn/sutil.lua deleted file mode 100644 index f5bc408..0000000 --- a/nerv/examples/lmptb/tnn/sutil.lua +++ /dev/null @@ -1,52 +0,0 @@ -local Util = nerv.class("nerv.SUtil") --Scheduler Utility - -function Util.simple_split(inputstr, sep) - if sep == nil then - sep = "%s" - end - local t={} ; i=1 - for str in string.gmatch(inputstr, "([^"..sep.."]+)") do - t[i] = str - i = i + 1 - end - return t -end - -function Util.parse_schedule(str) - --parse a string like "1.2*10:1.5" to a list of numbers - local sch = {} - local s = Util.simple_split(str, ':') - for i = 1, #s do - local p = Util.simple_split(s[i], "%*") - if #p ~= 2 and #p ~= 1 then - nerv.error("nerv.SUtil:parse_schedule error, unit(%s) not proper, has %d components.", s[i], #p) - end - if p[2] == nil then - p[2] = "1" - end - p[1] = tonumber(p[1]) - p[2] = tonumber(p[2]) - for j = 1, p[2] do - table.insert(sch, p[1]) - end - end - - --for i = 1, #sch do - -- print(sch[i]) - --end - return sch -end - -function Util.sche_get(s, it) - --get s[it] - if s == nil then - nerv.info("Util.sche_get: warning, scheule is nil, returning zero...") - return 0 - end - if #s >= it then - return s[it] - else - nerv.info("Util.sche_get: warning, it(%d) > #schedule(%d), returning the last one of schedule(%f)...", it, #s, s[#s]) - return s[#s] - end -end diff --git a/nerv/examples/lmptb/tnn/tnn.lua b/nerv/examples/lmptb/tnn/tnn.lua deleted file mode 100644 index 56c9dc0..0000000 --- a/nerv/examples/lmptb/tnn/tnn.lua +++ /dev/null @@ -1,565 +0,0 @@ -local TNN = nerv.class("nerv.TNN") - -local function parse_id(str) - --used to parse layerid[portid],time - local id, port, time, _ - _, _, id, port, time = string.find(str, "([a-zA-Z0-9_]+)%[([0-9]+)%][,]*([0-9]*)") - if id == nil or port == nil then - _, _, id, port, time = string.find(str, "(.+)%[([0-9]+)%][,]*([0-9]*)") - if not (id == "" or id == "") then - nerv.error("wrong format of connection id") - end - end - --print(str, id, port, time) - port = tonumber(port) - if (time == nil) then - time = 0 - else - time = tonumber(time) - end - --now time don't need to be parsed - return id, port -end - -local function discover(id, layers, layer_repo) - local ref = layers[id] - if id == "" or id == "" then - return nil - end - if ref == nil then - local layer = layer_repo:get_layer(id) - local dim_in, dim_out = layer:get_dim() - ref = { - layer = layer, - id = layer.id, - inputs_m = {}, --storage for computation, inputs_m[time][port] - inputs_b = {}, --inputs_g[time][port], whether this input can been computed - inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation, inputs_p_matbak[port] - outputs_m = {}, - outputs_b = {}, - err_inputs_m = {}, - err_inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation - err_inputs_b = {}, - err_outputs_m = {}, - err_outputs_b = {}, - i_conns_p = {}, --list of inputing connections - o_conns_p = {}, --list of outputing connections - dim_in = dim_in, --list of dimensions of ports - dim_out = dim_out, - } - layers[id] = ref - end - return ref -end - -nerv.TNN.FC = {} --flag const -nerv.TNN.FC.SEQ_START = 4 -nerv.TNN.FC.SEQ_END = 8 -nerv.TNN.FC.HAS_INPUT = 1 -nerv.TNN.FC.HAS_LABEL = 2 -nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --This instance have 
both input and label - -function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, global_conf, st_c, p_c, t_c) - --Return a table of matrix storage from time (1-chunk_size)..(2*chunk_size) - if (type(st) ~= "table") then - nerv.error("st should be a table") - end - for i = 1 - chunk_size - 1, chunk_size * 2 + 1 do --intentionally allocated more time, should be [1-chunk_size, chunk_size*2] - if (st[i] == nil) then - st[i] = {} - end - st[i][p] = global_conf.cumat_type(batch_size, dim) - st[i][p]:fill(0) - if (st_c ~= nil) then - if (st_c[i + t_c] == nil) then - st_c[i + t_c] = {} - end - st_c[i + t_c][p_c] = st[i][p] - end - end -end - -function TNN:out_of_feedrange(t) --out of chunk, or no input, for the current feed - if (t < 1 or t > self.chunk_size) then - return true - end - if (self.feeds_now.flagsPack_now[t] == 0 or self.feeds_now.flagsPack_now[t] == nil) then - return true - end - return false -end - -function TNN:__init(id, global_conf, layer_conf) - self.clip_t = layer_conf.clip_t - if self.clip_t == nil then - self.clip_t = 0 - end - if self.clip_t > 0 then - nerv.info("tnn(%s) will clip gradient across time with %f...", id, self.clip_t) - end - local layers = {} - local inputs_p = {} --map:port of the TNN to layer ref and port - local outputs_p = {} - local dim_in = layer_conf.dim_in - local dim_out = layer_conf.dim_out - local parsed_conns = {} - local _ - - for _, ll in pairs(layer_conf.connections) do - local id_from, port_from = parse_id(ll[1]) - local id_to, port_to = parse_id(ll[2]) - local time_to = ll[3] - - print(id_from, id_to, time_to) - - local ref_from = discover(id_from, layers, layer_conf.sub_layers) - local ref_to = discover(id_to, layers, layer_conf.sub_layers) - - if (id_from == "") then - if (dim_in[port_from] ~= ref_to.dim_in[port_to] or time_to ~= 0) then - nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3]) - end - inputs_p[port_from] = {["ref"] = ref_to, ["port"] = port_to} - ref_to.inputs_m[port_to] = {} --just a place holder - elseif (id_to == "") then - if (dim_out[port_to] ~= ref_from.dim_out[port_from] or time_to ~= 0) then - nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3]) - end - outputs_p[port_to] = {["ref"] = ref_from, ["port"] = port_from} - ref_from.outputs_m[port_from] = {} --just a place holder - else - local conn_now = { - ["src"] = {["ref"] = ref_from, ["port"] = port_from}, - ["dst"] = {["ref"] = ref_to, ["port"] = port_to}, - ["time"] = time_to - } - if (ref_to.dim_in[port_to] ~= ref_from.dim_out[port_from]) then - nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3]) - end - table.insert(parsed_conns, conn_now) - ref_to.i_conns_p[conn_now.dst.port] = conn_now - ref_from.o_conns_p[conn_now.src.port] = conn_now - end - end - - for id, ref in pairs(layers) do - print(id, "#dim_in:", #ref.dim_in, "#dim_out:", #ref.dim_out, "#i_conns_p:", #ref.i_conns_p, "#o_conns_p", #ref.o_conns_p) - end - - self.layers = layers - self.inputs_p = inputs_p - self.outputs_p = outputs_p - self.id = id - self.dim_in = dim_in - self.dim_out = dim_out - self.parsed_conns = parsed_conns - self.gconf = global_conf -end - -function TNN:init(batch_size, chunk_size) - self.batch_size = batch_size - self.chunk_size = chunk_size - for i, conn in ipairs(self.parsed_conns) do --init storage for connections inside the NN - local _, output_dim - local ref_from, port_from, ref_to, port_to, time - ref_from, port_from = conn.src.ref, conn.src.port - ref_to, port_to = conn.dst.ref, 
conn.dst.port - time = conn.time - - local dim = ref_from.dim_out[port_from] - if (dim == 0) then - nerv.error("layer %s has a zero dim port", ref_from.layer.id) - end - - print("TNN initing storage", ref_from.layer.id, "->", ref_to.layer.id) - ref_to.inputs_matbak_p[port_to] = self.gconf.cumat_type(batch_size, dim) - self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.inputs_m, port_to, time) - ref_from.err_inputs_matbak_p[port_from] = self.gconf.cumat_type(batch_size, dim) - self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.err_outputs_m, port_to, time) - - end - - self.outputs_m = {} - self.err_inputs_m = {} - for i = 1, #self.dim_out do --Init storage for output ports - local ref = self.outputs_p[i].ref - local p = self.outputs_p[i].port - self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.outputs_m, i, 0) - self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.err_inputs_m, i, 0) - end - - self.inputs_m = {} - self.err_outputs_m = {} - for i = 1, #self.dim_in do --Init storage for input ports - local ref = self.inputs_p[i].ref - local p = self.inputs_p[i].port - self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.inputs_m, i, 0) - self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.err_outputs_m, i, 0) - end - - for id, ref in pairs(self.layers) do --Calling init for child layers - for i = 1, #ref.dim_in do - if (ref.inputs_m[i] == nil or ref.err_outputs_m[i] == nil) then - nerv.error("dangling input port %d of layer %s", i, id) - end - end - for i = 1, #ref.dim_out do - if (ref.outputs_m[i] == nil or ref.err_inputs_m[i] == nil) then - nerv.error("dangling output port %d of layer %s", i, id) - end - end - -- initialize sub layers - ref.layer:init(batch_size, chunk_size) - end - - local flags_now = {} - local flagsPack_now = {} - for i = 1, chunk_size do - flags_now[i] = {} - flagsPack_now[i] = 0 - end - - self.feeds_now = {} --feeds is for the reader to fill - self.feeds_now.inputs_m = self.inputs_m - self.feeds_now.flags_now = flags_now - self.feeds_now.flagsPack_now = flagsPack_now - - self:flush_all() -end - ---[[ -function DAGLayer:batch_resize(batch_size) - self.gconf.batch_size = batch_size - - for i, conn in ipairs(self.parsed_conn) do - local _, output_dim - local ref_from, port_from, ref_to, port_to - ref_from, port_from = unpack(conn[1]) - ref_to, port_to = unpack(conn[2]) - _, output_dim = ref_from.layer:get_dim() - - if ref_from.outputs[port_from]:nrow() ~= batch_size and output_dim[port_from] > 0 then - local mid = self.gconf.cumat_type(batch_size, output_dim[port_from]) - local err_mid = mid:create() - - ref_from.outputs[port_from] = mid - ref_to.inputs[port_to] = mid - - ref_from.err_inputs[port_from] = err_mid - ref_to.err_outputs[port_to] = err_mid - end - end - for id, ref in pairs(self.layers) do - ref.layer:batch_resize(batch_size) - end - collectgarbage("collect") -end -]]-- - -function TNN:flush_all() --flush all history and activation - local _, ref - for _, ref in pairs(self.layers) do - for i = 1, #ref.dim_in do - for t = 1 - self.chunk_size, self.chunk_size * 2 do - ref.inputs_m[t][i]:fill(self.gconf.nn_act_default) - if (ref.inputs_b[t] == nil) then - ref.inputs_b[t] = {} - end - ref.inputs_b[t][i] = false - ref.err_outputs_m[t][i]:fill(0) - if 
(ref.err_outputs_b[t] == nil) then - ref.err_outputs_b[t] = {} - end - ref.err_outputs_b[t][i] = false - end - end - for i = 1, #ref.dim_out do - for t = 1 - self.chunk_size, self.chunk_size * 2 do - ref.outputs_m[t][i]:fill(self.gconf.nn_act_default) - if (ref.outputs_b[t] == nil) then - ref.outputs_b[t] = {} - end - ref.outputs_b[t][i] = false - ref.err_inputs_m[t][i]:fill(0) - if (ref.err_inputs_b[t] == nil) then - ref.err_inputs_b[t] = {} - end - ref.err_inputs_b[t][i] = false - end - end - end -end - ---reader: some reader ---Returns: bool, whether has new feed ---Returns: feeds, a table that will be filled with the reader's feeds -function TNN:getfeed_from_reader(reader) - local feeds_now = self.feeds_now - local got_new = reader:get_batch(feeds_now) - return got_new, feeds_now -end - -function TNN:move_right_to_nextmb(list_t) --move output history activations of 1..chunk_size to 1-chunk_size..0 - if list_t == nil then - list_t = {} - for i = 1, self.chunk_size do - list_t[i] = i - self.chunk_size - end - end - for i = 1, #list_t do - t = list_t[i] - if t < 1 - self.chunk_size or t > 0 then - nerv.error("MB move range error") - end - for id, ref in pairs(self.layers) do - for p = 1, #ref.dim_out do - ref.outputs_m[t][p]:copy_fromd(ref.outputs_m[t + self.chunk_size][p]) - end - end - end -end - -function TNN:net_propagate() --propagate according to feeds_now - for t = 1, self.chunk_size, 1 do - for id, ref in pairs(self.layers) do - for p = 1, #ref.dim_out do - ref.outputs_b[t][p] = false - end - for p = 1, #ref.dim_in do - ref.inputs_b[t][p] = false - end - end - end - - local feeds_now = self.feeds_now - for t = 1, self.chunk_size do - if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then - for i = 1, #self.dim_in do - local ref = self.inputs_p[i].ref - local p = self.inputs_p[i].port - ref.inputs_b[t][p] = true - self:propagate_dfs(ref, t) - end - end - end - - local flag_out = true - for t = 1, self.chunk_size do --check whether every output has been computed - if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0) then - for i = 1, #self.dim_out do - local ref = self.outputs_p[i].ref - if (ref.outputs_b[t][1] ~= true) then - flag_out = false - break - end - end - end - end - if (flag_out == false) then - nerv.error("some thing wrong, some labeled output is not propagated") - end -end - ---ref: the TNN_ref of a layer ---t: the current time to propagate -function TNN:propagate_dfs(ref, t) - if (self:out_of_feedrange(t)) then - return - end - if (ref.outputs_b[t][1] == true) then --already propagated, 1 is just a random port - return - end - - --print("debug dfs", ref.layer.id, t) - - local flag = true --whether have all inputs - for _, conn in pairs(ref.i_conns_p) do - local p = conn.dst.port - if (not (ref.inputs_b[t][p] or self:out_of_feedrange(t - conn.time))) then - flag = false - break - end - end - if (flag == false) then - return - end - - --ok, do propagate - --print("debug ok, propagating"); - --The MB moving will cause bordering history to be changed, so it is more wise to flush the input activation - if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border history - for i = 1, self.batch_size do - local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) - local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) - if (seq_start > 0 or seq_end > 0) then - for p, conn in pairs(ref.i_conns_p) do - if ((ref.i_conns_p[p].time 
> 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to default - ref.inputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default) - end - end - end - end - end - self.gconf.timer:tic("tnn_actual_layer_propagate") - ref.layer:propagate(ref.inputs_m[t], ref.outputs_m[t], t) --propagate! - self.gconf.timer:toc("tnn_actual_layer_propagate") - --[[ - if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --restore cross-border history - for i = 1, self.batch_size do - local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) - local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) - if (seq_start > 0 or seq_end > 0) then - for p, conn in pairs(ref.o_conns_p) do - if ((ref.o_conns_p[p].time > 0 and seq_end > 0) or (ref.o_conns_p[p].time < 0 and seq_start > 0)) then - ref.outputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default) - end - end - end - end - end - ]]-- - --set input flag for future layers - for i = 1, #ref.dim_out do - if (ref.outputs_b[t][i] == true) then - nerv.error("this time's outputs_b should be false") - end - ref.outputs_b[t][i] = true - end - - --try dfs for further layers - for _, conn in pairs(ref.o_conns_p) do - --print("debug dfs-searching", conn.dst.ref.layer.id) - conn.dst.ref.inputs_b[t + conn.time][conn.dst.port] = true - self:propagate_dfs(conn.dst.ref, t + conn.time) - end -end - ---do_update: bool, whether we are doing back-propagate or updating the parameters -function TNN:net_backpropagate(do_update) --propagate according to feeds_now - if do_update == nil then - nerv.error("do_update should not be nil") - end - for t = 1, self.chunk_size, 1 do - for id, ref in pairs(self.layers) do - for p = 1, #ref.dim_out do - ref.err_inputs_b[t][p] = false - end - for p = 1, #ref.dim_in do - ref.err_outputs_b[t][p] = false - end - end - end - - local feeds_now = self.feeds_now - for t = 1, self.chunk_size do - if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0 then - for i = 1, #self.dim_out do - local ref = self.outputs_p[i].ref - local p = self.outputs_p[i].port - ref.err_inputs_b[t][p] = true - self:backpropagate_dfs(ref, t, do_update) - end - end - end - - local flag_out = true - for t = 1, self.chunk_size do --check whether every output has been computed - if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0 then - for i = 1, #self.dim_in do - local ref = self.inputs_p[i].ref - if ref.err_outputs_b[t][1] ~= true then - flag_out = false - break - end - end - end - end - if (flag_out == false) then - nerv.error("some thing wrong, some input is not back_propagated") - end -end - ---ref: the TNN_ref of a layer ---t: the current time to propagate -function TNN:backpropagate_dfs(ref, t, do_update) - if self:out_of_feedrange(t) then - return - end - if ref.err_outputs_b[t][1] == true then --already back_propagated, 1 is just a random port - return - end - - --print("debug dfs", ref.layer.id, t) - - local flag = true --whether have all inputs - for _, conn in pairs(ref.o_conns_p) do - local p = conn.src.port - if (not (ref.err_inputs_b[t][p] or self:out_of_feedrange(t + conn.time))) then - flag = false - break - end - end - if (flag == false) then - return - end - - --ok, do back_propagate - --print("debug ok, back-propagating(or updating)") - if (do_update == false) then - self.gconf.timer:tic("tnn_actual_layer_backpropagate") - ref.layer:back_propagate(ref.err_inputs_m[t], ref.err_outputs_m[t], ref.inputs_m[t], 
ref.outputs_m[t], t) - self.gconf.timer:toc("tnn_actual_layer_backpropagate") - if self.clip_t > 0 then - for _, conn in pairs(ref.i_conns_p) do - local p = conn.dst.port --port for ref - if conn.time ~= 0 then - --print("debug clip_t tnn", ref.id, "port:", p, "clip:", self.clip_t) - ref.err_outputs_m[t][p]:clip(-self.clip_t, self.clip_t) - end - end - end - else - --print(ref.err_inputs_m[t][1]) - self.gconf.timer:tic("tnn_actual_layer_update") - ref.layer:update(ref.err_inputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t) - self.gconf.timer:toc("tnn_actual_layer_update") - end - - if (do_update == false and bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border errors - for i = 1, self.batch_size do - local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) - local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) - if (seq_start > 0 or seq_end > 0) then - for p, conn in pairs(ref.i_conns_p) do - if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to zero - ref.err_outputs_m[t][p][i - 1]:fill(0) - end - end - end - end - end - - for i = 1, #ref.dim_in do - if (ref.err_outputs_b[t][i] == true) then - nerv.error("this time's outputs_b should be false") - end - ref.err_outputs_b[t][i] = true - end - - --try dfs for further layers - for _, conn in pairs(ref.i_conns_p) do - --print("debug dfs-searching", conn.src.ref.layer.id) - conn.src.ref.err_inputs_b[t - conn.time][conn.src.port] = true - self:backpropagate_dfs(conn.src.ref, t - conn.time, do_update) - end -end - ---Return: nerv.ParamRepo -function TNN:get_params() - local param_repos = {} - for id, ref in pairs(self.layers) do - table.insert(param_repos, ref.layer:get_params()) - end - return nerv.ParamRepo.merge(param_repos) -end - diff --git a/nerv/init.lua b/nerv/init.lua index 9c1a5c8..b5d20a2 100644 --- a/nerv/init.lua +++ b/nerv/init.lua @@ -130,3 +130,4 @@ nerv.include('matrix/init.lua') nerv.include('io/init.lua') nerv.include('layer/init.lua') nerv.include('nn/init.lua') +nerv.include('tnn/init.lua') diff --git a/nerv/layer/elem_mul.lua b/nerv/layer/elem_mul.lua new file mode 100644 index 0000000..c809d3e --- /dev/null +++ b/nerv/layer/elem_mul.lua @@ -0,0 +1,38 @@ +local ElemMulLayer = nerv.class('nerv.ElemMulLayer', 'nerv.Layer') + +function ElemMulLayer:__init(id, global_conf, layer_conf) + self.id = id + self.dim_in = layer_conf.dim_in + self.dim_out = layer_conf.dim_out + self.gconf = global_conf + + self:check_dim_len(2, 1) -- Element-multiply input[1] and input[2] +end + +function ElemMulLayer:init(batch_size) + if self.dim_in[1] ~= self.dim_in[2] or + self.dim_in[1] ~= self.dim_out[1] then + nerv.error("dim_in and dim_out mismatch for ElemMulLayer") + end +end + +function ElemMulLayer:batch_resize(batch_size) + --do nothing +end + +function ElemMulLayer:propagate(input, output) + output[1]:mul_elem(input[1], input[2]) +end + +function ElemMulLayer:back_propagate(bp_err, next_bp_err, input, output) + next_bp_err[1]:mul_elem(bp_err[1], input[2]) + next_bp_err[2]:mul_elem(bp_err[1], input[1]) +end + +function ElemMulLayer:update(bp_err, input, output) + --do nothing +end + +function ElemMulLayer:get_params() + return nerv.ParamRepo({}) +end diff --git a/nerv/layer/gate_fff.lua b/nerv/layer/gate_fff.lua new file mode 100644 index 0000000..751dde1 --- /dev/null +++ b/nerv/layer/gate_fff.lua @@ -0,0 +1,71 @@ +local GateFFFLayer = 
nerv.class('nerv.GateFFFLayer', 'nerv.Layer') + +function GateFFFLayer:__init(id, global_conf, layer_conf) + self.id = id + self.dim_in = layer_conf.dim_in + self.dim_out = layer_conf.dim_out + self.gconf = global_conf + + self.ltp1 = self:find_param("ltp1", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp + self.ltp2 = self:find_param("ltp2", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp + self.ltp3 = self:find_param("ltp3", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[3], self.dim_out[1]}) --layer_conf.ltp + self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp + + self:check_dim_len(3, 1) -- exactly one input and one output +end + +function GateFFFLayer:init(batch_size) + if self.ltp1.trans:ncol() ~= self.bp.trans:ncol() or + self.ltp2.trans:ncol() ~= self.bp.trans:ncol() or + self.ltp3.trans:ncol() ~= self.bp.trans:ncol() then + nerv.error("mismatching dimensions of linear transform and bias paramter") + end + if self.dim_in[1] ~= self.ltp1.trans:nrow() or + self.dim_in[2] ~= self.ltp2.trans:nrow() or + self.dim_in[3] ~= self.ltp3.trans:nrow() then + nerv.error("mismatching dimensions of linear transform parameter and input") + end + if self.dim_out[1] ~= self.ltp1.trans:ncol() then + nerv.error("mismatching dimensions of linear transform parameter and output") + end + self.ltp1:train_init() + self.ltp2:train_init() + self.ltp3:train_init() + self.bp:train_init() + self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1]) +end + +function GateFFFLayer:batch_resize(batch_size) + if self.err_m:nrow() ~= batch_size then + self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1]) + end +end + +function GateFFFLayer:propagate(input, output) + -- apply linear transform + output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N') + output[1]:mul(input[2], self.ltp2.trans, 1.0, 1.0, 'N', 'N') + output[1]:mul(input[3], self.ltp3.trans, 1.0, 1.0, 'N', 'N') + -- add bias + output[1]:add_row(self.bp.trans, 1.0) + output[1]:sigmoid(output[1]) +end + +function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output) + self.err_bakm:sigmoid_grad(bp_err[1], output[1]) + next_bp_err[1]:mul(self.err_bakm, self.ltp1.trans, 1.0, 0.0, 'N', 'T') + next_bp_err[2]:mul(self.err_bakm, self.ltp2.trans, 1.0, 0.0, 'N', 'T') + next_bp_err[3]:mul(self.err_bakm, self.ltp3.trans, 1.0, 0.0, 'N', 'T') +end + +function GateFFFLayer:update(bp_err, input, output) + self.err_bakm:sigmoid_grad(bp_err[1], output[1]) + self.ltp1:update_by_err_input(self.err_bakm, input[1]) + self.ltp2:update_by_err_input(self.err_bakm, input[2]) + self.ltp3:update_by_err_input(self.err_bakm, input[3]) + self.bp:update_by_gradient(self.err_bakm:colsum()) +end + +function GateFFFLayer:get_params() + return nerv.ParamRepo({self.ltp1, self.ltp2, self.ltp3, self.bp}) +end diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua index 32b82d8..23606e1 100644 --- a/nerv/layer/init.lua +++ b/nerv/layer/init.lua @@ -100,3 +100,5 @@ nerv.include('mse.lua') nerv.include('combiner.lua') nerv.include('affine_recurrent.lua') nerv.include('softmax.lua') +nerv.include('elem_mul.lua') +nerv.include('gate_fff.lua') diff --git a/nerv/tnn/init.lua b/nerv/tnn/init.lua new file mode 100644 index 0000000..979f5d8 --- /dev/null +++ b/nerv/tnn/init.lua @@ -0,0 +1,49 @@ +local LayerT = nerv.class('nerv.LayerT') + +function LayerT:__init(id, global_conf, layer_conf) + 
nerv.error_method_not_implemented() +end + +function LayerT:init(batch_size, chunk_size) + nerv.error_method_not_implemented() +end + +function LayerT:update(bp_err, input, output, t) + nerv.error_method_not_implemented() +end + +function LayerT:propagate(input, output, t) + nerv.error_method_not_implemented() +end + +function LayerT:back_propagate(bp_err, next_bp_err, input, output, t) + nerv.error_method_not_implemented() +end + +function LayerT:check_dim_len(len_in, len_out) + local expected_in = #self.dim_in + local expected_out = #self.dim_out + if len_in > 0 and expected_in ~= len_in then + nerv.error("layer %s expects %d inputs, %d given", + self.id, len_in, expected_in) + end + if len_out > 0 and expected_out ~= len_out then + nerv.error("layer %s expects %d outputs, %d given", + self.id, len_out, expected_out) + end +end + +function LayerT:get_params() + nerv.error_method_not_implemented() +end + +function LayerT:get_dim() + return self.dim_in, self.dim_out +end + +nerv.include('sutil.lua') +nerv.include('tnn.lua') +nerv.include('layersT/softmax_ce_t.lua') +nerv.include('layersT/lstm_t.lua') +nerv.include('layersT/dropout_t.lua') +nerv.include('layer_dag_t.lua') diff --git a/nerv/tnn/layer_dag_t.lua b/nerv/tnn/layer_dag_t.lua new file mode 100644 index 0000000..e3a9316 --- /dev/null +++ b/nerv/tnn/layer_dag_t.lua @@ -0,0 +1,386 @@ +local DAGLayerT = nerv.class("nerv.DAGLayerT", "nerv.LayerT") + +local function parse_id(str) + local id, port, _ + _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]") + if id == nil or port == nil then + _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]") + if not (id == "" or id == "") then + nerv.error("wrong format of connection id") + end + end + port = tonumber(port) + return id, port +end + +local function discover(id, layers, layer_repo) + local ref = layers[id] + if id == "" or id == "" then + return nil + end + if ref == nil then + local layer = layer_repo:get_layer(id) + local dim_in, dim_out = layer:get_dim() + ref = { + id = layer.id, + layer = layer, + inputs = {}, + outputs = {}, + err_inputs = {}, + err_outputs = {}, + next_layers = {}, + input_len = #dim_in, + output_len = #dim_out, + in_deg = 0, + visited = false + } + layers[id] = ref + end + return ref +end + +function DAGLayerT:__init(id, global_conf, layer_conf) + local layers = {} + local inputs = {} + local outputs = {} + local dim_in = layer_conf.dim_in + local dim_out = layer_conf.dim_out + local parsed_conn = {} + for from, to in pairs(layer_conf.connections) do + local id_from, port_from = parse_id(from) + local id_to, port_to = parse_id(to) + local ref_from = discover(id_from, layers, layer_conf.sub_layers) + local ref_to = discover(id_to, layers, layer_conf.sub_layers) + local input_dim, output_dim, _ + if id_from == "" then + input_dim, _ = ref_to.layer:get_dim() + if dim_in[port_from] ~= input_dim[port_to] then + nerv.error("mismatching data dimension between %s and %s", from, to) + end + inputs[port_from] = {ref_to, port_to} + if ref_to.inputs[1] == nil then + ref_to.inputs[1] = {} + end + if ref_to.inputs[1][port_to] ~= nil then + nerv.error("port(%d) for layer(%s) already attached", port_to, to) + end + ref_to.inputs[1][port_to] = inputs -- just a place holder + elseif id_to == "" then + _, output_dim = ref_from.layer:get_dim() + if output_dim[port_from] ~= dim_out[port_to] then + nerv.error("mismatching data dimension between %s and %s", from, to) + end + outputs[port_to] = {ref_from, port_from} + if ref_from.outputs[1] == nil then + 
ref_from.outputs[1] = {} + end + if ref_from.outputs[1][port_from] ~= nil then + nerv.error("port(%d) for layer(%s) already attached", port_from, from) + end + ref_from.outputs[1] = {} + ref_from.outputs[1][port_from] = outputs -- just a place holder + else + _, output_dim = ref_from.layer:get_dim() + input_dim, _ = ref_to.layer:get_dim() + if output_dim[port_from] ~= input_dim[port_to] then + nerv.error("mismatching data dimension between %s and %s", from, to) + end + + table.insert(parsed_conn, + {{ref_from, port_from}, {ref_to, port_to}}) + table.insert(ref_from.next_layers, ref_to) -- add edge + ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer + end + end + + -- topology sort + local queue = {} + local l = 1 + local r = 1 + for id, ref in pairs(layers) do + if ref.in_deg == 0 then + table.insert(queue, ref) + nerv.info("adding source layer: %s", id) + r = r + 1 + end + end + if l == r then + nerv.error("loop detected") + end + while l < r do + local cur = queue[l] + cur.visited = true + l = l + 1 + for _, nl in pairs(cur.next_layers) do + nl.in_deg = nl.in_deg - 1 + if nl.in_deg == 0 then + table.insert(queue, nl) + r = r + 1 + end + end + end + for i = 1, #queue do + nerv.info("enqueued layer: %s %s", queue[i].layer, queue[i].layer.id) + end + + for id, ref in pairs(layers) do + -- check wether the graph is connected + if ref.visited == false then + nerv.warning("layer %s is ignored", id) + end + end + + self.layers = layers + self.inputs = inputs + self.outputs = outputs + self.id = id + self.dim_in = dim_in + self.dim_out = dim_out + self.parsed_conn = parsed_conn + self.queue = queue + self.gconf = global_conf +end + +function DAGLayerT:init(batch_size, chunk_size) + nerv.info("initing DAGLayerT %s...\n", self.id) + if chunk_size == nil then + chunk_size = 1 + nerv.info("(Initing DAGLayerT) chunk_size is nil, setting it to default 1\n") + end + + self.chunk_size = chunk_size + + for i, conn in ipairs(self.parsed_conn) do + local _, output_dim + local ref_from, port_from, ref_to, port_to + ref_from, port_from = unpack(conn[1]) + ref_to, port_to = unpack(conn[2]) + _, output_dim = ref_from.layer:get_dim() + local dim = 1 + if output_dim[port_from] > 0 then + dim = output_dim[port_from] + end + + for t = 1, chunk_size do + local mid = self.gconf.cumat_type(batch_size, dim) + local err_mid = mid:create() + + if ref_from.outputs[t] == nil then + ref_from.outputs[t] = {} + end + if ref_to.inputs[t] == nil then + ref_to.inputs[t] = {} + end + if ref_to.err_outputs[t] == nil then + ref_to.err_outputs[t] = {} + end + if ref_from.err_inputs[t] == nil then + ref_from.err_inputs[t] = {} + end + + ref_from.outputs[t][port_from] = mid + ref_to.inputs[t][port_to] = mid + + ref_from.err_inputs[t][port_from] = err_mid + ref_to.err_outputs[t][port_to] = err_mid + end + end + for id, ref in pairs(self.layers) do + for i = 1, ref.input_len do + if ref.inputs[1][i] == nil then --peek at time 1 + nerv.error("dangling input port %d of layer %s", i, id) + end + end + for i = 1, ref.output_len do + if ref.outputs[1][i] == nil then --peek at time 1 + nerv.error("dangling output port %d of layer %s", i, id) + end + end + -- initialize sub layers + ref.layer:init(batch_size, chunk_size) + end + for i = 1, #self.dim_in do + if self.inputs[i] == nil then + nerv.error("dangling port %d of layer ", i) + end + end + for i = 1, #self.dim_out do + if self.outputs[i] == nil then + nerv.error("dangling port %d of layer ", i) + end + end +end + +function DAGLayerT:batch_resize(batch_size, 
chunk_size) + if chunk_size == nil then + chunk_size = 1 + end + if batch_size ~= self.gconf.batch_size + or chunk_size ~= self.gconf.chunk_size then + nerv.printf("warn: in DAGLayerT:batch_resize, the batch_size ~= gconf.batch_size, or chunk_size ~= gconf.chunk_size") + end + self.gconf.batch_size = batch_size + self.gconf.chunk_size = chunk_size + + for i, conn in ipairs(self.parsed_conn) do + local _, output_dim + local ref_from, port_from, ref_to, port_to + ref_from, port_from = unpack(conn[1]) + ref_to, port_to = unpack(conn[2]) + _, output_dim = ref_from.layer:get_dim() + + for t = 1, chunk_size do + if ref_from.outputs[t] == nil then + ref_from.outputs[t] = {} + end + if ref_to.inputs[t] == nil then + ref_to.inputs[t] = {} + end + if ref_from.err_outputs[t] == nil then + ref_from.err_outputs[t] = {} + end + if ref_from.err_inputs[t] == nil then + ref_from.err_inputs[t] = {} + end + + local mid = self.gconf.cumat_type(batch_size, dim) + local err_mid = mid:create() + + ref_from.outputs[t][port_from] = mid + ref_to.inputs[t][port_to] = mid + + ref_from.err_inputs[t][port_from] = err_mid + ref_to.err_outputs[t][port_to] = err_mid + end + end + for id, ref in pairs(self.layers) do + ref.layer:batch_resize(batch_size, chunk_size) + end + collectgarbage("collect") +end + +function DAGLayerT:set_inputs(input, t) + for i = 1, #self.dim_in do + if input[i] == nil then + nerv.error("some input is not provided"); + end + local layer = self.inputs[i][1] + local port = self.inputs[i][2] + if layer.inputs[t] == nil then + layer.inputs[t] = {} + end + layer.inputs[t][port] = input[i] + end +end + +function DAGLayerT:set_outputs(output, t) + for i = 1, #self.dim_out do + if output[i] == nil then + nerv.error("some output is not provided"); + end + local layer = self.outputs[i][1] + local port = self.outputs[i][2] + if layer.outputs[t] == nil then + layer.outputs[t] = {} + end + layer.outputs[t][port] = output[i] + end +end + +function DAGLayerT:set_err_inputs(bp_err, t) + for i = 1, #self.dim_out do + local layer = self.outputs[i][1] + local port = self.outputs[i][2] + if layer.err_inputs[t] == nil then + layer.err_inputs[t] = {} + end + layer.err_inputs[t][port] = bp_err[i] + end +end + +function DAGLayerT:set_err_outputs(next_bp_err, t) + for i = 1, #self.dim_in do + local layer = self.inputs[i][1] + local port = self.inputs[i][2] + if layer.err_outputs[t] == nil then + layer.err_outputs[t] = {} + end + layer.err_outputs[t][port] = next_bp_err[i] + end +end + +function DAGLayerT:update(bp_err, input, output, t) + if t == nil then + t = 1 + end + self:set_err_inputs(bp_err, t) + self:set_inputs(input, t) + self:set_outputs(output, t) + for id, ref in pairs(self.queue) do + ref.layer:update(ref.err_inputs[t], ref.inputs[t], ref.outputs[t], t) + end +end + +function DAGLayerT:propagate(input, output, t) + if t == nil then + t = 1 + end + self:set_inputs(input, t) + self:set_outputs(output, t) + local ret = false + for i = 1, #self.queue do + local ref = self.queue[i] + --print("debug DAGLAyerT:propagate", ref.id, t) + ret = ref.layer:propagate(ref.inputs[t], ref.outputs[t], t) + end + return ret +end + +function DAGLayerT:back_propagate(bp_err, next_bp_err, input, output, t) + if t == nil then + t = 1 + end + self:set_err_outputs(next_bp_err, t) + self:set_err_inputs(bp_err, t) + self:set_inputs(input, t) + self:set_outputs(output, t) + for i = #self.queue, 1, -1 do + local ref = self.queue[i] + ref.layer:back_propagate(ref.err_inputs[t], ref.err_outputs[t], ref.inputs[t], ref.outputs[t], t) + end 
+end + +function DAGLayerT:get_params() + local param_repos = {} + for id, ref in pairs(self.queue) do + table.insert(param_repos, ref.layer:get_params()) + end + return nerv.ParamRepo.merge(param_repos) +end + +DAGLayerT.PORT_TYPES = { + INPUT = {}, + OUTPUT = {}, + ERR_INPUT = {}, + ERR_OUTPUT = {} +} + +function DAGLayerT:get_intermediate(id, port_type) + if id == "" or id == "" then + nerv.error("an actual real layer id is expected") + end + local layer = self.layers[id] + if layer == nil then + nerv.error("layer id %s not found", id) + end + if port_type == DAGLayerT.PORT_TYPES.INPUT then + return layer.inputs + elseif port_type == DAGLayerT.PORT_TYPES.OUTPUT then + return layer.outputs + elseif port_type == DAGLayerT.PORT_TYPES.ERR_INPUT then + return layer.err_inputs + elseif port_type == DAGLayerT.PORT_TYPES.ERR_OUTPUT then + return layer.err_outputs + end + nerv.error("unrecognized port type") +end diff --git a/nerv/tnn/layersT/dropout_t.lua b/nerv/tnn/layersT/dropout_t.lua new file mode 100644 index 0000000..4351285 --- /dev/null +++ b/nerv/tnn/layersT/dropout_t.lua @@ -0,0 +1,71 @@ +local Dropout = nerv.class("nerv.DropoutLayerT", "nerv.LayerT") + +function Dropout:__init(id, global_conf, layer_conf) + self.id = id + self.gconf = global_conf + self.dim_in = layer_conf.dim_in + self.dim_out = layer_conf.dim_out + self:check_dim_len(1, 1) -- two inputs: nn output and label +end + +function Dropout:init(batch_size, chunk_size) + if self.dim_in[1] ~= self.dim_out[1] then + nerv.error("mismatching dimensions of input and output") + end + if chunk_size == nil then + chunk_size = 1 + end + self.mask_t = {} + for t = 1, chunk_size do + self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) + end +end + +function Dropout:batch_resize(batch_size, chunk_size) + if chunk_size == nil then + chunk_size = 1 + end + for t = 1, chunk_size do + if self.mask_t[t] == nil or self.mask_t[t]:nrow() ~= batch_size then + self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) + end + end +end + +function Dropout:propagate(input, output, t) + if t == nil then + t = 1 + end + if self.gconf.dropout_rate == nil then + nerv.info("DropoutLayerT:propagate warning, global_conf.dropout_rate is nil, setting it zero") + self.gconf.dropout_rate = 0 + end + + if self.gconf.dropout_rate == 0 then + output[1]:copy_fromd(input[1]) + else + self.mask_t[t]:rand_uniform() + --since we will lose a portion of the actvations, we multiply the activations by 1/(1-dr) to compensate + self.mask_t[t]:thres_mask(self.mask_t[t], self.gconf.dropout_rate, 0, 1 / (1.0 - self.gconf.dropout_rate)) + output[1]:mul_elem(input[1], self.mask_t[t]) + end +end + +function Dropout:update(bp_err, input, output, t) + -- no params, therefore do nothing +end + +function Dropout:back_propagate(bp_err, next_bp_err, input, output, t) + if t == nil then + t = 1 + end + if self.gconf.dropout_rate == 0 then + next_bp_err[1]:copy_fromd(bp_err[1]) + else + next_bp_err[1]:mul_elem(bp_err[1], self.mask_t[t]) + end +end + +function Dropout:get_params() + return nerv.ParamRepo({}) +end diff --git a/nerv/tnn/layersT/lstm_t.lua b/nerv/tnn/layersT/lstm_t.lua new file mode 100644 index 0000000..ded6058 --- /dev/null +++ b/nerv/tnn/layersT/lstm_t.lua @@ -0,0 +1,125 @@ +local LSTMLayerT = nerv.class('nerv.LSTMLayerT', 'nerv.LayerT') + +function LSTMLayerT:__init(id, global_conf, layer_conf) + --input1:x input2:h input3:c + self.id = id + self.dim_in = layer_conf.dim_in + self.dim_out = layer_conf.dim_out + self.gconf = global_conf + + 
--prepare a DAGLayerT to hold the lstm structure
+    local pr = layer_conf.pr
+    if pr == nil then
+        pr = nerv.ParamRepo()
+    end
+
+    local function ap(str)
+        return self.id .. '.' .. str
+    end
+
+    local layers = {
+        ["nerv.CombinerLayer"] = {
+            [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
+                ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
+            [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
+                ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
+            [ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
+                ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
+            [ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]},
+                ["lambda"] = {1, 1}}},
+        },
+        ["nerv.AffineLayer"] = {
+            [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+                ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
+        },
+        ["nerv.TanhLayer"] = {
+            [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+            [ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+        },
+        ["nerv.GateFFFLayer"] = {
+            [ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+                ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+            [ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+                ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+            [ap("outputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+                ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+
+        },
+        ["nerv.ElemMulLayer"] = {
+            [ap("inputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+            [ap("forgetGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+            [ap("outputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+        },
+    }
+
+    local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+
+    local connections_t = {
+        ["<input>[1]"] = ap("inputXDup[1]"),
+        ["<input>[2]"] = ap("inputHDup[1]"),
+        ["<input>[3]"] = ap("inputCDup[1]"),
+
+        [ap("inputXDup[1]")] = ap("mainAffineL[1]"),
+        [ap("inputHDup[1]")] = ap("mainAffineL[2]"),
+        [ap("inputCDup[1]")] = ap("mainAffineL[3]"),
+        [ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
+
+        [ap("inputXDup[2]")] = ap("inputGateL[1]"),
+        [ap("inputHDup[2]")] = ap("inputGateL[2]"),
+        [ap("inputCDup[2]")] = ap("inputGateL[3]"),
+
+        [ap("inputXDup[3]")] = ap("forgetGateL[1]"),
+        [ap("inputHDup[3]")] = ap("forgetGateL[2]"),
+        [ap("inputCDup[3]")] = ap("forgetGateL[3]"),
+
+        [ap("mainTanhL[1]")] = ap("inputGMulL[1]"),
+        [ap("inputGateL[1]")] = ap("inputGMulL[2]"),
+
+        [ap("inputCDup[4]")] = ap("forgetGMulL[1]"),
+        [ap("forgetGateL[1]")] = ap("forgetGMulL[2]"),
+
+        [ap("inputGMulL[1]")] = ap("mainCDup[1]"),
+        [ap("forgetGMulL[1]")] = ap("mainCDup[2]"),
+
+        [ap("inputXDup[4]")] = ap("outputGateL[1]"),
+        [ap("inputHDup[4]")] = ap("outputGateL[2]"),
+        [ap("mainCDup[3]")] = ap("outputGateL[3]"),
+
+        [ap("mainCDup[2]")] = "<output>[2]",
+        [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
+
+        [ap("outputTanhL[1]")] = ap("outputGMulL[1]"),
+        [ap("outputGateL[1]")] = ap("outputGMulL[2]"),
+
+        [ap("outputGMulL[1]")] = "<output>[1]",
+    }
+    self.dagL = nerv.DAGLayerT(self.id, global_conf,
+            {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] =
layerRepo, + ["connections"] = connections_t}) + + self:check_dim_len(3, 2) -- x, h, c and h, c +end + +function LSTMLayerT:init(batch_size, chunk_size) + self.dagL:init(batch_size, chunk_size) +end + +function LSTMLayerT:batch_resize(batch_size, chunk_size) + self.dagL:batch_resize(batch_size, chunk_size) +end + +function LSTMLayerT:update(bp_err, input, output, t) + self.dagL:update(bp_err, input, output, t) +end + +function LSTMLayerT:propagate(input, output, t) + self.dagL:propagate(input, output, t) +end + +function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output, t) + self.dagL:back_propagate(bp_err, next_bp_err, input, output, t) +end + +function LSTMLayerT:get_params() + return self.dagL:get_params() +end diff --git a/nerv/tnn/layersT/softmax_ce_t.lua b/nerv/tnn/layersT/softmax_ce_t.lua new file mode 100644 index 0000000..a9ce975 --- /dev/null +++ b/nerv/tnn/layersT/softmax_ce_t.lua @@ -0,0 +1,93 @@ +local SoftmaxCELayer = nerv.class("nerv.SoftmaxCELayerT", "nerv.LayerT") + +function SoftmaxCELayer:__init(id, global_conf, layer_conf) + self.id = id + self.gconf = global_conf + self.dim_in = layer_conf.dim_in + self.dim_out = layer_conf.dim_out + self.compressed = layer_conf.compressed + if self.compressed == nil then + self.compressed = false + end + self:check_dim_len(2, -1) -- two inputs: nn output and label +end + +function SoftmaxCELayer:init(batch_size, chunk_size) + if not self.compressed and (self.dim_in[1] ~= self.dim_in[2]) then + nerv.error("mismatching dimensions of previous network output and labels") + end + if chunk_size == nil then + chunk_size = 1 + end + self.total_ce = 0.0 + self.total_correct = 0 + self.total_frames = 0 + self.softmax_t = {} + self.ce_t = {} + for t = 1, chunk_size do + self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) + self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) + end +end + +function SoftmaxCELayer:batch_resize(batch_size, chunk_size) + if chunk_size == nil then + chunk_size = 1 + end + for t = 1, chunk_size do + if self.softmax_t[t]:nrow() ~= batch_size then + self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) + self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1]) + end + end +end + +function SoftmaxCELayer:update(bp_err, input, output, t) + -- no params, therefore do nothing +end + +function SoftmaxCELayer:propagate(input, output, t) + if t == nil then + t = 1 + end + local softmax = self.softmax_t[t] + local ce = self.ce_t[t] + local classified = softmax:softmax(input[1]) + local label = input[2] + ce:log_elem(softmax) + if self.compressed then + label = label:decompress(input[1]:ncol()) + end + ce:mul_elem(ce, label) + ce = ce:rowsum() + if output[1] ~= nil then + output[1]:copy_fromd(ce) + end + -- add total ce + self.total_ce = self.total_ce - ce:colsum()[0][0] + self.total_frames = self.total_frames + softmax:nrow() + -- TODO: add colsame for uncompressed label + if self.compressed then + self.total_correct = self.total_correct + classified:colsame(input[2])[0][0] + end +end + +function SoftmaxCELayer:back_propagate(bp_err, next_bp_err, input, output, t) + -- softmax output - label + if t == nil then + t = 1 + end + local label = input[2] + if self.compressed then + label = label:decompress(input[1]:ncol()) + end + local nbe = next_bp_err[1] + nbe:add(self.softmax_t[t], label, 1.0, -1.0) + if bp_err[1] ~= nil then + nbe:scale_rows_by_col(bp_err[1]) + end +end + +function SoftmaxCELayer:get_params() + return nerv.ParamRepo({}) +end diff --git 
a/nerv/tnn/sutil.lua b/nerv/tnn/sutil.lua new file mode 100644 index 0000000..f5bc408 --- /dev/null +++ b/nerv/tnn/sutil.lua @@ -0,0 +1,52 @@ +local Util = nerv.class("nerv.SUtil") --Scheduler Utility + +function Util.simple_split(inputstr, sep) + if sep == nil then + sep = "%s" + end + local t={} ; i=1 + for str in string.gmatch(inputstr, "([^"..sep.."]+)") do + t[i] = str + i = i + 1 + end + return t +end + +function Util.parse_schedule(str) + --parse a string like "1.2*10:1.5" to a list of numbers + local sch = {} + local s = Util.simple_split(str, ':') + for i = 1, #s do + local p = Util.simple_split(s[i], "%*") + if #p ~= 2 and #p ~= 1 then + nerv.error("nerv.SUtil:parse_schedule error, unit(%s) not proper, has %d components.", s[i], #p) + end + if p[2] == nil then + p[2] = "1" + end + p[1] = tonumber(p[1]) + p[2] = tonumber(p[2]) + for j = 1, p[2] do + table.insert(sch, p[1]) + end + end + + --for i = 1, #sch do + -- print(sch[i]) + --end + return sch +end + +function Util.sche_get(s, it) + --get s[it] + if s == nil then + nerv.info("Util.sche_get: warning, scheule is nil, returning zero...") + return 0 + end + if #s >= it then + return s[it] + else + nerv.info("Util.sche_get: warning, it(%d) > #schedule(%d), returning the last one of schedule(%f)...", it, #s, s[#s]) + return s[#s] + end +end diff --git a/nerv/tnn/tnn.lua b/nerv/tnn/tnn.lua new file mode 100644 index 0000000..56c9dc0 --- /dev/null +++ b/nerv/tnn/tnn.lua @@ -0,0 +1,565 @@ +local TNN = nerv.class("nerv.TNN") + +local function parse_id(str) + --used to parse layerid[portid],time + local id, port, time, _ + _, _, id, port, time = string.find(str, "([a-zA-Z0-9_]+)%[([0-9]+)%][,]*([0-9]*)") + if id == nil or port == nil then + _, _, id, port, time = string.find(str, "(.+)%[([0-9]+)%][,]*([0-9]*)") + if not (id == "" or id == "") then + nerv.error("wrong format of connection id") + end + end + --print(str, id, port, time) + port = tonumber(port) + if (time == nil) then + time = 0 + else + time = tonumber(time) + end + --now time don't need to be parsed + return id, port +end + +local function discover(id, layers, layer_repo) + local ref = layers[id] + if id == "" or id == "" then + return nil + end + if ref == nil then + local layer = layer_repo:get_layer(id) + local dim_in, dim_out = layer:get_dim() + ref = { + layer = layer, + id = layer.id, + inputs_m = {}, --storage for computation, inputs_m[time][port] + inputs_b = {}, --inputs_g[time][port], whether this input can been computed + inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation, inputs_p_matbak[port] + outputs_m = {}, + outputs_b = {}, + err_inputs_m = {}, + err_inputs_matbak_p = {}, --which is a back-up space to handle some cross-border computation + err_inputs_b = {}, + err_outputs_m = {}, + err_outputs_b = {}, + i_conns_p = {}, --list of inputing connections + o_conns_p = {}, --list of outputing connections + dim_in = dim_in, --list of dimensions of ports + dim_out = dim_out, + } + layers[id] = ref + end + return ref +end + +nerv.TNN.FC = {} --flag const +nerv.TNN.FC.SEQ_START = 4 +nerv.TNN.FC.SEQ_END = 8 +nerv.TNN.FC.HAS_INPUT = 1 +nerv.TNN.FC.HAS_LABEL = 2 +nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --This instance have both input and label + +function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, global_conf, st_c, p_c, t_c) + --Return a table of matrix storage from time (1-chunk_size)..(2*chunk_size) + if (type(st) ~= "table") then + nerv.error("st should be a table") + end + 
for i = 1 - chunk_size - 1, chunk_size * 2 + 1 do --intentionally allocated more time, should be [1-chunk_size, chunk_size*2]
+        if (st[i] == nil) then
+            st[i] = {}
+        end
+        st[i][p] = global_conf.cumat_type(batch_size, dim)
+        st[i][p]:fill(0)
+        if (st_c ~= nil) then
+            if (st_c[i + t_c] == nil) then
+                st_c[i + t_c] = {}
+            end
+            st_c[i + t_c][p_c] = st[i][p]
+        end
+    end
+end
+
+function TNN:out_of_feedrange(t) --out of chunk, or no input, for the current feed
+    if (t < 1 or t > self.chunk_size) then
+        return true
+    end
+    if (self.feeds_now.flagsPack_now[t] == 0 or self.feeds_now.flagsPack_now[t] == nil) then
+        return true
+    end
+    return false
+end
+
+function TNN:__init(id, global_conf, layer_conf)
+    self.clip_t = layer_conf.clip_t
+    if self.clip_t == nil then
+        self.clip_t = 0
+    end
+    if self.clip_t > 0 then
+        nerv.info("tnn(%s) will clip gradient across time with %f...", id, self.clip_t)
+    end
+    local layers = {}
+    local inputs_p = {} --map:port of the TNN to layer ref and port
+    local outputs_p = {}
+    local dim_in = layer_conf.dim_in
+    local dim_out = layer_conf.dim_out
+    local parsed_conns = {}
+    local _
+
+    for _, ll in pairs(layer_conf.connections) do
+        local id_from, port_from = parse_id(ll[1])
+        local id_to, port_to = parse_id(ll[2])
+        local time_to = ll[3]
+
+        print(id_from, id_to, time_to)
+
+        local ref_from = discover(id_from, layers, layer_conf.sub_layers)
+        local ref_to = discover(id_to, layers, layer_conf.sub_layers)
+
+        if (id_from == "<input>") then
+            if (dim_in[port_from] ~= ref_to.dim_in[port_to] or time_to ~= 0) then
+                nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3])
+            end
+            inputs_p[port_from] = {["ref"] = ref_to, ["port"] = port_to}
+            ref_to.inputs_m[port_to] = {} --just a place holder
+        elseif (id_to == "<output>") then
+            if (dim_out[port_to] ~= ref_from.dim_out[port_from] or time_to ~= 0) then
+                nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3])
+            end
+            outputs_p[port_to] = {["ref"] = ref_from, ["port"] = port_from}
+            ref_from.outputs_m[port_from] = {} --just a place holder
+        else
+            local conn_now = {
+                ["src"] = {["ref"] = ref_from, ["port"] = port_from},
+                ["dst"] = {["ref"] = ref_to, ["port"] = port_to},
+                ["time"] = time_to
+            }
+            if (ref_to.dim_in[port_to] ~= ref_from.dim_out[port_from]) then
+                nerv.error("mismatch dimension or wrong time %s,%s,%d", ll[1], ll[2], ll[3])
+            end
+            table.insert(parsed_conns, conn_now)
+            ref_to.i_conns_p[conn_now.dst.port] = conn_now
+            ref_from.o_conns_p[conn_now.src.port] = conn_now
+        end
+    end
+
+    for id, ref in pairs(layers) do
+        print(id, "#dim_in:", #ref.dim_in, "#dim_out:", #ref.dim_out, "#i_conns_p:", #ref.i_conns_p, "#o_conns_p", #ref.o_conns_p)
+    end
+
+    self.layers = layers
+    self.inputs_p = inputs_p
+    self.outputs_p = outputs_p
+    self.id = id
+    self.dim_in = dim_in
+    self.dim_out = dim_out
+    self.parsed_conns = parsed_conns
+    self.gconf = global_conf
+end
+
+function TNN:init(batch_size, chunk_size)
+    self.batch_size = batch_size
+    self.chunk_size = chunk_size
+    for i, conn in ipairs(self.parsed_conns) do --init storage for connections inside the NN
+        local _, output_dim
+        local ref_from, port_from, ref_to, port_to, time
+        ref_from, port_from = conn.src.ref, conn.src.port
+        ref_to, port_to = conn.dst.ref, conn.dst.port
+        time = conn.time
+
+        local dim = ref_from.dim_out[port_from]
+        if (dim == 0) then
+            nerv.error("layer %s has a zero dim port", ref_from.layer.id)
+        end
+
+        print("TNN initing storage", ref_from.layer.id, "->", ref_to.layer.id)
+        ref_to.inputs_matbak_p[port_to] =
self.gconf.cumat_type(batch_size, dim) + self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.inputs_m, port_to, time) + ref_from.err_inputs_matbak_p[port_from] = self.gconf.cumat_type(batch_size, dim) + self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.err_outputs_m, port_to, time) + + end + + self.outputs_m = {} + self.err_inputs_m = {} + for i = 1, #self.dim_out do --Init storage for output ports + local ref = self.outputs_p[i].ref + local p = self.outputs_p[i].port + self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.outputs_m, i, 0) + self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.err_inputs_m, i, 0) + end + + self.inputs_m = {} + self.err_outputs_m = {} + for i = 1, #self.dim_in do --Init storage for input ports + local ref = self.inputs_p[i].ref + local p = self.inputs_p[i].port + self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.inputs_m, i, 0) + self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.err_outputs_m, i, 0) + end + + for id, ref in pairs(self.layers) do --Calling init for child layers + for i = 1, #ref.dim_in do + if (ref.inputs_m[i] == nil or ref.err_outputs_m[i] == nil) then + nerv.error("dangling input port %d of layer %s", i, id) + end + end + for i = 1, #ref.dim_out do + if (ref.outputs_m[i] == nil or ref.err_inputs_m[i] == nil) then + nerv.error("dangling output port %d of layer %s", i, id) + end + end + -- initialize sub layers + ref.layer:init(batch_size, chunk_size) + end + + local flags_now = {} + local flagsPack_now = {} + for i = 1, chunk_size do + flags_now[i] = {} + flagsPack_now[i] = 0 + end + + self.feeds_now = {} --feeds is for the reader to fill + self.feeds_now.inputs_m = self.inputs_m + self.feeds_now.flags_now = flags_now + self.feeds_now.flagsPack_now = flagsPack_now + + self:flush_all() +end + +--[[ +function DAGLayer:batch_resize(batch_size) + self.gconf.batch_size = batch_size + + for i, conn in ipairs(self.parsed_conn) do + local _, output_dim + local ref_from, port_from, ref_to, port_to + ref_from, port_from = unpack(conn[1]) + ref_to, port_to = unpack(conn[2]) + _, output_dim = ref_from.layer:get_dim() + + if ref_from.outputs[port_from]:nrow() ~= batch_size and output_dim[port_from] > 0 then + local mid = self.gconf.cumat_type(batch_size, output_dim[port_from]) + local err_mid = mid:create() + + ref_from.outputs[port_from] = mid + ref_to.inputs[port_to] = mid + + ref_from.err_inputs[port_from] = err_mid + ref_to.err_outputs[port_to] = err_mid + end + end + for id, ref in pairs(self.layers) do + ref.layer:batch_resize(batch_size) + end + collectgarbage("collect") +end +]]-- + +function TNN:flush_all() --flush all history and activation + local _, ref + for _, ref in pairs(self.layers) do + for i = 1, #ref.dim_in do + for t = 1 - self.chunk_size, self.chunk_size * 2 do + ref.inputs_m[t][i]:fill(self.gconf.nn_act_default) + if (ref.inputs_b[t] == nil) then + ref.inputs_b[t] = {} + end + ref.inputs_b[t][i] = false + ref.err_outputs_m[t][i]:fill(0) + if (ref.err_outputs_b[t] == nil) then + ref.err_outputs_b[t] = {} + end + ref.err_outputs_b[t][i] = false + end + end + for i = 1, #ref.dim_out do + for t = 1 - self.chunk_size, self.chunk_size * 2 do + ref.outputs_m[t][i]:fill(self.gconf.nn_act_default) + if (ref.outputs_b[t] == nil) then + 
ref.outputs_b[t] = {} + end + ref.outputs_b[t][i] = false + ref.err_inputs_m[t][i]:fill(0) + if (ref.err_inputs_b[t] == nil) then + ref.err_inputs_b[t] = {} + end + ref.err_inputs_b[t][i] = false + end + end + end +end + +--reader: some reader +--Returns: bool, whether has new feed +--Returns: feeds, a table that will be filled with the reader's feeds +function TNN:getfeed_from_reader(reader) + local feeds_now = self.feeds_now + local got_new = reader:get_batch(feeds_now) + return got_new, feeds_now +end + +function TNN:move_right_to_nextmb(list_t) --move output history activations of 1..chunk_size to 1-chunk_size..0 + if list_t == nil then + list_t = {} + for i = 1, self.chunk_size do + list_t[i] = i - self.chunk_size + end + end + for i = 1, #list_t do + t = list_t[i] + if t < 1 - self.chunk_size or t > 0 then + nerv.error("MB move range error") + end + for id, ref in pairs(self.layers) do + for p = 1, #ref.dim_out do + ref.outputs_m[t][p]:copy_fromd(ref.outputs_m[t + self.chunk_size][p]) + end + end + end +end + +function TNN:net_propagate() --propagate according to feeds_now + for t = 1, self.chunk_size, 1 do + for id, ref in pairs(self.layers) do + for p = 1, #ref.dim_out do + ref.outputs_b[t][p] = false + end + for p = 1, #ref.dim_in do + ref.inputs_b[t][p] = false + end + end + end + + local feeds_now = self.feeds_now + for t = 1, self.chunk_size do + if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0) then + for i = 1, #self.dim_in do + local ref = self.inputs_p[i].ref + local p = self.inputs_p[i].port + ref.inputs_b[t][p] = true + self:propagate_dfs(ref, t) + end + end + end + + local flag_out = true + for t = 1, self.chunk_size do --check whether every output has been computed + if (bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0) then + for i = 1, #self.dim_out do + local ref = self.outputs_p[i].ref + if (ref.outputs_b[t][1] ~= true) then + flag_out = false + break + end + end + end + end + if (flag_out == false) then + nerv.error("some thing wrong, some labeled output is not propagated") + end +end + +--ref: the TNN_ref of a layer +--t: the current time to propagate +function TNN:propagate_dfs(ref, t) + if (self:out_of_feedrange(t)) then + return + end + if (ref.outputs_b[t][1] == true) then --already propagated, 1 is just a random port + return + end + + --print("debug dfs", ref.layer.id, t) + + local flag = true --whether have all inputs + for _, conn in pairs(ref.i_conns_p) do + local p = conn.dst.port + if (not (ref.inputs_b[t][p] or self:out_of_feedrange(t - conn.time))) then + flag = false + break + end + end + if (flag == false) then + return + end + + --ok, do propagate + --print("debug ok, propagating"); + --The MB moving will cause bordering history to be changed, so it is more wise to flush the input activation + if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border history + for i = 1, self.batch_size do + local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) + local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) + if (seq_start > 0 or seq_end > 0) then + for p, conn in pairs(ref.i_conns_p) do + if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to default + ref.inputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default) + end + end + end + end + end + self.gconf.timer:tic("tnn_actual_layer_propagate") + ref.layer:propagate(ref.inputs_m[t], 
ref.outputs_m[t], t) --propagate! + self.gconf.timer:toc("tnn_actual_layer_propagate") + --[[ + if (bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --restore cross-border history + for i = 1, self.batch_size do + local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) + local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) + if (seq_start > 0 or seq_end > 0) then + for p, conn in pairs(ref.o_conns_p) do + if ((ref.o_conns_p[p].time > 0 and seq_end > 0) or (ref.o_conns_p[p].time < 0 and seq_start > 0)) then + ref.outputs_m[t][p][i - 1]:fill(self.gconf.nn_act_default) + end + end + end + end + end + ]]-- + --set input flag for future layers + for i = 1, #ref.dim_out do + if (ref.outputs_b[t][i] == true) then + nerv.error("this time's outputs_b should be false") + end + ref.outputs_b[t][i] = true + end + + --try dfs for further layers + for _, conn in pairs(ref.o_conns_p) do + --print("debug dfs-searching", conn.dst.ref.layer.id) + conn.dst.ref.inputs_b[t + conn.time][conn.dst.port] = true + self:propagate_dfs(conn.dst.ref, t + conn.time) + end +end + +--do_update: bool, whether we are doing back-propagate or updating the parameters +function TNN:net_backpropagate(do_update) --propagate according to feeds_now + if do_update == nil then + nerv.error("do_update should not be nil") + end + for t = 1, self.chunk_size, 1 do + for id, ref in pairs(self.layers) do + for p = 1, #ref.dim_out do + ref.err_inputs_b[t][p] = false + end + for p = 1, #ref.dim_in do + ref.err_outputs_b[t][p] = false + end + end + end + + local feeds_now = self.feeds_now + for t = 1, self.chunk_size do + if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_LABEL) > 0 then + for i = 1, #self.dim_out do + local ref = self.outputs_p[i].ref + local p = self.outputs_p[i].port + ref.err_inputs_b[t][p] = true + self:backpropagate_dfs(ref, t, do_update) + end + end + end + + local flag_out = true + for t = 1, self.chunk_size do --check whether every output has been computed + if bit.band(feeds_now.flagsPack_now[t], nerv.TNN.FC.HAS_INPUT) > 0 then + for i = 1, #self.dim_in do + local ref = self.inputs_p[i].ref + if ref.err_outputs_b[t][1] ~= true then + flag_out = false + break + end + end + end + end + if (flag_out == false) then + nerv.error("some thing wrong, some input is not back_propagated") + end +end + +--ref: the TNN_ref of a layer +--t: the current time to propagate +function TNN:backpropagate_dfs(ref, t, do_update) + if self:out_of_feedrange(t) then + return + end + if ref.err_outputs_b[t][1] == true then --already back_propagated, 1 is just a random port + return + end + + --print("debug dfs", ref.layer.id, t) + + local flag = true --whether have all inputs + for _, conn in pairs(ref.o_conns_p) do + local p = conn.src.port + if (not (ref.err_inputs_b[t][p] or self:out_of_feedrange(t + conn.time))) then + flag = false + break + end + end + if (flag == false) then + return + end + + --ok, do back_propagate + --print("debug ok, back-propagating(or updating)") + if (do_update == false) then + self.gconf.timer:tic("tnn_actual_layer_backpropagate") + ref.layer:back_propagate(ref.err_inputs_m[t], ref.err_outputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t) + self.gconf.timer:toc("tnn_actual_layer_backpropagate") + if self.clip_t > 0 then + for _, conn in pairs(ref.i_conns_p) do + local p = conn.dst.port --port for ref + if conn.time ~= 0 then + --print("debug clip_t tnn", ref.id, "port:", p, "clip:", self.clip_t) + 
ref.err_outputs_m[t][p]:clip(-self.clip_t, self.clip_t) + end + end + end + else + --print(ref.err_inputs_m[t][1]) + self.gconf.timer:tic("tnn_actual_layer_update") + ref.layer:update(ref.err_inputs_m[t], ref.inputs_m[t], ref.outputs_m[t], t) + self.gconf.timer:toc("tnn_actual_layer_update") + end + + if (do_update == false and bit.band(self.feeds_now.flagsPack_now[t], bit.bor(nerv.TNN.FC.SEQ_START, nerv.TNN.FC.SEQ_END)) > 0) then --flush cross-border errors + for i = 1, self.batch_size do + local seq_start = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_START) + local seq_end = bit.band(self.feeds_now.flags_now[t][i], nerv.TNN.FC.SEQ_END) + if (seq_start > 0 or seq_end > 0) then + for p, conn in pairs(ref.i_conns_p) do + if ((ref.i_conns_p[p].time > 0 and seq_start > 0) or (ref.i_conns_p[p].time < 0 and seq_end > 0)) then --cross-border, set to zero + ref.err_outputs_m[t][p][i - 1]:fill(0) + end + end + end + end + end + + for i = 1, #ref.dim_in do + if (ref.err_outputs_b[t][i] == true) then + nerv.error("this time's outputs_b should be false") + end + ref.err_outputs_b[t][i] = true + end + + --try dfs for further layers + for _, conn in pairs(ref.i_conns_p) do + --print("debug dfs-searching", conn.src.ref.layer.id) + conn.src.ref.err_inputs_b[t - conn.time][conn.src.port] = true + self:backpropagate_dfs(conn.src.ref, t - conn.time, do_update) + end +end + +--Return: nerv.ParamRepo +function TNN:get_params() + local param_repos = {} + for id, ref in pairs(self.layers) do + table.insert(param_repos, ref.layer:get_params()) + end + return nerv.ParamRepo.merge(param_repos) +end + -- cgit v1.2.3-70-g09d2
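The schedule strings consumed by nerv/tnn/sutil.lua above are "value*count" units joined by ':'. A minimal usage sketch, illustrative only and assuming the files from this patch are installed and loaded; the commented values simply trace parse_schedule and sche_get as written above:

-- Sketch: expand a schedule string and query it per training iteration.
local sch = nerv.SUtil.parse_schedule("0.5*3:0") -- {0.5, 0.5, 0.5, 0}
print(nerv.SUtil.sche_get(sch, 2))  -- 0.5, the value scheduled for iteration 2
print(nerv.SUtil.sche_get(sch, 10)) -- 0, past the end the last entry is reused (a warning is logged)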
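nerv/tnn/tnn.lua wires its sub-layers with {from, to, time} triples rather than DAGLayerT's from = to pairs; a non-zero time turns the edge into a delayed, recurrent link whose storage spans [1-chunk_size, 2*chunk_size]. A hedged sketch of such a connection table follows; the layer ids "recurrentL", "sigmoidL" and "outputL" are hypothetical, not layers defined in this patch:

-- Sketch: a one-step recurrent loop declared for nerv.TNN.
local connections = {
    {"<input>[1]", "recurrentL[1]", 0},  -- current-step network input
    {"recurrentL[1]", "sigmoidL[1]", 0},
    {"sigmoidL[1]", "recurrentL[2]", 1}, -- hidden state fed back with a one-step delay
    {"sigmoidL[1]", "outputL[1]", 0},
    {"outputL[1]", "<output>[1]", 0},
}
-- TNN:__init parses each triple with parse_id and records delayed edges in i_conns_p/o_conns_p;
-- propagate_dfs/backpropagate_dfs then follow conn.time when walking across time steps.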