author     Determinant <[email protected]>   2016-02-22 12:10:35 +0800
committer  Determinant <[email protected]>   2016-02-22 12:10:35 +0800
commit     9642bd16922b288c81dee25f17373466ae6888c4 (patch)
tree       97c6a4c7e42de3addd535750b159b353fe9ec378
parent     8f19acf152652ff887d3fe978e78a076dca60611 (diff)
clean up obsolete files
-rw-r--r--  nerv/Makefile                                           9
-rw-r--r--  nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua (renamed from nerv/layer/affine_recurrent.lua)   0
-rw-r--r--  nerv/examples/lmptb/lmptb/layer/init.lua                1
-rw-r--r--  nerv/layer/gate_fff.lua                                73
-rw-r--r--  nerv/layer/gru.lua                                      2
-rw-r--r--  nerv/layer/init.lua                                     9
-rw-r--r--  nerv/nn/layer_repo.lua                                  3
-rw-r--r--  nerv/tnn/init.lua                                       5
-rw-r--r--  nerv/tnn/layer_dag_t.lua                              386
-rw-r--r--  nerv/tnn/layersT/dropout_t.lua                         71
-rw-r--r--  nerv/tnn/layersT/gru_t.lua                            114
-rw-r--r--  nerv/tnn/layersT/lstm_t.lua                           124
-rw-r--r--  nerv/tnn/layersT/softmax_ce_t.lua                      93
13 files changed, 16 insertions, 874 deletions
diff --git a/nerv/Makefile b/nerv/Makefile
index ee4b9c0..c0db53a 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -7,7 +7,7 @@ INC_PATH := $(LUA_BINDIR)/../include/nerv
LUA_DIR = $(INST_LUADIR)/nerv
OBJ_DIR := $(BUILD_DIR)/objs
ISUBDIR := io matrix luaT
-SUBDIR := matrix io layer examples nn lib/io lib/luaT lib/matrix tnn/layersT
+SUBDIR := matrix io layer examples nn tnn lib/io lib/luaT lib/matrix
INC_SUBDIR := $(addprefix $(INC_PATH)/,$(ISUBDIR))
OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
@@ -31,12 +31,11 @@ OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/tanh.lua layer/softmax_ce.lua layer/softmax.lua \
- layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua \
- layer/elem_mul.lua layer/gate_fff.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
+ layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua \
+ layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
io/sgd_buffer.lua \
- tnn/init.lua tnn/layer_dag_t.lua tnn/sutil.lua tnn/tnn.lua \
- tnn/layersT/dropout_t.lua tnn/layersT/lstm_t.lua tnn/layersT/gru_t.lua tnn/layersT/softmax_ce_t.lua
+ tnn/init.lua tnn/sutil.lua tnn/tnn.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
#CUDA_BASE := /usr/local/cuda-7.0
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua
index fd6f38f..fd6f38f 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua
diff --git a/nerv/examples/lmptb/lmptb/layer/init.lua b/nerv/examples/lmptb/lmptb/layer/init.lua
index ceae009..e20e2dc 100644
--- a/nerv/examples/lmptb/lmptb/layer/init.lua
+++ b/nerv/examples/lmptb/lmptb/layer/init.lua
@@ -1,4 +1,5 @@
require 'lmptb.layer.select_linear'
+require 'lmptb.layer.affine_recurrent'
require 'lmptb.layer.affine_recurrent_plusvec'
--require 'lmptb.layer.gru_t'
require 'lmptb.layer.lm_affine_recurrent'
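affine_recurrent.lua now lives in the lmptb example package rather than in the core layer set (see the rename above), so lmptb's layer init pulls it in itself. A minimal usage sketch under that assumption; the package-init require path is inferred from the lmptb layout and may differ in user scripts:

    -- hedged sketch: loading the moved recurrent affine layer from lmptb
    require 'lmptb.layer.init'               -- now also requires lmptb.layer.affine_recurrent
    -- or require the moved file directly:
    require 'lmptb.layer.affine_recurrent'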
diff --git a/nerv/layer/gate_fff.lua b/nerv/layer/gate_fff.lua
deleted file mode 100644
index 6082e27..0000000
--- a/nerv/layer/gate_fff.lua
+++ /dev/null
@@ -1,73 +0,0 @@
-local GateFFFLayer = nerv.class('nerv.GateFLayer', 'nerv.Layer') --Full matrix gate
-
-function GateFFFLayer:__init(id, global_conf, layer_conf)
- self.id = id
- self.dim_in = layer_conf.dim_in
- self.dim_out = layer_conf.dim_out
- self.gconf = global_conf
-
- for i = 1, #self.dim_in do
- self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]}) --layer_conf.ltp
- end
- self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
-
- self:check_dim_len(-1, 1) --accept multiple inputs
-end
-
-function GateFFFLayer:init(batch_size)
- for i = 1, #self.dim_in do
- if self["ltp" .. i].trans:ncol() ~= self.bp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform and bias paramter")
- end
- if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- self["ltp"..i]:train_init()
- end
-
- if self.dim_out[1] ~= self.ltp1.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self.bp:train_init()
- self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
-end
-
-function GateFFFLayer:batch_resize(batch_size)
- if self.err_m:nrow() ~= batch_size then
- self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
- end
-end
-
-function GateFFFLayer:propagate(input, output)
- -- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
- for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
- end
- -- add bias
- output[1]:add_row(self.bp.trans, 1.0)
- output[1]:sigmoid(output[1])
-end
-
-function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
- self.err_bakm:sigmoid_grad(bp_err[1], output[1])
- for i = 1, #self.dim_in do
- next_bp_err[i]:mul(self.err_bakm, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- end
-end
-
-function GateFFFLayer:update(bp_err, input, output)
- self.err_bakm:sigmoid_grad(bp_err[1], output[1])
- for i = 1, #self.dim_in do
- self["ltp" .. i]:update_by_err_input(self.err_bakm, input[i])
- end
- self.bp:update_by_gradient(self.err_bakm:colsum())
-end
-
-function GateFFFLayer:get_params()
- local pr = nerv.ParamRepo({self.bp})
- for i = 1, #self.dim_in do
- pr:add(self["ltp" .. i].id, self["ltp" .. i])
- end
- return pr
-end
diff --git a/nerv/layer/gru.lua b/nerv/layer/gru.lua
index 2162e28..e81d21a 100644
--- a/nerv/layer/gru.lua
+++ b/nerv/layer/gru.lua
@@ -48,7 +48,7 @@ function GRULayer:__init(id, global_conf, layer_conf)
["nerv.TanhLayer"] = {
[ap("mainTanhL")] = {{}, {dim_in = {dout1}, dim_out = {dout1}}},
},
- ["nerv.GateFLayer"] = {
+ ["nerv.LSTMGateLayer"] = {
[ap("resetGateL")] = {{}, {dim_in = {din1, din2},
dim_out = {din2},
pr = pr}},
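With gate_fff.lua deleted, the full-matrix sigmoid gate is expressed with nerv.LSTMGateLayer instead, as the GRU hunk above shows for the reset and update gates. A minimal sketch of a layer spec written against the new class name; the layer id, dimensions, and the pr parameter repo below are hypothetical:

    -- hedged sketch: nerv.GateFLayer no longer exists after this commit
    local pr = nerv.ParamRepo()
    local layers = {
        ["nerv.LSTMGateLayer"] = {
            ["gateL"] = {{}, {dim_in = {300, 300}, dim_out = {300}, pr = pr}},
        },
    }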
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 6b7a1d7..54f33ae 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -109,11 +109,16 @@ nerv.include('bias.lua')
nerv.include('window.lua')
nerv.include('mse.lua')
nerv.include('combiner.lua')
-nerv.include('affine_recurrent.lua')
nerv.include('softmax.lua')
nerv.include('elem_mul.lua')
-nerv.include('gate_fff.lua')
nerv.include('lstm.lua')
nerv.include('lstm_gate.lua')
nerv.include('dropout.lua')
nerv.include('gru.lua')
+
+-- The following lines are for backward compatibility, and will be removed in
+-- the future. The use of these names is deprecated.
+nerv.DropoutLayerT = nerv.DropoutLayer
+nerv.GRULayerT = nerv.GRULayer
+nerv.LSTMLayerT = nerv.LSTMLayer
+nerv.SoftmaxCELayerT = nerv.SoftmaxCELayer
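The compatibility aliases above let scripts that still construct the old T-suffixed classes keep working; the old names now resolve to the regular layer implementations. A minimal sketch, assuming gconf is an existing global configuration table with cumat_type set:

    -- hedged sketch: the T name is now just an alias
    assert(nerv.SoftmaxCELayerT == nerv.SoftmaxCELayer)
    local ce = nerv.SoftmaxCELayerT("softmaxCE", gconf,
                                    {dim_in = {300, 1}, dim_out = {1}, compressed = true})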
diff --git a/nerv/nn/layer_repo.lua b/nerv/nn/layer_repo.lua
index 2f8de08..3d3a79f 100644
--- a/nerv/nn/layer_repo.lua
+++ b/nerv/nn/layer_repo.lua
@@ -9,6 +9,9 @@ function LayerRepo:add_layers(layer_spec, param_repo, global_conf)
local layers = self.layers
for ltype, llist in pairs(layer_spec) do
local layer_type = nerv.get_type(ltype)
+ if layer_type == nil then
+ nerv.error('layer type `%s` not found', ltype)
+ end
for id, spec in pairs(llist) do
if layers[id] ~= nil then
nerv.error("a layer with id %s already exists", id)
diff --git a/nerv/tnn/init.lua b/nerv/tnn/init.lua
index 7faca31..44ce26b 100644
--- a/nerv/tnn/init.lua
+++ b/nerv/tnn/init.lua
@@ -45,8 +45,3 @@ end
nerv.include('sutil.lua')
nerv.include('tnn.lua')
-nerv.include('layersT/softmax_ce_t.lua')
-nerv.include('layersT/lstm_t.lua')
-nerv.include('layersT/gru_t.lua')
-nerv.include('layersT/dropout_t.lua')
-nerv.include('layer_dag_t.lua')
diff --git a/nerv/tnn/layer_dag_t.lua b/nerv/tnn/layer_dag_t.lua
deleted file mode 100644
index b651f4e..0000000
--- a/nerv/tnn/layer_dag_t.lua
+++ /dev/null
@@ -1,386 +0,0 @@
-local DAGLayerT = nerv.class("nerv.DAGLayerT", "nerv.LayerT")
-
-local function parse_id(str)
- local id, port, _
- _, _, id, port = string.find(str, "([a-zA-Z0-9_.]+)%[([0-9]+)%]")
- if id == nil or port == nil then
- _, _, id, port = string.find(str, "(.+)%[([0-9]+)%]")
- if not (id == "<input>" or id == "<output>") then
- nerv.error("wrong format of connection id")
- end
- end
- port = tonumber(port)
- return id, port
-end
-
-local function discover(id, layers, layer_repo)
- local ref = layers[id]
- if id == "<input>" or id == "<output>" then
- return nil
- end
- if ref == nil then
- local layer = layer_repo:get_layer(id)
- local dim_in, dim_out = layer:get_dim()
- ref = {
- id = layer.id,
- layer = layer,
- inputs = {},
- outputs = {},
- err_inputs = {},
- err_outputs = {},
- next_layers = {},
- input_len = #dim_in,
- output_len = #dim_out,
- in_deg = 0,
- visited = false
- }
- layers[id] = ref
- end
- return ref
-end
-
-function DAGLayerT:__init(id, global_conf, layer_conf)
- local layers = {}
- local inputs = {}
- local outputs = {}
- local dim_in = layer_conf.dim_in
- local dim_out = layer_conf.dim_out
- local parsed_conn = {}
- for from, to in pairs(layer_conf.connections) do
- local id_from, port_from = parse_id(from)
- local id_to, port_to = parse_id(to)
- local ref_from = discover(id_from, layers, layer_conf.sub_layers)
- local ref_to = discover(id_to, layers, layer_conf.sub_layers)
- local input_dim, output_dim, _
- if id_from == "<input>" then
- input_dim, _ = ref_to.layer:get_dim()
- if dim_in[port_from] ~= input_dim[port_to] then
- nerv.error("mismatching data dimension between %s and %s", from, to)
- end
- inputs[port_from] = {ref_to, port_to}
- if ref_to.inputs[1] == nil then
- ref_to.inputs[1] = {}
- end
- if ref_to.inputs[1][port_to] ~= nil then
- nerv.error("port(%d) for layer(%s) already attached", port_to, to)
- end
- ref_to.inputs[1][port_to] = inputs -- just a place holder
- elseif id_to == "<output>" then
- _, output_dim = ref_from.layer:get_dim()
- if output_dim[port_from] ~= dim_out[port_to] then
- nerv.error("mismatching data dimension between %s and %s", from, to)
- end
- outputs[port_to] = {ref_from, port_from}
- if ref_from.outputs[1] == nil then
- ref_from.outputs[1] = {}
- end
- if ref_from.outputs[1][port_from] ~= nil then
- nerv.error("port(%d) for layer(%s) already attached", port_from, from)
- end
- ref_from.outputs[1] = {}
- ref_from.outputs[1][port_from] = outputs -- just a place holder
- else
- _, output_dim = ref_from.layer:get_dim()
- input_dim, _ = ref_to.layer:get_dim()
- if output_dim[port_from] ~= input_dim[port_to] then
- nerv.error("mismatching data dimension between %s and %s", from, to)
- end
-
- table.insert(parsed_conn,
- {{ref_from, port_from}, {ref_to, port_to}})
- table.insert(ref_from.next_layers, ref_to) -- add edge
- ref_to.in_deg = ref_to.in_deg + 1 -- increase the in-degree of the target layer
- end
- end
-
- -- topology sort
- local queue = {}
- local l = 1
- local r = 1
- for id, ref in pairs(layers) do
- if ref.in_deg == 0 then
- table.insert(queue, ref)
- nerv.info("adding source layer: %s", id)
- r = r + 1
- end
- end
- if l == r then
- nerv.error("loop detected")
- end
- while l < r do
- local cur = queue[l]
- cur.visited = true
- l = l + 1
- for _, nl in pairs(cur.next_layers) do
- nl.in_deg = nl.in_deg - 1
- if nl.in_deg == 0 then
- table.insert(queue, nl)
- r = r + 1
- end
- end
- end
- for i = 1, #queue do
- nerv.info("enqueued layer: %s %s", queue[i].layer, queue[i].layer.id)
- end
-
- for id, ref in pairs(layers) do
- -- check wether the graph is connected
- if ref.visited == false then
- nerv.warning("layer %s is ignored", id)
- end
- end
-
- self.layers = layers
- self.inputs = inputs
- self.outputs = outputs
- self.id = id
- self.dim_in = dim_in
- self.dim_out = dim_out
- self.parsed_conn = parsed_conn
- self.queue = queue
- self.gconf = global_conf
-end
-
-function DAGLayerT:init(batch_size, chunk_size)
- nerv.info("initing DAGLayerT %s...", self.id)
- if chunk_size == nil then
- chunk_size = 1
- nerv.info("(Initing DAGLayerT) chunk_size is nil, setting it to default 1\n")
- end
-
- self.chunk_size = chunk_size
-
- for i, conn in ipairs(self.parsed_conn) do
- local _, output_dim
- local ref_from, port_from, ref_to, port_to
- ref_from, port_from = unpack(conn[1])
- ref_to, port_to = unpack(conn[2])
- _, output_dim = ref_from.layer:get_dim()
- local dim = 1
- if output_dim[port_from] > 0 then
- dim = output_dim[port_from]
- end
-
- for t = 1, chunk_size do
- local mid = self.gconf.cumat_type(batch_size, dim)
- local err_mid = mid:create()
-
- if ref_from.outputs[t] == nil then
- ref_from.outputs[t] = {}
- end
- if ref_to.inputs[t] == nil then
- ref_to.inputs[t] = {}
- end
- if ref_to.err_outputs[t] == nil then
- ref_to.err_outputs[t] = {}
- end
- if ref_from.err_inputs[t] == nil then
- ref_from.err_inputs[t] = {}
- end
-
- ref_from.outputs[t][port_from] = mid
- ref_to.inputs[t][port_to] = mid
-
- ref_from.err_inputs[t][port_from] = err_mid
- ref_to.err_outputs[t][port_to] = err_mid
- end
- end
- for id, ref in pairs(self.layers) do
- for i = 1, ref.input_len do
- if ref.inputs[1][i] == nil then --peek at time 1
- nerv.error("dangling input port %d of layer %s", i, id)
- end
- end
- for i = 1, ref.output_len do
- if ref.outputs[1][i] == nil then --peek at time 1
- nerv.error("dangling output port %d of layer %s", i, id)
- end
- end
- -- initialize sub layers
- ref.layer:init(batch_size, chunk_size)
- end
- for i = 1, #self.dim_in do
- if self.inputs[i] == nil then
- nerv.error("dangling port %d of layer <input>", i)
- end
- end
- for i = 1, #self.dim_out do
- if self.outputs[i] == nil then
- nerv.error("dangling port %d of layer <output>", i)
- end
- end
-end
-
-function DAGLayerT:batch_resize(batch_size, chunk_size)
- if chunk_size == nil then
- chunk_size = 1
- end
- if batch_size ~= self.gconf.batch_size
- or chunk_size ~= self.gconf.chunk_size then
- nerv.printf("warn: in DAGLayerT:batch_resize, the batch_size ~= gconf.batch_size, or chunk_size ~= gconf.chunk_size")
- end
- self.gconf.batch_size = batch_size
- self.gconf.chunk_size = chunk_size
-
- for i, conn in ipairs(self.parsed_conn) do
- local _, output_dim
- local ref_from, port_from, ref_to, port_to
- ref_from, port_from = unpack(conn[1])
- ref_to, port_to = unpack(conn[2])
- _, output_dim = ref_from.layer:get_dim()
-
- for t = 1, chunk_size do
- if ref_from.outputs[t] == nil then
- ref_from.outputs[t] = {}
- end
- if ref_to.inputs[t] == nil then
- ref_to.inputs[t] = {}
- end
- if ref_from.err_outputs[t] == nil then
- ref_from.err_outputs[t] = {}
- end
- if ref_from.err_inputs[t] == nil then
- ref_from.err_inputs[t] = {}
- end
-
- local mid = self.gconf.cumat_type(batch_size, dim)
- local err_mid = mid:create()
-
- ref_from.outputs[t][port_from] = mid
- ref_to.inputs[t][port_to] = mid
-
- ref_from.err_inputs[t][port_from] = err_mid
- ref_to.err_outputs[t][port_to] = err_mid
- end
- end
- for id, ref in pairs(self.layers) do
- ref.layer:batch_resize(batch_size, chunk_size)
- end
- collectgarbage("collect")
-end
-
-function DAGLayerT:set_inputs(input, t)
- for i = 1, #self.dim_in do
- if input[i] == nil then
- nerv.error("some input is not provided");
- end
- local layer = self.inputs[i][1]
- local port = self.inputs[i][2]
- if layer.inputs[t] == nil then
- layer.inputs[t] = {}
- end
- layer.inputs[t][port] = input[i]
- end
-end
-
-function DAGLayerT:set_outputs(output, t)
- for i = 1, #self.dim_out do
- if output[i] == nil then
- nerv.error("some output is not provided");
- end
- local layer = self.outputs[i][1]
- local port = self.outputs[i][2]
- if layer.outputs[t] == nil then
- layer.outputs[t] = {}
- end
- layer.outputs[t][port] = output[i]
- end
-end
-
-function DAGLayerT:set_err_inputs(bp_err, t)
- for i = 1, #self.dim_out do
- local layer = self.outputs[i][1]
- local port = self.outputs[i][2]
- if layer.err_inputs[t] == nil then
- layer.err_inputs[t] = {}
- end
- layer.err_inputs[t][port] = bp_err[i]
- end
-end
-
-function DAGLayerT:set_err_outputs(next_bp_err, t)
- for i = 1, #self.dim_in do
- local layer = self.inputs[i][1]
- local port = self.inputs[i][2]
- if layer.err_outputs[t] == nil then
- layer.err_outputs[t] = {}
- end
- layer.err_outputs[t][port] = next_bp_err[i]
- end
-end
-
-function DAGLayerT:update(bp_err, input, output, t)
- if t == nil then
- t = 1
- end
- self:set_err_inputs(bp_err, t)
- self:set_inputs(input, t)
- self:set_outputs(output, t)
- for id, ref in pairs(self.queue) do
- ref.layer:update(ref.err_inputs[t], ref.inputs[t], ref.outputs[t], t)
- end
-end
-
-function DAGLayerT:propagate(input, output, t)
- if t == nil then
- t = 1
- end
- self:set_inputs(input, t)
- self:set_outputs(output, t)
- local ret = false
- for i = 1, #self.queue do
- local ref = self.queue[i]
- --print("debug DAGLAyerT:propagate", ref.id, t)
- ret = ref.layer:propagate(ref.inputs[t], ref.outputs[t], t)
- end
- return ret
-end
-
-function DAGLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
- if t == nil then
- t = 1
- end
- self:set_err_outputs(next_bp_err, t)
- self:set_err_inputs(bp_err, t)
- self:set_inputs(input, t)
- self:set_outputs(output, t)
- for i = #self.queue, 1, -1 do
- local ref = self.queue[i]
- ref.layer:back_propagate(ref.err_inputs[t], ref.err_outputs[t], ref.inputs[t], ref.outputs[t], t)
- end
-end
-
-function DAGLayerT:get_params()
- local param_repos = {}
- for id, ref in pairs(self.queue) do
- table.insert(param_repos, ref.layer:get_params())
- end
- return nerv.ParamRepo.merge(param_repos)
-end
-
-DAGLayerT.PORT_TYPES = {
- INPUT = {},
- OUTPUT = {},
- ERR_INPUT = {},
- ERR_OUTPUT = {}
-}
-
-function DAGLayerT:get_intermediate(id, port_type)
- if id == "<input>" or id == "<output>" then
- nerv.error("an actual real layer id is expected")
- end
- local layer = self.layers[id]
- if layer == nil then
- nerv.error("layer id %s not found", id)
- end
- if port_type == DAGLayerT.PORT_TYPES.INPUT then
- return layer.inputs
- elseif port_type == DAGLayerT.PORT_TYPES.OUTPUT then
- return layer.outputs
- elseif port_type == DAGLayerT.PORT_TYPES.ERR_INPUT then
- return layer.err_inputs
- elseif port_type == DAGLayerT.PORT_TYPES.ERR_OUTPUT then
- return layer.err_outputs
- end
- nerv.error("unrecognized port type")
-end
diff --git a/nerv/tnn/layersT/dropout_t.lua b/nerv/tnn/layersT/dropout_t.lua
deleted file mode 100644
index 4351285..0000000
--- a/nerv/tnn/layersT/dropout_t.lua
+++ /dev/null
@@ -1,71 +0,0 @@
-local Dropout = nerv.class("nerv.DropoutLayerT", "nerv.LayerT")
-
-function Dropout:__init(id, global_conf, layer_conf)
- self.id = id
- self.gconf = global_conf
- self.dim_in = layer_conf.dim_in
- self.dim_out = layer_conf.dim_out
- self:check_dim_len(1, 1) -- two inputs: nn output and label
-end
-
-function Dropout:init(batch_size, chunk_size)
- if self.dim_in[1] ~= self.dim_out[1] then
- nerv.error("mismatching dimensions of input and output")
- end
- if chunk_size == nil then
- chunk_size = 1
- end
- self.mask_t = {}
- for t = 1, chunk_size do
- self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
- end
-end
-
-function Dropout:batch_resize(batch_size, chunk_size)
- if chunk_size == nil then
- chunk_size = 1
- end
- for t = 1, chunk_size do
- if self.mask_t[t] == nil or self.mask_t[t]:nrow() ~= batch_size then
- self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
- end
- end
-end
-
-function Dropout:propagate(input, output, t)
- if t == nil then
- t = 1
- end
- if self.gconf.dropout_rate == nil then
- nerv.info("DropoutLayerT:propagate warning, global_conf.dropout_rate is nil, setting it zero")
- self.gconf.dropout_rate = 0
- end
-
- if self.gconf.dropout_rate == 0 then
- output[1]:copy_fromd(input[1])
- else
- self.mask_t[t]:rand_uniform()
- --since we will lose a portion of the actvations, we multiply the activations by 1/(1-dr) to compensate
- self.mask_t[t]:thres_mask(self.mask_t[t], self.gconf.dropout_rate, 0, 1 / (1.0 - self.gconf.dropout_rate))
- output[1]:mul_elem(input[1], self.mask_t[t])
- end
-end
-
-function Dropout:update(bp_err, input, output, t)
- -- no params, therefore do nothing
-end
-
-function Dropout:back_propagate(bp_err, next_bp_err, input, output, t)
- if t == nil then
- t = 1
- end
- if self.gconf.dropout_rate == 0 then
- next_bp_err[1]:copy_fromd(bp_err[1])
- else
- next_bp_err[1]:mul_elem(bp_err[1], self.mask_t[t])
- end
-end
-
-function Dropout:get_params()
- return nerv.ParamRepo({})
-end
diff --git a/nerv/tnn/layersT/gru_t.lua b/nerv/tnn/layersT/gru_t.lua
deleted file mode 100644
index 8f15cc8..0000000
--- a/nerv/tnn/layersT/gru_t.lua
+++ /dev/null
@@ -1,114 +0,0 @@
-local GRULayerT = nerv.class('nerv.GRULayerT', 'nerv.LayerT')
-
-function GRULayerT:__init(id, global_conf, layer_conf)
- --input1:x input2:h input3:c(h^~)
- self.id = id
- self.dim_in = layer_conf.dim_in
- self.dim_out = layer_conf.dim_out
- self.gconf = global_conf
-
- if self.dim_in[2] ~= self.dim_out[1] then
- nerv.error("dim_in[2](%d) mismatch with dim_out[1](%d)", self.dim_in[2], self.dim_out[1])
- end
-
- --prepare a DAGLayerT to hold the lstm structure
- local pr = layer_conf.pr
- if pr == nil then
- pr = nerv.ParamRepo()
- end
-
- local function ap(str)
- return self.id .. '.' .. str
- end
-
- local layers = {
- ["nerv.CombinerLayer"] = {
- [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
- ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
- [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
- ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
- [ap("updateGDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
- ["dim_out"] = {self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
- [ap("updateMergeL")] = {{}, {["dim_in"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["dim_out"] = {self.dim_out[1]},
- ["lambda"] = {1, -1, 1}}},
- },
- ["nerv.AffineLayer"] = {
- [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]}, ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
- },
- ["nerv.TanhLayer"] = {
- [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
- },
- ["nerv.GateFLayer"] = {
- [ap("resetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
- ["dim_out"] = {self.dim_in[2]}, ["pr"] = pr}},
- [ap("updateGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
- ["dim_out"] = {self.dim_in[2]}, ["pr"] = pr}},
- },
- ["nerv.ElemMulLayer"] = {
- [ap("resetGMulL")] = {{}, {["dim_in"] = {self.dim_in[2], self.dim_in[2]}, ["dim_out"] = {self.dim_in[2]}}},
- [ap("updateGMulCL")] = {{}, {["dim_in"] = {self.dim_in[2], self.dim_in[2]}, ["dim_out"] = {self.dim_in[2]}}},
- [ap("updateGMulHL")] = {{}, {["dim_in"] = {self.dim_in[2], self.dim_in[2]}, ["dim_out"] = {self.dim_in[2]}}},
- },
- }
-
- local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
-
- local connections_t = {
- ["<input>[1]"] = ap("inputXDup[1]"),
- ["<input>[2]"] = ap("inputHDup[1]"),
-
- [ap("inputXDup[1]")] = ap("resetGateL[1]"),
- [ap("inputHDup[1]")] = ap("resetGateL[2]"),
- [ap("inputXDup[2]")] = ap("updateGateL[1]"),
- [ap("inputHDup[2]")] = ap("updateGateL[2]"),
- [ap("updateGateL[1]")] = ap("updateGDup[1]"),
-
- [ap("resetGateL[1]")] = ap("resetGMulL[1]"),
- [ap("inputHDup[3]")] = ap("resetGMulL[2]"),
-
- [ap("inputXDup[3]")] = ap("mainAffineL[1]"),
- [ap("resetGMulL[1]")] = ap("mainAffineL[2]"),
- [ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
-
- [ap("updateGDup[1]")] = ap("updateGMulHL[1]"),
- [ap("inputHDup[4]")] = ap("updateGMulHL[2]"),
- [ap("updateGDup[2]")] = ap("updateGMulCL[1]"),
- [ap("mainTanhL[1]")] = ap("updateGMulCL[2]"),
-
- [ap("inputHDup[5]")] = ap("updateMergeL[1]"),
- [ap("updateGMulHL[1]")] = ap("updateMergeL[2]"),
- [ap("updateGMulCL[1]")] = ap("updateMergeL[3]"),
-
- [ap("updateMergeL[1]")] = "<output>[1]",
- }
-
- self.dagL = nerv.DAGLayerT(self.id, global_conf,
- {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
- ["connections"] = connections_t})
-
- self:check_dim_len(2, 1) -- x, h and h
-end
-
-function GRULayerT:init(batch_size, chunk_size)
- self.dagL:init(batch_size, chunk_size)
-end
-
-function GRULayerT:batch_resize(batch_size, chunk_size)
- self.dagL:batch_resize(batch_size, chunk_size)
-end
-
-function GRULayerT:update(bp_err, input, output, t)
- self.dagL:update(bp_err, input, output, t)
-end
-
-function GRULayerT:propagate(input, output, t)
- self.dagL:propagate(input, output, t)
-end
-
-function GRULayerT:back_propagate(bp_err, next_bp_err, input, output, t)
- self.dagL:back_propagate(bp_err, next_bp_err, input, output, t)
-end
-
-function GRULayerT:get_params()
- return self.dagL:get_params()
-end
diff --git a/nerv/tnn/layersT/lstm_t.lua b/nerv/tnn/layersT/lstm_t.lua
deleted file mode 100644
index 04d0600..0000000
--- a/nerv/tnn/layersT/lstm_t.lua
+++ /dev/null
@@ -1,124 +0,0 @@
-local LSTMLayerT = nerv.class('nerv.LSTMLayerT', 'nerv.LayerT')
-
-function LSTMLayerT:__init(id, global_conf, layer_conf)
- --input1:x input2:h input3:c
- self.id = id
- self.dim_in = layer_conf.dim_in
- self.dim_out = layer_conf.dim_out
- self.gconf = global_conf
-
- --prepare a DAGLayerT to hold the lstm structure
- local pr = layer_conf.pr
- if pr == nil then
- pr = nerv.ParamRepo()
- end
-
- local function ap(str)
- return self.id .. '.' .. str
- end
-
- local layers = {
- ["nerv.CombinerLayer"] = {
- [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
- ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
- [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
- ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
- [ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
- [ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]},
- ["lambda"] = {1, 1}}},
- },
- ["nerv.AffineLayer"] = {
- [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
- ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
- },
- ["nerv.TanhLayer"] = {
- [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
- [ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
- },
- ["nerv.GateFLayer"] = {
- [ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
- [ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
- [ap("outputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
-
- },
- ["nerv.ElemMulLayer"] = {
- [ap("inputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
- [ap("forgetGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
- [ap("outputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
- },
- }
-
- local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
-
- local connections_t = {
- ["<input>[1]"] = ap("inputXDup[1]"),
- ["<input>[2]"] = ap("inputHDup[1]"),
- ["<input>[3]"] = ap("inputCDup[1]"),
-
- [ap("inputXDup[1]")] = ap("mainAffineL[1]"),
- [ap("inputHDup[1]")] = ap("mainAffineL[2]"),
- [ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
-
- [ap("inputXDup[2]")] = ap("inputGateL[1]"),
- [ap("inputHDup[2]")] = ap("inputGateL[2]"),
- [ap("inputCDup[1]")] = ap("inputGateL[3]"),
-
- [ap("inputXDup[3]")] = ap("forgetGateL[1]"),
- [ap("inputHDup[3]")] = ap("forgetGateL[2]"),
- [ap("inputCDup[2]")] = ap("forgetGateL[3]"),
-
- [ap("mainTanhL[1]")] = ap("inputGMulL[1]"),
- [ap("inputGateL[1]")] = ap("inputGMulL[2]"),
-
- [ap("inputCDup[3]")] = ap("forgetGMulL[1]"),
- [ap("forgetGateL[1]")] = ap("forgetGMulL[2]"),
-
- [ap("inputGMulL[1]")] = ap("mainCDup[1]"),
- [ap("forgetGMulL[1]")] = ap("mainCDup[2]"),
-
- [ap("inputXDup[4]")] = ap("outputGateL[1]"),
- [ap("inputHDup[4]")] = ap("outputGateL[2]"),
- [ap("mainCDup[3]")] = ap("outputGateL[3]"),
-
- [ap("mainCDup[2]")] = "<output>[2]",
- [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
-
- [ap("outputTanhL[1]")] = ap("outputGMulL[1]"),
- [ap("outputGateL[1]")] = ap("outputGMulL[2]"),
-
- [ap("outputGMulL[1]")] = "<output>[1]",
- }
- self.dagL = nerv.DAGLayerT(self.id, global_conf,
- {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
- ["connections"] = connections_t})
-
- self:check_dim_len(3, 2) -- x, h, c and h, c
-end
-
-function LSTMLayerT:init(batch_size, chunk_size)
- self.dagL:init(batch_size, chunk_size)
-end
-
-function LSTMLayerT:batch_resize(batch_size, chunk_size)
- self.dagL:batch_resize(batch_size, chunk_size)
-end
-
-function LSTMLayerT:update(bp_err, input, output, t)
- self.dagL:update(bp_err, input, output, t)
-end
-
-function LSTMLayerT:propagate(input, output, t)
- self.dagL:propagate(input, output, t)
-end
-
-function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
- self.dagL:back_propagate(bp_err, next_bp_err, input, output, t)
-end
-
-function LSTMLayerT:get_params()
- return self.dagL:get_params()
-end
diff --git a/nerv/tnn/layersT/softmax_ce_t.lua b/nerv/tnn/layersT/softmax_ce_t.lua
deleted file mode 100644
index a9ce975..0000000
--- a/nerv/tnn/layersT/softmax_ce_t.lua
+++ /dev/null
@@ -1,93 +0,0 @@
-local SoftmaxCELayer = nerv.class("nerv.SoftmaxCELayerT", "nerv.LayerT")
-
-function SoftmaxCELayer:__init(id, global_conf, layer_conf)
- self.id = id
- self.gconf = global_conf
- self.dim_in = layer_conf.dim_in
- self.dim_out = layer_conf.dim_out
- self.compressed = layer_conf.compressed
- if self.compressed == nil then
- self.compressed = false
- end
- self:check_dim_len(2, -1) -- two inputs: nn output and label
-end
-
-function SoftmaxCELayer:init(batch_size, chunk_size)
- if not self.compressed and (self.dim_in[1] ~= self.dim_in[2]) then
- nerv.error("mismatching dimensions of previous network output and labels")
- end
- if chunk_size == nil then
- chunk_size = 1
- end
- self.total_ce = 0.0
- self.total_correct = 0
- self.total_frames = 0
- self.softmax_t = {}
- self.ce_t = {}
- for t = 1, chunk_size do
- self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
- self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
- end
-end
-
-function SoftmaxCELayer:batch_resize(batch_size, chunk_size)
- if chunk_size == nil then
- chunk_size = 1
- end
- for t = 1, chunk_size do
- if self.softmax_t[t]:nrow() ~= batch_size then
- self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
- self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
- end
- end
-end
-
-function SoftmaxCELayer:update(bp_err, input, output, t)
- -- no params, therefore do nothing
-end
-
-function SoftmaxCELayer:propagate(input, output, t)
- if t == nil then
- t = 1
- end
- local softmax = self.softmax_t[t]
- local ce = self.ce_t[t]
- local classified = softmax:softmax(input[1])
- local label = input[2]
- ce:log_elem(softmax)
- if self.compressed then
- label = label:decompress(input[1]:ncol())
- end
- ce:mul_elem(ce, label)
- ce = ce:rowsum()
- if output[1] ~= nil then
- output[1]:copy_fromd(ce)
- end
- -- add total ce
- self.total_ce = self.total_ce - ce:colsum()[0][0]
- self.total_frames = self.total_frames + softmax:nrow()
- -- TODO: add colsame for uncompressed label
- if self.compressed then
- self.total_correct = self.total_correct + classified:colsame(input[2])[0][0]
- end
-end
-
-function SoftmaxCELayer:back_propagate(bp_err, next_bp_err, input, output, t)
- -- softmax output - label
- if t == nil then
- t = 1
- end
- local label = input[2]
- if self.compressed then
- label = label:decompress(input[1]:ncol())
- end
- local nbe = next_bp_err[1]
- nbe:add(self.softmax_t[t], label, 1.0, -1.0)
- if bp_err[1] ~= nil then
- nbe:scale_rows_by_col(bp_err[1])
- end
-end
-
-function SoftmaxCELayer:get_params()
- return nerv.ParamRepo({})
-end