Diffstat (limited to 'nerv/examples/lmptb/tnn/layersT/lstm_t.lua')
-rw-r--r--  nerv/examples/lmptb/tnn/layersT/lstm_t.lua | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/nerv/examples/lmptb/tnn/layersT/lstm_t.lua b/nerv/examples/lmptb/tnn/layersT/lstm_t.lua
index 4ec2e54..d7d8a20 100644
--- a/nerv/examples/lmptb/tnn/layersT/lstm_t.lua
+++ b/nerv/examples/lmptb/tnn/layersT/lstm_t.lua
@@ -19,10 +19,12 @@ function LSTMLayerT:__init(id, global_conf, layer_conf)
local layers = {
["nerv.CombinerLayer"] = {
- [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]}, ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1]}}},
- [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]}, ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2]}}},
+ [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
+ ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
+ [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
[ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}}},
+ ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
[ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3]},
["lambda"] = {1, 1}}},
},
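
Note: the ["lambda"] = {1} entries added above give each *Dup combiner an explicit weight for its single input. Conceptually a CombinerLayer forms a weighted sum of its input ports and copies the result to every output port, so a one-input combiner with lambda = {1} acts as a pure fan-out/duplicator. A minimal sketch of that behaviour in plain Lua (the combine helper below is hypothetical and is not the NERV CombinerLayer API):

    -- Conceptual sketch only: weighted sum of the inputs, duplicated to
    -- every output port. Names here are invented for illustration.
    local function combine(inputs, lambda, n_out)
        local sum = 0
        for i, v in ipairs(inputs) do
            sum = sum + lambda[i] * v      -- weighted sum over input ports
        end
        local outputs = {}
        for j = 1, n_out do
            outputs[j] = sum               -- same value fanned out to each output port
        end
        return outputs
    end

    local x = 0.5
    local outs = combine({x}, {1}, 3)      -- inputXDup-style: one input, three identical copies
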
@@ -76,39 +78,37 @@ function LSTMLayerT:__init(id, global_conf, layer_conf)
[ap("forgetGMul[1]")] = ap("mainCDup[2]"),
[ap("mainCDup[2]")] = "<output>[2]",
- }
+ [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
+ [ap("outputTanhL[1]")] = "<output>[1]",
+ }
+ self.dagL = nerv.DAGLayerT(self.id, global_conf,
+ {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t})
+
self:check_dim_len(3, 2) -- x, h, c and h, c
end
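
Note: in connections_t each key names a source port and each value the destination port it feeds; "<input>[i]" and "<output>[j]" refer to the wrapper layer's own ports, and ap() is presumably the helper defined earlier in __init that prefixes sub-layer names with this layer's id. A toy wiring in the same style (the layer names below are made up for illustration and do not appear in this file):

    -- Hypothetical wiring sketch in the same source -> destination style;
    -- ap() is assumed to prepend self.id to the sub-layer name.
    local connections_t = {
        ["<input>[1]"]      = ap("affineL[1]"),   -- DAG input 1 feeds the affine sub-layer
        [ap("affineL[1]")]  = ap("sigmoidL[1]"),  -- affine output 1 feeds sigmoid input 1
        [ap("sigmoidL[1]")] = "<output>[1]",      -- sigmoid output 1 becomes DAG output 1
    }
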
-function LSTMLayerT:init(batch_size)
- if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform and bias paramter")
- end
- if self.dim_in[1] ~= self.ltp.trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- if self.dim_out[1] ~= self.ltp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self.ltp_grad = self.ltp.trans:create()
- self.ltp:train_init()
- self.bp:train_init()
+function LSTMLayerT:init(batch_size, chunk_size)
+ self.dagL:init(batch_size, chunk_size)
end
-function LSTMLayerT:batch_resize(batch_size)
- -- do nothing
+function LSTMLayerT:batch_resize(batch_size, chunk_size)
+ self.dagL:batch_resize(batch_size, chunk_size)
end
function LSTMLayerT:update(bp_err, input, output)
+ self.dagL:update(bp_err, input, output)
end
function LSTMLayerT:propagate(input, output)
+ self.dagL:propagate(input, output)
end
function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output)
+ self.dagL:back_propagate(bp_err, next_bp_err, input, output)
end
function LSTMLayerT:get_params()
- return nerv.ParamRepo({self.ltp, self.bp})
+ return self.dagL:get_params()
end
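
Note: after this change LSTMLayerT is a thin wrapper; dimension checks, parameters and computation all live in the internal DAGLayerT, and the public methods simply forward to self.dagL. A usage sketch under stated assumptions (gconf, lconf, batch_size, chunk_size and the input/output tables are assumed to be prepared by the caller and are not defined here):

    -- Hypothetical usage sketch; gconf and lconf stand for a prepared global_conf
    -- and layer_conf (dim_in = {x, h, c}, dim_out = {h, c}).
    local lstm = nerv.LSTMLayerT("lstmL1", gconf, lconf)
    lstm:init(batch_size, chunk_size)        -- forwarded to self.dagL:init
    lstm:propagate(input, output)            -- forwarded to self.dagL:propagate
    lstm:back_propagate(bp_err, next_bp_err, input, output)
    local params = lstm:get_params()         -- collected from the sub-layers via the DAG
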