author    txh18 <[email protected]>    2015-11-08 17:59:27 +0800
committer txh18 <[email protected]>    2015-11-08 17:59:27 +0800
commit    a8d369d49933ffdd306f47db6b74e0d27deda5d0 (patch)
tree      8a2126153fb2363ffea94bfe8c0b9e88f11bd0ea
parent    abc36052969ab121c8a1cfa478fc14e9e8dc78a2 (diff)
trying to test softmax_ce_t
-rw-r--r--  nerv/examples/lmptb/lmptb/lmseqreader.lua  |  1
-rw-r--r--  nerv/examples/lmptb/m-tests/tnn_test.lua   |  8
-rw-r--r--  nerv/examples/lmptb/rnn/init.lua           | 45
-rw-r--r--  nerv/examples/lmptb/rnn/softmax_ce_t.lua   | 81
-rw-r--r--  nerv/examples/lmptb/rnn/tnn.lua            |  2
5 files changed, 131 insertions(+), 6 deletions(-)
diff --git a/nerv/examples/lmptb/lmptb/lmseqreader.lua b/nerv/examples/lmptb/lmptb/lmseqreader.lua
index 41e3903..f7e2539 100644
--- a/nerv/examples/lmptb/lmptb/lmseqreader.lua
+++ b/nerv/examples/lmptb/lmptb/lmseqreader.lua
@@ -1,5 +1,4 @@
require 'lmptb.lmvocab'
-require 'rnn.tnn'
local LMReader = nerv.class("nerv.LMSeqReader")
diff --git a/nerv/examples/lmptb/m-tests/tnn_test.lua b/nerv/examples/lmptb/m-tests/tnn_test.lua
index ddea54c..888ba0f 100644
--- a/nerv/examples/lmptb/m-tests/tnn_test.lua
+++ b/nerv/examples/lmptb/m-tests/tnn_test.lua
@@ -2,8 +2,8 @@ require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
require 'lmptb.layer.init'
+require 'rnn.init'
require 'lmptb.lmseqreader'
-require 'rnn.tnn'
--[[global function rename]]--
printf = nerv.printf
@@ -194,6 +194,7 @@ function lm_process_file(global_conf, fn, tnn, do_train)
next_log_wcn = next_log_wcn + global_conf.log_w_num
printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["rnn"].cn_w, os.date())
printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("rnn"))
+ nerv.LMUtil.wait(1)
end
--[[
@@ -259,17 +260,16 @@ global_conf = {
nn_act_default = 0,
hidden_size = 20,
- chunk_size = 5,
+ chunk_size = 2,
batch_size = 3,
max_iter = 3,
param_random = function() return (math.random() / 5 - 0.1) end,
- independent = true,
train_fn = train_fn,
valid_fn = valid_fn,
test_fn = test_fn,
sche_log_pre = "[SCHEDULER]:",
- log_w_num = 20, --give a message when log_w_num words have been processed
+ log_w_num = 10, --give a message when log_w_num words have been processed
timer = nerv.Timer()
}
diff --git a/nerv/examples/lmptb/rnn/init.lua b/nerv/examples/lmptb/rnn/init.lua
new file mode 100644
index 0000000..0e08cb6
--- /dev/null
+++ b/nerv/examples/lmptb/rnn/init.lua
@@ -0,0 +1,45 @@
+local Layer = nerv.class('nerv.LayerT')
+
+function Layer:__init(id, global_conf, layer_conf)
+    nerv.error_method_not_implemented()
+end
+
+function Layer:init(batch_size, chunk_size)
+    nerv.error_method_not_implemented()
+end
+
+function Layer:update(bp_err, input, output, t)
+    nerv.error_method_not_implemented()
+end
+
+function Layer:propagate(input, output, t)
+    nerv.error_method_not_implemented()
+end
+
+function Layer:back_propagate(bp_err, next_bp_err, input, output, t)
+    nerv.error_method_not_implemented()
+end
+
+function Layer:check_dim_len(len_in, len_out)
+    local expected_in = #self.dim_in
+    local expected_out = #self.dim_out
+    if len_in > 0 and expected_in ~= len_in then
+        nerv.error("layer %s expects %d inputs, %d given",
+                   self.id, len_in, expected_in)
+    end
+    if len_out > 0 and expected_out ~= len_out then
+        nerv.error("layer %s expects %d outputs, %d given",
+                   self.id, len_out, expected_out)
+    end
+end
+
+function Layer:get_params()
+    nerv.error_method_not_implemented()
+end
+
+function Layer:get_dim()
+    return self.dim_in, self.dim_out
+end
+
+nerv.include('tnn.lua')
+nerv.include('softmax_ce_t.lua')
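
For context, a minimal sketch of a concrete layer built on this nerv.LayerT interface. The IdentityLayerT below is purely illustrative and not part of this commit; it only assumes matrices that support copy_fromd, as used elsewhere in this patch:

    -- Illustrative pass-through layer implementing the nerv.LayerT interface above.
    local IdentityLayerT = nerv.class("nerv.IdentityLayerT", "nerv.LayerT")

    function IdentityLayerT:__init(id, global_conf, layer_conf)
        self.id = id
        self.gconf = global_conf
        self.dim_in = layer_conf.dim_in
        self.dim_out = layer_conf.dim_out
        self:check_dim_len(1, 1) -- exactly one input and one output
    end

    function IdentityLayerT:init(batch_size, chunk_size)
        if self.dim_in[1] ~= self.dim_out[1] then
            nerv.error("dim_in and dim_out must match for a pass-through layer")
        end
    end

    function IdentityLayerT:update(bp_err, input, output, t)
        -- no trainable parameters to update
    end

    function IdentityLayerT:propagate(input, output, t)
        output[1]:copy_fromd(input[1]) -- copy the input straight through
    end

    function IdentityLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
        next_bp_err[1]:copy_fromd(bp_err[1]) -- pass the error straight back
    end

    function IdentityLayerT:get_params()
        return nerv.ParamRepo({}) -- no parameters
    end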
diff --git a/nerv/examples/lmptb/rnn/softmax_ce_t.lua b/nerv/examples/lmptb/rnn/softmax_ce_t.lua
new file mode 100644
index 0000000..dddb05a
--- /dev/null
+++ b/nerv/examples/lmptb/rnn/softmax_ce_t.lua
@@ -0,0 +1,81 @@
+local SoftmaxCELayer = nerv.class("nerv.SoftmaxCELayerT", "nerv.LayerT")
+
+function SoftmaxCELayer:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.gconf = global_conf
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.compressed = layer_conf.compressed
+    if self.compressed == nil then
+        self.compressed = false
+    end
+    self:check_dim_len(2, -1) -- two inputs: nn output and label
+end
+
+function SoftmaxCELayer:init(batch_size, chunk_size)
+    if not self.compressed and (self.dim_in[1] ~= self.dim_in[2]) then
+        nerv.error("mismatching dimensions of previous network output and labels")
+    end
+    self.total_ce = 0.0
+    self.total_correct = 0
+    self.total_frames = 0
+    self.softmax_t = {}
+    self.ce_t = {}
+    for t = 1, chunk_size do
+        self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
+        self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
+    end
+end
+
+function SoftmaxCELayer:batch_resize(batch_size)
+    for t = 1, #self.softmax_t do
+        if self.softmax_t[t]:nrow() ~= batch_size then
+            self.softmax_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
+            self.ce_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
+        end
+    end
+end
+
+function SoftmaxCELayer:update(bp_err, input, output, t)
+    -- no params, therefore do nothing
+end
+
+function SoftmaxCELayer:propagate(input, output, t)
+    local softmax = self.softmax_t[t]
+    local ce = self.ce_t[t]
+    local classified = softmax:softmax(input[1])
+    local label = input[2]
+    ce:log_elem(softmax)
+    if self.compressed then
+        label = label:decompress(input[1]:ncol())
+    end
+    ce:mul_elem(ce, label)
+    ce = ce:rowsum()
+    if output[1] ~= nil then
+        output[1]:copy_fromd(ce)
+    end
+    -- add total ce
+    self.total_ce = self.total_ce - ce:colsum()[0][0]
+    self.total_frames = self.total_frames + softmax:nrow()
+    -- TODO: add colsame for uncompressed label
+    if self.compressed then
+        self.total_correct = self.total_correct + classified:colsame(input[2])[0][0]
+    end
+end
+
+function SoftmaxCELayer:back_propagate(bp_err, next_bp_err, input, output, t)
+    -- softmax output - label
+    local label = input[2]
+    if self.compressed then
+        label = label:decompress(input[1]:ncol())
+    end
+    local nbe = next_bp_err[1]
+    nbe:add(self.softmax_t[t], label, 1.0, -1.0)
+    if bp_err[1] ~= nil then
+        nbe:scale_rows_by_col(bp_err[1])
+    end
+end
+
+function SoftmaxCELayer:get_params()
+    return nerv.ParamRepo({})
+end
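
A rough sketch of how nerv.SoftmaxCELayerT could be driven by hand for a few time steps. The gconf table, the dimensions, and the matrices below are illustrative assumptions (this commit only exercises the layer through the TNN in tnn_test.lua); cumat_type is assumed to be nerv.CuMatrixFloat as in the other lmptb examples:

    -- Illustrative only: standalone use of nerv.SoftmaxCELayerT.
    local gconf = {cumat_type = nerv.CuMatrixFloat}
    local sm_ce = nerv.SoftmaxCELayerT("softmax_ce", gconf,
                                       {dim_in = {10, 10}, dim_out = {1}})
    local batch_size, chunk_size = 3, 2
    sm_ce:init(batch_size, chunk_size) -- allocates softmax_t[t] and ce_t[t] per step

    -- These would be filled with real network scores and one-hot labels.
    local nn_output = gconf.cumat_type(batch_size, 10)
    local labels    = gconf.cumat_type(batch_size, 10)
    local ce_out    = gconf.cumat_type(batch_size, 1) -- per-sample cross entropy

    for t = 1, chunk_size do
        sm_ce:propagate({nn_output, labels}, {ce_out}, t)
    end
    nerv.printf("total ce: %f over %d frames\n", sm_ce.total_ce, sm_ce.total_frames)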
diff --git a/nerv/examples/lmptb/rnn/tnn.lua b/nerv/examples/lmptb/rnn/tnn.lua
index ae9ed7a..8c3963c 100644
--- a/nerv/examples/lmptb/rnn/tnn.lua
+++ b/nerv/examples/lmptb/rnn/tnn.lua
@@ -198,7 +198,7 @@ function TNN:init(batch_size, chunk_size)
end
end
-- initialize sub layers
- ref.layer:init(batch_size)
+ ref.layer:init(batch_size, chunk_size)
end
local flags_now = {}
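
This one-line change is what lets sub layers size their per-timestep buffers: TNN:init now forwards the chunk length, so a layer like nerv.SoftmaxCELayerT can allocate softmax_t[1..chunk_size] up front. A hedged sketch of the resulting call pattern (layer_refs is a stand-in name, not the actual TNN field):

    -- Illustrative only: every sub layer now receives both sizes at init time.
    for id, ref in pairs(layer_refs) do
        ref.layer:init(batch_size, chunk_size)
    end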