diff options
author | txh18 <cloudygooseg@gmail.com> | 2015-12-10 13:28:13 +0800 |
---|---|---|
committer | txh18 <cloudygooseg@gmail.com> | 2015-12-10 13:28:13 +0800 |
commit | 91075c34160fa24e484148b26c1178e05c2212a4 (patch) | |
tree | 9bfab2962f6f8b6c28b41c56793fec3e48d94412 /nerv/examples | |
parent | 62169f73b935dd6df8fe0c5628beed58820d186e (diff) |
bug fix for recent changes in tnn
Diffstat (limited to 'nerv/examples')
-rw-r--r-- | nerv/examples/lmptb/lmptb/layer/select_linear.lua | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/nerv/examples/lmptb/lmptb/layer/select_linear.lua b/nerv/examples/lmptb/lmptb/layer/select_linear.lua index 580b9c5..431ef3a 100644 --- a/nerv/examples/lmptb/lmptb/layer/select_linear.lua +++ b/nerv/examples/lmptb/lmptb/layer/select_linear.lua @@ -30,7 +30,7 @@ function SL:init(batch_size) end function SL:update(bp_err, input, output) - --use this to produce reproducable result + --use this to produce reproducable result, don't forget to set the dropout to zero! --for i = 1, input[1]:nrow(), 1 do -- local word_vec = self.ltp.trans[input[1][i - 1][0]] -- word_vec:add(word_vec, bp_err[1][i - 1], 1, - self.gconf.lrate / self.gconf.batch_size) |