author     txh18 <[email protected]>  2015-10-28 15:06:27 +0800
committer  txh18 <[email protected]>  2015-10-28 15:06:27 +0800
commit     af99db1c6bc4823cc6ff094f24e963acd4788ef1 (patch)
tree       6b33abba8eb72475b0cdddb55435efa9e23ceb03
parent     7c95640c95f1cc1d84b4d49fa97fd922748b88a7 (diff)
added update_select_rows to doc
-rw-r--r--  nerv/doc/nerv_matrix.md       2
-rw-r--r--  nerv/examples/lmptb/main.lua  6
-rw-r--r--  nerv/nn/layer_dag.lua         4
3 files changed, 6 insertions, 6 deletions
diff --git a/nerv/doc/nerv_matrix.md b/nerv/doc/nerv_matrix.md
index 54bf440..b915dee 100644
--- a/nerv/doc/nerv_matrix.md
+++ b/nerv/doc/nerv_matrix.md
@@ -67,6 +67,8 @@ Copy the content of the __Matrix__ `self` to a __CuMatrix__ `a`.
 `idx` should be a row vector. This function copy the rows of `ma` to `self` according to `idx`, in other words, it assigns `ma[idx[i]]` to `self[i]`.
 * __void Matrix.copy_rows_fromd_by_idx(Matrix self, CuMatrix b, CuMatrix idx)__
 `idx` needs to a row vector matrix, it stacks the rows of index `idx` of the __CuMatrix__ `b` and copies to `self`.
+* __void Matrix.update_select_rows(Matrix self, Matrix err, Matrix idx, double alpha, double beta)__
+Update selected rows of `self`, i.e. `self[idx[i]] = self[idx[i]] * (1 - beta * alpha) + alpha * err[i]`.
 * __void Matrix.add(Matrix self, Matrix ma, Matrix mb, Element_type alpha, Element_type beta)__
 It sets the content of __Matrix__ `self` to be `alpha * ma + beta * mb`.__Matrix__ `ma,mb,self` should be of the same size.
 * __void Matrix.mul(Matrix self, Matrix ma, Matrix mb, Element_type alpha, Element_type beta, [string ta, string tb])__
diff --git a/nerv/examples/lmptb/main.lua b/nerv/examples/lmptb/main.lua
index d505456..1939eda 100644
--- a/nerv/examples/lmptb/main.lua
+++ b/nerv/examples/lmptb/main.lua
@@ -224,9 +224,11 @@ function propagateFile(global_conf, dagL, fn, config)
         if (result["rnn"].cn_w % global_conf.log_w_num == 0) then
             printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["rnn"].cn_w, os.date())
             printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("rnn"));
+            --[[
             for key, value in pairs(global_conf.timer.rec) do
                 printf("\t [global_conf.timer]: time spent on %s:%.5fs\n", key, value)
             end
+            ]]-- --comment this for debughtx
             global_conf.timer:flush()
             --nerv.CuMatrix.print_profile()
@@ -272,7 +274,7 @@ if (set == "ptb") then
         hidden_size = 200,
         batch_size = 10,
-        bptt = 3, --train bptt_block's words. could be set to zero
+        bptt = 6, --train bptt_block's words. could be set to zero
         max_iter = 18,
         param_random = function() return (math.random() / 5 - 0.1) end,
         independent = true,
@@ -281,7 +283,7 @@ if (set == "ptb") then
         valid_fn = valid_fn,
         test_fn = test_fn,
         sche_log_pre = "[SCHEDULER]:",
-        log_w_num = 500000, --give a message when log_w_num words have been processed
+        log_w_num = 100000, --give a message when log_w_num words have been processed
         timer = nerv.Timer()
     }
     global_conf.work_dir = work_dir_base.."/h"..global_conf.hidden_size.."bp"..global_conf.bptt.."slr"..global_conf.lrate --..os.date("_%bD%dH%H") --comment this for testing
diff --git a/nerv/nn/layer_dag.lua b/nerv/nn/layer_dag.lua
index 4904f4f..73bb77d 100644
--- a/nerv/nn/layer_dag.lua
+++ b/nerv/nn/layer_dag.lua
@@ -251,9 +251,7 @@ function DAGLayer:update(bp_err, input, output)
     -- print("update")
     for id, ref in pairs(self.queue) do
         -- print(ref.layer.id)
-        self.gconf.timer:tic("(update)"..ref.layer.id);
         ref.layer:update(ref.err_inputs, ref.inputs, ref.outputs)
-        self.gconf.timer:toc("(update)"..ref.layer.id);
     end
 end
@@ -264,9 +262,7 @@ function DAGLayer:propagate(input, output)
     for i = 1, #self.queue do
         local ref = self.queue[i]
         -- print(ref.layer.id)
-        self.gconf.timer:tic("(propagate)"..ref.layer.id);
         ret = ref.layer:propagate(ref.inputs, ref.outputs)
-        self.gconf.timer:toc("(propagate)"..ref.layer.id);
     end
     return ret
 end
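
Note on the documented formula: the snippet below is a pure-Lua sketch, not part of this commit and not nerv's actual implementation, that spells out the `update_select_rows` rule added to `nerv_matrix.md` above. The tables `w` (standing in for `self`), `err` and `idx`, their sizes, and the `alpha`/`beta` values are made-up illustrations, and indices are 1-based Lua indices.

-- Pure-Lua illustration of: self[idx[i]] = self[idx[i]] * (1 - beta * alpha) + alpha * err[i]
-- `w` stands in for `self`; plain Lua tables are used instead of nerv Matrix objects.
local w     = { {1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0} }  -- matrix whose rows get updated
local err   = { {0.5, 0.5}, {0.1, 0.1} }              -- one error row per selected row
local idx   = { 3, 1 }                                -- rows of w selected for update
local alpha = 0.1                                     -- learning rate (example value)
local beta  = 1e-6                                    -- weight-decay factor (example value)

for i = 1, #idx do
    local r = idx[i]
    for j = 1, #w[r] do
        -- decay the existing row slightly, then add the scaled error row
        w[r][j] = w[r][j] * (1 - beta * alpha) + alpha * err[i][j]
    end
end

A sparse row update of this kind is presumably what the lmptb example needs for its word-embedding style input layer, where only the rows belonging to the words in the current batch should change.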