author     txh18 <[email protected]>   2015-10-23 19:36:31 +0800
committer  txh18 <[email protected]>   2015-10-23 19:36:31 +0800
commit     1234c026869ab052e898cc2541143fe4a22312b6 (patch)
tree       bd4b980ae12340b4ea3a8aa6259d43dc891b5568 /embedding_example/setup_nerv.lua
parent     f0937ae6e6401f25f15bb0e83e764ca888e81f11 (diff)
parent     64fce92b7845b716f3c168036691c37b2467d99b (diff)
Just come back, let's merge the new master
Merge branch 'master' into txh18/rnnlm
Diffstat (limited to 'embedding_example/setup_nerv.lua')
-rw-r--r--  embedding_example/setup_nerv.lua  25
1 file changed, 25 insertions, 0 deletions
diff --git a/embedding_example/setup_nerv.lua b/embedding_example/setup_nerv.lua
new file mode 100644
index 0000000..d80c306
--- /dev/null
+++ b/embedding_example/setup_nerv.lua
@@ -0,0 +1,25 @@
+local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+require 'nerv'
+local arg = {...}
+dofile(arg[1])
+local param_repo = nerv.ParamRepo()
+param_repo:import(gconf.initialized_param, nil, gconf)
+local layer_repo = make_layer_repo(param_repo)
+local network = get_decode_network(layer_repo)
+local global_transf = get_global_transf(layer_repo)
+local batch_size = 1
+network:init(batch_size)
+
+function propagator(input, output)
+    local transformed = nerv.speech_utils.global_transf(
+        gconf.cumat_type.new_from_host(input),
+        global_transf, 0, 0, gconf) -- preprocessing
+    local gpu_input = transformed
+    local gpu_output = nerv.CuMatrixFloat(output:nrow(), output:ncol())
+    network:propagate({gpu_input}, {gpu_output})
+    gpu_output:copy_toh(output)
+    -- collect garbage in-time to save GPU memory
+    collectgarbage("collect")
+end
+
+return network.dim_in[1], network.dim_out[1], propagator
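
The added script loads a trained network described by a config file and returns its input dimension, output dimension, and a propagator closure that runs the global transform and one forward pass on the GPU for a single frame. Below is a minimal Lua driver sketch of how it could be used; the config path is a placeholder, and the use of nerv.MMatrixFloat for the host-side buffers is an assumption inferred from the script's new_from_host()/copy_toh() calls, not something this commit specifies.

-- Minimal driver sketch (assumptions noted above).
-- setup_nerv.lua requires 'nerv' itself, so the module is available after the call.
local chunk = assert(loadfile("embedding_example/setup_nerv.lua"))
local dim_in, dim_out, propagate = chunk("path/to/config.lua") -- placeholder config path

-- One frame of host-side input/output, sized from the network (the script inits batch_size = 1).
local input  = nerv.MMatrixFloat(1, dim_in)   -- assumed host matrix type
local output = nerv.MMatrixFloat(1, dim_out)
-- ... fill `input` with feature values here ...

propagate(input, output) -- global transform + forward pass on the GPU
-- `output` now holds the network output for this frame.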