-rw-r--r--   fastnn/example/asgd_sds_trainer.lua    93
-rw-r--r--   fastnn/example/fastnn_baseline.lua    258
-rw-r--r--   fastnn/lib/ModelSync.c                305
-rw-r--r--   fastnn/threads/Makefile                 1
-rw-r--r--   nerv/io/sgd_buffer.lua                  4
-rw-r--r--   nerv/lib/common.h                       2
-rw-r--r--   nerv/lib/matrix/cumatrix.c              2
-rw-r--r--   nerv/nn/layer_repo.lua                  2
8 files changed, 624 insertions, 43 deletions
diff --git a/fastnn/example/asgd_sds_trainer.lua b/fastnn/example/asgd_sds_trainer.lua
index 44611b2..cf1c7a6 100644
--- a/fastnn/example/asgd_sds_trainer.lua
+++ b/fastnn/example/asgd_sds_trainer.lua
@@ -1,19 +1,22 @@
-package.path="/home/slhome/wd007/.luarocks/share/lua/5.1/?.lua;/home/slhome/wd007/.luarocks/share/lua/5.1/?/init.lua;/sgfs/users/wd007/src/nerv/install/share/lua/5.1/?.lua;/sgfs/users/wd007/src/nerv/install/share/lua/5.1/?/init.lua;"..package.path;
-package.cpath="/home/slhome/wd007/.luarocks/lib/lua/5.1/?.so;/sgfs/users/wd007/src/nerv/install/lib/lua/5.1/?.so;"..package.cpath
+
+NERV_ROOT = "/sgfs/users/wd007/src/nerv-2"
+
+env = string.format([[
+package.path="/home/slhome/wd007/.luarocks/share/lua/5.1/?.lua;/home/slhome/wd007/.luarocks/share/lua/5.1/?/init.lua;%s/install/share/lua/5.1/?.lua;%s/install/share/lua/5.1/?/init.lua;"..package.path;
+package.cpath="/home/slhome/wd007/.luarocks/lib/lua/5.1/?.so;%s/install/lib/lua/5.1/?.so;"..package.cpath
local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
+]], NERV_ROOT, NERV_ROOT, NERV_ROOT)
+
+loadstring(env)()
+
require 'nerv'
require 'fastnn'
require 'libhtkio'
require 'threads'
-dofile("fastnn/fastnn_baseline.lua")
+dofile("fastnn/example/fastnn_baseline.lua")
-env = string.format([[
-package.path="/home/slhome/wd007/.luarocks/share/lua/5.1/?.lua;/home/slhome/wd007/.luarocks/share/lua/5.1/?/init.lua;/sgfs/users/wd007/src/nerv/install/share/lua/5.1/?.lua;/sgfs/users/wd007/src/nerv/install/share/lua/5.1/?/init.lua;"..package.path;
-package.cpath="/home/slhome/wd007/.luarocks/lib/lua/5.1/?.so;/sgfs/users/wd007/src/nerv/install/lib/lua/5.1/?.so;"..package.cpath
-local k,l,_=pcall(require,"luarocks.loader") _=k and l.add_context("nerv","scm-1")
-]])
train_thread_code = [[
@@ -21,7 +24,9 @@ train_thread_code = [[
require 'nerv'
require 'fastnn'
-dofile("fastnn/fastnn_baseline.lua")
+require 'libhtkio'
+
+dofile("fastnn/example/fastnn_baseline.lua")
os.execute("export MALLOC_CHECK_=0")
local thread_idx = %d
@@ -49,39 +54,35 @@ else
gconf.tr_scp = scp_file
end
+share_mutex:lock()
+
gconf.randomize = bp
gconf.lrate = lrate
gconf.batch_size = batch_size
-gconf.network[1] = nnet_in
-nerv.info_stderr("input network: %%s", gconf.network[1])
+gconf.initialized_param[2] = nnet_in
+nerv.info_stderr("input network: %%s", gconf.initialized_param[2])
--nerv.info_stderr(gconf.randomize)
nerv.info_stderr("input batch_size: %%d", gconf.batch_size)
nerv.info_stderr("input scp_file: %%s", scp_file)
nerv.info_stderr("input lrate: %%f", gconf.lrate)
-share_mutex:lock()
+
share_gpu:select_gpu()
nerv.context = nerv.CCuContext()
--print(nerv.context)
-nerv.info_stderr("thread %%d loading transf ...", thread_idx)
-local param_transf_repo = nerv.ParamRepo()
-param_transf_repo:import(gconf.transf, nil, gconf)
-local transf_node_repo = make_transf_node_repo(param_transf_repo)
-local transf_layer_repo = make_transf_link_repo(transf_node_repo, param_transf_repo)
-local transf = transf_layer_repo:get_layer("global_transf")
-
-nerv.info_stderr("thread %%d loading network ...", thread_idx)
-local param_network_repo = nerv.ParamRepo()
-param_network_repo:import(gconf.network, nil, gconf)
-local network_node_repo = make_network_node_repo(param_network_repo)
-local network_layer_repo = make_network_link_repo(network_node_repo, param_network_repo)
-local network = get_network(network_layer_repo)
+nerv.info_stderr("thread %%d loading parameters ...", thread_idx)
+local param_repo = nerv.ParamRepo()
+param_repo:import(gconf.initialized_param, nil, gconf)
+local layer_repo = make_layer_repo(param_repo)
+local network = get_network(layer_repo)
+local global_transf = get_global_transf(layer_repo)
+
share_mutex:unlock()
-local buffer = make_buffer(make_readers(nil, transf_layer_repo, feat_repo_shareid, data_mutex_shareid))
+local buffer = make_buffer(make_readers(nil, layer_repo, feat_repo_shareid, data_mutex_shareid))
local input_order = get_input_order()
@@ -98,21 +99,35 @@ local input_order = get_input_order()
gconf.cnt = gconf.cnt + 1
if gconf.cnt == 2000 then
- print_stat(network_node_repo)
+ print_stat(layer_repo)
gconf.cnt = 0
end
local input = {}
+
+ for i, e in ipairs(input_order) do
+ local id = e.id
+ if data[id] == nil then
+ nerv.error("input data %%s not found", id)
+ end
+ local transformed
+ if e.global_transf then
+ transformed = nerv.speech_utils.global_transf(data[id],
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf)
+ else
+ transformed = data[id]
+ end
+ table.insert(input, transformed)
+ end
+
+ local output = {nerv.CuMatrixFloat(gconf.batch_size, 1)}
+ err_output = {}
+ for i = 1, #input do
+ table.insert(err_output, input[i]:create())
+ end
- for i, id in ipairs(input_order) do
- if data[id] == nil then
- nerv.error("input data %%s not found", id)
- end
- table.insert(input, data[id])
- end
-
- local output = {nerv.CuMatrixFloat(gconf.batch_size, 1)}
- err_output = {input[1]:create()}
network:propagate(input, output)
if bp then
@@ -132,7 +147,7 @@ local input_order = get_input_order()
end
--print_stat(network_node_repo)
- local ce_crit = network_node_repo:get_layer("ce_crit")
+ local ce_crit = layer_repo:get_layer("ce_crit")
local xent = fastnn.CXent(ce_crit.total_frames, ce_crit.total_correct, ce_crit.total_ce, ce_crit.total_ce)
share_master:LockModel()
@@ -236,8 +251,8 @@ min_iter = 1
max_iter = 20
min_halving = 0
gconf.batch_size = 256
-pf0 = get_filename(gconf.network[1])
-nnet_in = gconf.network[1]
+pf0 = get_filename(gconf.initialized_param[2])
+nnet_in = gconf.initialized_param[2]
nnet_out = ""
sds_scp = "tr_sds_"..string.format("%.4d", math.random()*10000)..".scp" --"tr_sds.scp"
sds_factor = 0.4
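
For context, the rewritten bootstrap builds the package-path setup once from NERV_ROOT and re-executes it at the top of the main script and of every worker thread body. A minimal sketch of the pattern (illustrative NERV_ROOT; Lua 5.1's loadstring assumed):

    NERV_ROOT = "/path/to/nerv"                 -- illustrative install root
    env = string.format([[
    package.path  = "%s/install/share/lua/5.1/?.lua;" .. package.path
    package.cpath = "%s/install/lib/lua/5.1/?.so;" .. package.cpath
    ]], NERV_ROOT, NERV_ROOT)

    loadstring(env)()                           -- set up paths in the main thread
    worker_code = env .. [[
    require 'nerv'                              -- each worker re-runs the same setup
    require 'fastnn'
    ]]
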
diff --git a/fastnn/example/fastnn_baseline.lua b/fastnn/example/fastnn_baseline.lua
new file mode 100644
index 0000000..6e774de
--- /dev/null
+++ b/fastnn/example/fastnn_baseline.lua
@@ -0,0 +1,258 @@
+require 'htk_io'
+
+gconf = {lrate = 0.2, wcost = 1e-6, momentum = 0.9,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ frm_ext = 5,
+ frm_trim = 5,
+ batch_size = 256,
+ buffer_size = 81920,
+ rearrange = true,
+ tr_scp = "/sgfs/users/wd007/asr/baseline_chn_50h/finetune/finetune_baseline/train.scp",
+ cv_scp = "/sgfs/users/wd007/asr/baseline_chn_50h/finetune/finetune_baseline/train_cv.scp",
+ htk_conf = "/sgfs/users/wd007/asr/baseline_chn_50h/finetune/finetune_baseline/fbank_d_a_z.conf",
+ initialized_param = {"/sgfs/users/wd007/src/nerv/tools/nerv.global.transf",
+ "/sgfs/users/wd007/src/nerv/tools/nerv.svd0.55_3000h_iter1.init"},
+ debug = false}
+
+function make_layer_repo(param_repo)
+ local layer_repo = nerv.LayerRepo(
+ {
+ -- global transf
+ ["nerv.BiasLayer"] =
+ {
+ blayer1 = {{bias = "bias1"}, {dim_in = {1320}, dim_out = {1320}}},
+ },
+ ["nerv.WindowLayer"] =
+ {
+ wlayer1 = {{window = "window1"}, {dim_in = {1320}, dim_out = {1320}}},
+ },
+ -- biased linearity
+ ["nerv.AffineLayer"] =
+ {
+ affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
+ {dim_in = {1320}, dim_out = {2048}}},
+ affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
+ {dim_in = {2048}, dim_out = {367}}},
+ affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
+ {dim_in = {367}, dim_out = {2048}}},
+ affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
+ {dim_in = {2048}, dim_out = {408}}},
+ affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
+ {dim_in = {408}, dim_out = {2048}}},
+ affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
+ {dim_in = {2048}, dim_out = {368}}},
+ affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
+ {dim_in = {368}, dim_out = {2048}}},
+ affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
+ {dim_in = {2048}, dim_out = {303}}},
+ affine8 = {{ltp = "affine8_ltp", bp = "affine8_bp"},
+ {dim_in = {303}, dim_out = {2048}}},
+ affine9 = {{ltp = "affine9_ltp", bp = "affine9_bp"},
+ {dim_in = {2048}, dim_out = {277}}},
+ affine10 = {{ltp = "affine10_ltp", bp = "affine10_bp"},
+ {dim_in = {277}, dim_out = {2048}}},
+ affine11 = {{ltp = "affine11_ltp", bp = "affine11_bp"},
+ {dim_in = {2048}, dim_out = {361}}},
+ affine12 = {{ltp = "affine12_ltp", bp = "affine12_bp"},
+ {dim_in = {361}, dim_out = {2048}}},
+ affine13 = {{ltp = "affine13_ltp", bp = "affine13_bp"},
+ {dim_in = {2048}, dim_out = {441}}},
+ affine14 = {{ltp = "affine14_ltp", bp = "affine14_bp"},
+ {dim_in = {441}, dim_out = {10092}}},
+ },
+ ["nerv.SigmoidLayer"] =
+ {
+ sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ },
+ ["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
+ {
+ ce_crit = {{}, {dim_in = {10092, 1}, dim_out = {1}, compressed = true}}
+ },
+ ["nerv.SoftmaxLayer"] = -- softmax for decode output
+ {
+ softmax = {{}, {dim_in = {10092}, dim_out = {10092}}}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ global_transf = {{}, {
+ dim_in = {1320}, dim_out = {1320},
+ sub_layers = layer_repo,
+ connections =
+ {
+ ["<input>[1]"] = "blayer1[1]",
+ ["blayer1[1]"] = "wlayer1[1]",
+ ["wlayer1[1]"] = "<output>[1]"
+ }
+ }},
+ main = {{}, {
+ dim_in = {1320}, dim_out = {10092},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "affine0[1]",
+ ["affine0[1]"] = "sigmoid0[1]",
+ ["sigmoid0[1]"] = "affine1[1]",
+ ["affine1[1]"] = "affine2[1]",
+ ["affine2[1]"] = "sigmoid1[1]",
+ ["sigmoid1[1]"] = "affine3[1]",
+ ["affine3[1]"] = "affine4[1]",
+ ["affine4[1]"] = "sigmoid2[1]",
+ ["sigmoid2[1]"] = "affine5[1]",
+ ["affine5[1]"] = "affine6[1]",
+ ["affine6[1]"] = "sigmoid3[1]",
+ ["sigmoid3[1]"] = "affine7[1]",
+ ["affine7[1]"] = "affine8[1]",
+ ["affine8[1]"] = "sigmoid4[1]",
+ ["sigmoid4[1]"] = "affine9[1]",
+ ["affine9[1]"] = "affine10[1]",
+ ["affine10[1]"] = "sigmoid5[1]",
+ ["sigmoid5[1]"] = "affine11[1]",
+ ["affine11[1]"] = "affine12[1]",
+ ["affine12[1]"] = "sigmoid6[1]",
+ ["sigmoid6[1]"] = "affine13[1]",
+ ["affine13[1]"] = "affine14[1]",
+ ["affine14[1]"] = "<output>[1]",
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ ce_output = {{}, {
+ dim_in = {1320, 1}, dim_out = {1},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "ce_crit[1]",
+ ["<input>[2]"] = "ce_crit[2]",
+ ["ce_crit[1]"] = "<output>[1]"
+ }
+ }},
+ softmax_output = {{}, {
+ dim_in = {1320}, dim_out = {10092},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "softmax[1]",
+ ["softmax[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ return layer_repo
+end
+
+
+function get_network(layer_repo)
+ return layer_repo:get_layer("ce_output")
+end
+
+function get_decode_network(layer_repo)
+ return layer_repo:get_layer("softmax_output")
+end
+
+function get_global_transf(layer_repo)
+ return layer_repo:get_layer("global_transf")
+end
+
+
+
+function make_readers(scp_file, layer_repo, feat_repo_shareid, data_mutex_shareid)
+ return {
+ {reader = nerv.TNetReader(gconf,
+ {
+ id = "main_scp",
+ scp_file = scp_file,
+ conf_file = gconf.htk_conf,
+ frm_ext = gconf.frm_ext,
+ mlfs = {
+ phone_state = {
+ file = "/sgfs/users/wd007/asr/baseline_chn_50h/finetune/finetune_baseline/ref.mlf",
+ format = "map",
+ format_arg = "/sgfs/users/wd007/asr/baseline_chn_50h/finetune/finetune_baseline/dict",
+ dir = "*/",
+ ext = "lab"
+ }
+ },
+ global_transf = layer_repo:get_layer("global_transf")
+ }, feat_repo_shareid, data_mutex_shareid),
+ data = {main_scp = 1320, phone_state = 1}}
+ }
+end
+
+function get_feat_id()
+ return {main_scp = true}
+end
+
+
+function make_buffer(readers)
+ return nerv.SGDBuffer(gconf,
+ {
+ buffer_size = gconf.buffer_size,
+ randomize = gconf.randomize,
+ readers = readers,
+ use_gpu = true
+ })
+end
+
+function get_input_order()
+ return {{id = "main_scp", global_transf = true},
+ {id = "phone_state"}}
+end
+
+function get_accuracy(layer_repo)
+ local ce_crit = layer_repo:get_layer("ce_crit")
+ return ce_crit.total_correct / ce_crit.total_frames * 100
+end
+
+function print_stat(layer_repo)
+ local ce_crit = layer_repo:get_layer("ce_crit")
+ nerv.info("*** training stat begin ***")
+ nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
+ nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
+ nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
+ nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
+ nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
+ nerv.info("*** training stat end ***")
+end
+
+function print_xent(xent)
+ local totalframes = xent:totalframes()
+ local loss = xent:loss()
+ local correct = xent:correct()
+ nerv.info_stderr("*** training statistics info begin ***")
+ nerv.info_stderr("total frames:\t\t%d", totalframes)
+ nerv.info_stderr("cross entropy:\t%.8f", loss/totalframes)
+ nerv.info_stderr("frame accuracy:\t%.3f%%", 100*correct/totalframes)
+ nerv.info_stderr("*** training statistics info end ***")
+end
+
+function frame_acc(xent)
+ local correct = xent:correct()
+ local totalframes = xent:totalframes()
+ return string.format("%.3f", 100*correct/totalframes)
+end
+
+function print_gconf()
+    nerv.info_stderr("%s \t:= %s", "transf", gconf.initialized_param[1])
+    nerv.info_stderr("%s \t:= %s", "network", gconf.initialized_param[2])
+ nerv.info_stderr("%s \t:= %s", "batch_size", gconf.batch_size)
+ nerv.info_stderr("%s \t:= %s", "buffer_size", gconf.buffer_size)
+ nerv.info_stderr("%s \t:= %s", "lrate", gconf.lrate)
+ nerv.info_stderr("%s \t:= %s", "tr_scp", gconf.tr_scp)
+ nerv.info_stderr("%s \t:= %s", "cv_scp", gconf.cv_scp)
+end
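
For reference, a minimal sketch of how these helpers are consumed by a worker thread, mirroring the trainer code in asgd_sds_trainer.lua above (feat_repo_shareid and data_mutex_shareid are assumed to come from the fastnn thread framework):

    local param_repo = nerv.ParamRepo()
    param_repo:import(gconf.initialized_param, nil, gconf)

    local layer_repo    = make_layer_repo(param_repo)
    local network       = get_network(layer_repo)        -- "ce_output" DAG used for training
    local global_transf = get_global_transf(layer_repo)  -- bias + window global transform

    local buffer = make_buffer(make_readers(nil, layer_repo,
                                            feat_repo_shareid, data_mutex_shareid))
    local input_order = get_input_order()
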
diff --git a/fastnn/lib/ModelSync.c b/fastnn/lib/ModelSync.c
new file mode 100644
index 0000000..bd511ea
--- /dev/null
+++ b/fastnn/lib/ModelSync.c
@@ -0,0 +1,305 @@
+
+#include "ModelSync.h"
+#include "../../nerv/lib/matrix/cuda_helper.h"
+#include "../../nerv/lib/matrix/generic/elem_type.h"
+#include "common.h"
+#include <string.h>
+
+
+ModelSync* ModelSync_new(void)
+{
+ ModelSync *self = (ModelSync*)malloc(sizeof(ModelSync));
+ if (NULL != self)
+ {
+ self->model_mutex = THMutex_new();
+ self->state_mutex = THMutex_new();
+ self->initialized_ = false;
+ self->dim_ = 0;
+ self->pos_ = 0;
+ self->data_ = NULL;
+ self->free_data_ = NULL;
+ self->data_ = NULL;
+ self->refcount = 1;
+ self->threadcount = 0;
+ }
+ return self;
+}
+
+ModelSync* ModelSync_newWithId(long id)
+{
+ ModelSync *self = (ModelSync*)id;
+ __sync_fetch_and_add(&self->refcount, 1);
+ return self;
+}
+
+long ModelSync_id(ModelSync *self)
+{
+ return (long)(self);
+}
+
+int ModelSync_lockmodel(ModelSync *self)
+{
+ if(THMutex_lock(self->model_mutex))
+ return 1;
+ return 0;
+}
+
+int ModelSync_unlockmodel(ModelSync *self)
+{
+ if(THMutex_unlock(self->model_mutex))
+ return 1;
+ return 0;
+
+}
+int ModelSync_lockstate(ModelSync *self)
+{
+ if(THMutex_lock(self->state_mutex))
+ return 1;
+ return 0;
+}
+
+int ModelSync_unlockstate(ModelSync *self)
+{
+ if(THMutex_unlock(self->state_mutex))
+ return 1;
+ return 0;
+}
+
+int ModelSync_free(ModelSync *self)
+{
+ if (NULL != self && __sync_fetch_and_add(&self->refcount, -1) == 1)
+ {
+ free(self->model_mutex);
+ free(self->state_mutex);
+ Status status;
+ CUDA_SAFE_SYNC_CALL(cudaFreeHost(self->free_data_), &status);
+ free(self);
+ }
+}
+
+int ModelSync_initBuffer(ModelSync *self)
+{
+ if (NULL != self)
+ {
+ void *free_data = NULL, *data = NULL;
+ size_t size = self->dim_ * sizeof(float)+16;
+ Status status;
+ CUDA_SAFE_SYNC_CALL(cudaHostAlloc((void**) &free_data, size, cudaHostAllocPortable), &status);
+ NERV_SET_STATUS(&status, NERV_NORMAL, 0);
+
+ data = (free_data ? (void *)( (((unsigned long)*(&free_data)) + 15) & ~0xFUL ) : NULL) ;
+ if (NULL != data)
+ {
+ self->data_ = (float*)(data);
+ self->free_data_ = (float*)(free_data);
+ }
+ return 0;
+ }
+ return 1;
+}
+
+int ModelSync_weightfromd(ModelSync *self, Matrix *dm)
+{
+
+ if (NULL != self && NULL != dm)
+ {
+ void *host_data_ = (void*)self->data_;
+ size_t width = dm->ncol * sizeof(float);
+ size_t src_pitch = dm->stride;
+ size_t dst_pitch = src_pitch;
+ Status status;
+
+ CUDA_SAFE_SYNC_CALL(cudaMemcpy2D(host_data_+self->pos_, dst_pitch, dm->data.f, src_pitch, width, dm->nrow, cudaMemcpyDeviceToHost), &status);
+ NERV_SET_STATUS(&status, NERV_NORMAL, 0);
+ self->pos_ += dm->nrow * dm->stride;
+ return 0;
+ }
+ return 1;
+
+}
+
+
+int ModelSync_weighttod(ModelSync *self, Matrix *dm)
+{
+
+ if (NULL != self && NULL != dm)
+ {
+ void *host_data_ = (void*)self->data_;
+ size_t width = dm->ncol * sizeof(float);
+ size_t dst_pitch = dm->stride;
+ size_t src_pitch = dst_pitch;
+ Status status;
+
+ CUDA_SAFE_SYNC_CALL(cudaMemcpy2D(dm->data.f, dst_pitch, host_data_+self->pos_, src_pitch, width, dm->nrow, cudaMemcpyHostToDevice), &status);
+ NERV_SET_STATUS(&status, NERV_NORMAL, 0);
+
+ self->pos_ += dm->nrow * dm->stride;
+ self->initialized_ = true;
+ return 0;
+ }
+ return 1;
+}
+
+void ModelSync_syncinc(ModelSync *self)
+{
+ __sync_fetch_and_add(&self->threadcount, 1);
+}
+
+void ModelSync_syncdec(ModelSync *self)
+{
+ __sync_fetch_and_add(&self->threadcount, -1);
+}
+
+int ModelSync_threadcount(ModelSync *self)
+{
+ return self->threadcount;
+}
+
+/////////////////////////////////
+
+Xent* Xent_new()
+{
+ Xent *xent = (Xent*)malloc(sizeof(Xent));
+ memset(xent, 0, sizeof(Xent));
+ xent->refcount = 1;
+ return xent;
+}
+
+Xent* Xent_newWithId(long id)
+{
+ Xent *xent = (Xent*)id;
+ __sync_fetch_and_add(&xent->refcount, 1);
+ return xent;
+}
+
+Xent* Xent_newWithParm(size_t frames_, size_t correct_, double loss_, double entropy_)
+{
+ Xent *xent = (Xent*)malloc(sizeof(Xent));
+ xent->frames_ = frames_;
+ xent->correct_ = correct_;
+ xent->loss_ = loss_;
+ xent->entropy_ = entropy_;
+ xent->refcount = 1;
+ return xent;
+}
+
+long Xent_id(Xent *xent)
+{
+ return (long)(xent);
+}
+
+Xent* Xent_add(Xent *a, Xent *b)
+{
+ a->frames_ += b->frames_;
+ a->correct_ += b->correct_;
+ a->loss_ += b->loss_;
+ a->entropy_ += b->entropy_;
+ return a;
+}
+
+void Xent_free(Xent *xent)
+{
+ if (NULL != xent && __sync_fetch_and_add(&xent->refcount, -1) == 1)
+ {
+ free(xent);
+ xent = NULL;
+ }
+}
+
+
+//////////////////////////////////
+
+Mse* Mse_new()
+{
+ Mse *mse = (Mse*)malloc(sizeof(Mse));
+ memset(mse, 0, sizeof(Mse));
+ mse->refcount = 1;
+ return mse;
+}
+
+Mse* Mse_newWithId(long id)
+{
+ Mse *mse = (Mse*)id;
+ __sync_fetch_and_add(&mse->refcount, 1);
+ return mse;
+}
+
+Mse* Mse_newWithParm(size_t frames_, double loss_)
+{
+ Mse *mse = (Mse*)malloc(sizeof(Mse));
+ mse->frames_ = frames_;
+ mse->loss_ = loss_;
+ mse->refcount = 1;
+ return mse;
+}
+
+
+long Mse_id(Mse *mse)
+{
+ return (long)(mse);
+}
+
+Mse* Mse_add(Mse *a, Mse *b)
+{
+ a->frames_ += b->frames_;
+ a->loss_ += b->loss_;
+ return a;
+}
+
+void Mse_free(Mse *mse)
+{
+ if (NULL != mse && __sync_fetch_and_add(&mse->refcount, -1) == 1)
+ {
+ free(mse);
+ mse = NULL;
+ }
+}
+
+//////////////////////////////////
+
+GlobalOption* GlobalOption_new()
+{
+ GlobalOption *option = (GlobalOption*)malloc(sizeof(GlobalOption));
+ option->refcount = 1;
+ return option;
+}
+
+GlobalOption* GlobalOption_newWithParm(int batch_size, float lrate, bool bp,const char *tr_scp, const char *cv_scp, const char *transf, const char *network)
+{
+ GlobalOption *option = (GlobalOption*)malloc(sizeof(GlobalOption));
+ option->batch_size = batch_size;
+ option->lrate = lrate;
+ option->bp = bp;
+ strncpy(option->tr_scp, tr_scp, strlen(tr_scp)+1);
+ strncpy(option->cv_scp, cv_scp, strlen(cv_scp)+1);
+ strncpy(option->transf, transf, strlen(transf)+1);
+ strncpy(option->network, network, strlen(network)+1);
+ option->refcount = 1;
+
+ return option;
+}
+
+GlobalOption* GlobalOption_newWithId(long id)
+{
+ GlobalOption *option = (GlobalOption*)id;
+ __sync_fetch_and_add(&option->refcount, 1);
+ return option;
+}
+
+
+
+long GlobalOption_id(GlobalOption *option)
+{
+ return (long)(option);
+}
+
+void GlobalOption_free(GlobalOption *option)
+{
+ if (NULL != option && __sync_fetch_and_add(&option->refcount, -1) == 1)
+ {
+ free(option);
+ option = NULL;
+ }
+}
+
+
diff --git a/fastnn/threads/Makefile b/fastnn/threads/Makefile
index 17958f9..4205adc 100644
--- a/fastnn/threads/Makefile
+++ b/fastnn/threads/Makefile
@@ -35,6 +35,7 @@ $(OBJ_DIR)/%.o: %.c $(patsubst /%.o,/%.c,$@)
$(LIBS): $(OBJS)
gcc -shared -o $@ $^ $(LDFLAGS) -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -lluaT -lpthread
+ cp $@ $(LIB_PATH)/
clean:
-rm -rf $(OBJ_DIR)
diff --git a/nerv/io/sgd_buffer.lua b/nerv/io/sgd_buffer.lua
index 3f854f0..65d6da1 100644
--- a/nerv/io/sgd_buffer.lua
+++ b/nerv/io/sgd_buffer.lua
@@ -57,7 +57,7 @@ function SGDBuffer:saturate()
buff.data:copy_from(buff.leftover, 0, lrow)
buff.leftover = nil
end
- nerv.info("buffer leftover: %d\n", lrow)
+ nerv.info("buffer leftover: %d", lrow)
reader.tail = lrow
reader.has_leftover = false
end
@@ -107,7 +107,7 @@ function SGDBuffer:get_data()
if not self:saturate() then
return nil -- the remaining data cannot build a batch
end
- nerv.info("%.3fs to fill the buffer", os.clock() - t)
+ --nerv.info("%.3fs to fill the buffer", os.clock() - t)
end
if self.head + batch_size > self.tail then
return nil -- the remaining data cannot build a batch
diff --git a/nerv/lib/common.h b/nerv/lib/common.h
index 6878e34..a4e3582 100644
--- a/nerv/lib/common.h
+++ b/nerv/lib/common.h
@@ -59,6 +59,8 @@ typedef struct Status {
nerv_error_status(L, &status); \
} while (0)
+#define PROFILE_HASHMAP_SIZE 123457
+
typedef struct HashNode {
const char *key;
void *val;
diff --git a/nerv/lib/matrix/cumatrix.c b/nerv/lib/matrix/cumatrix.c
index a5991ab..c913db2 100644
--- a/nerv/lib/matrix/cumatrix.c
+++ b/nerv/lib/matrix/cumatrix.c
@@ -2,7 +2,7 @@
#include "../common.h"
#include "cuda_helper.h"
#include <string.h>
-#define PROFILE_HASHMAP_SIZE 123457
+
static cublasHandle_t cublas_handle;
static cudaEvent_t profile_start, profile_stop;
static HashMap *profile;
diff --git a/nerv/nn/layer_repo.lua b/nerv/nn/layer_repo.lua
index ef333a7..8473727 100644
--- a/nerv/nn/layer_repo.lua
+++ b/nerv/nn/layer_repo.lua
@@ -13,7 +13,7 @@ function LayerRepo:add_layers(layer_spec, param_repo, global_conf)
if layers[id] ~= nil then
nerv.error("a layer with id %s already exists", id)
end
- nerv.info("create layer: %s", id)
+ --nerv.info("create layer: %s", id)
if type(spec[2]) ~= "table" then
nerv.error("layer config table is needed")
end