author    txh18 <cloudygooseg@gmail.com>    2015-12-02 15:23:54 +0800
committer txh18 <cloudygooseg@gmail.com>    2015-12-02 15:23:54 +0800
commit    41a841f3e0992a578cf5c8f82ae44a552a6f8b2f (patch)
tree      0a5ba8d1530290fd91febcfe69986c96be0ac895
parent    0c286aa6237da9e8daa7db8ed1e3805a33312926 (diff)
changed the thres_mask function of matrix to a more standard API
-rw-r--r--  nerv/examples/lmptb/tnn/init.lua       1
-rw-r--r--  nerv/layer/init.lua                    6
-rw-r--r--  nerv/lib/matrix/cukernel.h             2
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu   11
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c     5
-rw-r--r--  nerv/matrix/generic/cumatrix.c         9
6 files changed, 19 insertions(+), 15 deletions(-)
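The change below turns thres_mask from an in-place operation, which compared a matrix against its own values and overwrote them, into a two-matrix operation that writes the mask into one matrix based on the values of a second. A minimal sketch of the calling convention at the Lua level, assuming same-shape matrices a and b (the variable names here are illustrative, not from the diff; the method name follows the binding at the bottom of this commit):

    -- old API: a is both the values being compared and the destination
    a:thres_mask(thres, low, high)

    -- new API: b supplies the values being compared, a receives the mask;
    -- element-wise, a[i][j] = (b[i][j] < thres) and low or high
    a:thres_mask(b, thres, low, high)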
diff --git a/nerv/examples/lmptb/tnn/init.lua b/nerv/examples/lmptb/tnn/init.lua
index d45a2fa..ddaa6b8 100644
--- a/nerv/examples/lmptb/tnn/init.lua
+++ b/nerv/examples/lmptb/tnn/init.lua
@@ -41,6 +41,7 @@ function LayerT:get_dim()
return self.dim_in, self.dim_out
end
+nerv.include('sutil.lua')
nerv.include('tnn.lua')
nerv.include('layersT/softmax_ce_t.lua')
nerv.include('layersT/lstm_t.lua')
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index b8b7ea1..32b82d8 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -72,18 +72,18 @@ end
function Layer:find_param(pid, l_conf, gconf, p_type, p_dim)
if l_conf[pid] ~= nil then
- nerv.info("Param [%s] of layer [%s] found in layer_conf.\n", pid, self.id)
+ nerv.info("Param [%s] of layer [%s] found in layer_conf.", pid, self.id)
return l_conf[pid]
end
local pid_g = self.id .. '_' .. pid --global identifier
local pr = l_conf.pr
local p
if pr ~= nil and pr:has_param(pid_g) == true then
- nerv.info("Param [%s] of layer [%s] found in layer_conf.paramRepo.\n", pid, self.id)
+ nerv.info("Param [%s] of layer [%s] found in layer_conf.paramRepo.", pid, self.id)
p = pr:get_param(pid_g)
return p
end
- nerv.info("Param [%s] of layer [%s] is not found in layer_conf or layer_conf.paramRepo, switch to auto-generate.\n", pid, self.id)
+ nerv.info("Param [%s] of layer [%s] is not found in layer_conf or layer_conf.paramRepo, switch to auto-generate.", pid, self.id)
p = p_type(pid_g, gconf)
p.trans = gconf.cumat_type(unpack(p_dim))
p.trans:generate(gconf.param_random)
diff --git a/nerv/lib/matrix/cukernel.h b/nerv/lib/matrix/cukernel.h
index 47dc0a8..fe682d3 100644
--- a/nerv/lib/matrix/cukernel.h
+++ b/nerv/lib/matrix/cukernel.h
@@ -4,7 +4,7 @@ void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b);
void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
void cudak_(cuda_rand_uniform)(const Matrix *a); //a's curand_gen may be modified
-void cudak_(cuda_thres_mask)(const Matrix *a, double thres, double low, double high);
+void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high);
void cudak_(cuda_tanh)(const Matrix *a, Matrix *b);
void cudak_(cuda_tanh_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b);
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index b092e4a..aa830b5 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -20,14 +20,14 @@ __global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
b[idx] = log(tmp);
}
-__global__ void cudak_(thres_mask)(MATRIX_ELEM *a, double thres, double low, double high,
+__global__ void cudak_(thres_mask)(MATRIX_ELEM *a, MATRIX_ELEM *b, double thres, double low, double high,
int nrow, int ncol, int stride) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
long idx;
if (i >= nrow || j >= ncol) return;
idx = j + i * stride;
- if (a[idx] < thres)
+ if (b[idx] < thres)
a[idx] = low;
else
a[idx] = high;
@@ -389,7 +389,7 @@ extern "C" {
cudaStreamSynchronize(0);
}
- void cudak_(cuda_rand_uniform)(Matrix *a) {
+ void cudak_(cuda_rand_uniform)(const Matrix *a) {
#ifdef MATRIX_USE_FLOAT
curandGenerateUniform(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
#endif
@@ -398,12 +398,13 @@ extern "C" {
#endif
}
- void cudak_(cuda_thres_mask)(const Matrix *a, double thres, double low, double high) {
+ void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
CEIL_DIV(a->nrow, threadsPerBlock.y));
cudak_(thres_mask)<<<numBlocks, threadsPerBlock>>> \
- (MATRIX_ELEM_PTR(a), thres, low, high, a->nrow, a->ncol, a->stride / sizeof(MATRIX_ELEM));
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+ thres, low, high, a->nrow, a->ncol, a->stride / sizeof(MATRIX_ELEM));
cudaStreamSynchronize(0);
}
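After this hunk the kernel reads the comparison source from b and writes the result into a, so the input values are no longer destroyed by the masking. Element-wise, the kernel computes the equivalent of this plain Lua rendering (a CPU-side sketch for illustration only, not code from the repository):

    -- for matrices a, b with the same nrow/ncol
    for i = 1, nrow do
        for j = 1, ncol do
            if b[i][j] < thres then a[i][j] = low else a[i][j] = high end
        end
    end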
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index cbb0481..65e0788 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -83,9 +83,10 @@ void nerv_matrix_(rand_uniform)(Matrix *a, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(thres_mask)(Matrix *a, double thres, double low, double high, Status *status) {
+void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, double high, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
PROFILE_START
- cudak_(cuda_thres_mask)(a, thres, low, high);
+ cudak_(cuda_thres_mask)(a, b, thres, low, high);
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
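With two matrices involved, the added CHECK_SAME_DIMENSION(a, b, status) guard matters: the launch geometry in the kernel wrapper above is computed from a alone, so a smaller b would otherwise be read out of bounds. At the Lua level a shape mismatch should now surface as an error; a hedged sketch (the CuMatrixFloat constructor name is assumed from the NERV matrix bindings, not shown in this diff):

    local a = nerv.CuMatrixFloat(4, 5)
    local b = nerv.CuMatrixFloat(4, 6)  -- ncol differs from a
    a:thres_mask(b, 0.5, 0, 1)          -- expected to fail the dimension check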
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index d1f763b..fb36033 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -65,10 +65,11 @@ static int nerv_matrix_(lua_sigmoid_grad)(lua_State *L) {
static int nerv_matrix_(lua_thres_mask)(lua_State *L) {
Status status;
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
- MATRIX_ELEM thres = luaL_checknumber(L, 2);
- MATRIX_ELEM low = luaL_checknumber(L, 3);
- MATRIX_ELEM high = luaL_checknumber(L, 4);
- nerv_matrix_(thres_mask)(a, thres, low, high, &status);
+ Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ MATRIX_ELEM thres = luaL_checknumber(L, 3);
+ MATRIX_ELEM low = luaL_checknumber(L, 4);
+ MATRIX_ELEM high = luaL_checknumber(L, 5);
+ nerv_matrix_(thres_mask)(a, b, thres, low, high, &status);
NERV_LUA_CHECK_STATUS(L, status);
return 0;
}
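One plausible use of the new shape, suggested by rand_uniform appearing alongside thres_mask in these files: fill one matrix with uniform noise, then derive a 0/1 keep-mask from it without clobbering the noise source, as in dropout. A hypothetical sketch (the dropout framing and the Lua registration of rand_uniform are assumptions, not stated in the commit):

    local noise = nerv.CuMatrixFloat(100, 200)
    local mask  = nerv.CuMatrixFloat(100, 200)
    noise:rand_uniform()               -- fill noise with uniform values in [0, 1)
    mask:thres_mask(noise, 0.3, 0, 1)  -- mask is 0 where noise < 0.3, 1 elsewhere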