author     txh18 <[email protected]>    2015-11-23 15:54:20 +0800
committer  txh18 <[email protected]>    2015-11-23 15:54:20 +0800
commit     e76ae9b12651ed8497537edf357f4cf90421ea0d (patch)
tree       6a305444219a041ce953a3323e4d5449e335b218
parent     979473dcc890a92fb90b470b924d1e1e70f6dbc0 (diff)
parent     ec6bde79a5817409bb8a77075b411974c1d8f856 (diff)
merge in recent changes about param updates
Merge branch 'master' into txh18/rnnlm
 Makefile                            |  2
 README.md                           |  1
 nerv/Makefile                       |  4
 nerv/layer/affine.lua               | 72
 nerv/lib/matrix/cukernel.cu         | 24
 nerv/lib/matrix/generic/cukernel.cu |  4
 nerv/lib/matrix/generic/cumatrix.c  |  2
 nerv/matrix/generic/cumatrix.c      |  9
 8 files changed, 57 insertions(+), 61 deletions(-)
diff --git a/Makefile b/Makefile
index 664a83b..72a5915 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ luajit:
luarocks:
PREFIX=$(PREFIX) ./tools/build_luarocks.sh
install:
- cd nerv; $(PREFIX)/bin/luarocks make
+ cd nerv; $(PREFIX)/bin/luarocks make CFLAGS=$(CFLAGS)
speech:
cd speech/speech_utils; $(PREFIX)/bin/luarocks make
cd speech/htk_io; $(PREFIX)/bin/luarocks make
diff --git a/README.md b/README.md
index c198cc5..fe9dfc1 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,7 @@ git clone https://github.com/Nerv-SJTU/nerv.git
cd nerv
git submodule init && git submodule update
make
+# To include some new CUDA features (e.g. atomicCAS), use "make CFLAGS=-D__NERV_FUTURE_CUDA_7"
#further, if you want the speech modules
git clone https://github.com/Nerv-SJTU/nerv-speech.git speech
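
The flag documented here is consumed as a plain preprocessor guard in the CUDA sources touched below. A minimal sketch of the pattern, with a hypothetical kernel name that is not part of the patch:

    /* Only compiled when the project is built with
     * `make CFLAGS=-D__NERV_FUTURE_CUDA_7`; otherwise the guarded code
     * (and, further below, its Lua method registration) is simply omitted. */
    #ifdef __NERV_FUTURE_CUDA_7
    __global__ void guarded_fill(float *a, float val, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            a[i] = val;
    }
    #endif
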
diff --git a/nerv/Makefile b/nerv/Makefile
index b449f82..55c174c 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -33,7 +33,7 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua layer/softmax.lua \
layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
- io/sgd_buffer.lua
+ io/sgd_buffer.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
#CUDA_BASE := /usr/local/cuda-7.0
@@ -55,7 +55,7 @@ $(OBJ_DIR) $(LUA_DIR) $(OBJ_SUBDIR) $(LUA_SUBDIR) $(INC_SUBDIR):
$(OBJ_DIR)/%.o: %.c $(patsubst /%.o,/%.c,$@)
gcc -c -o $@ $< $(INCLUDE) -fPIC $(CFLAGS)
$(OBJ_DIR)/lib/matrix/cukernel.o: lib/matrix/cukernel.cu
- $(NVCC) -c -o $@ $< $(INCLUDE) $(NVCC_FLAGS)
+ $(NVCC) -c -o $@ $< $(INCLUDE) $(NVCC_FLAGS) $(CFLAGS)
$(LUA_DIR)/%.lua: %.lua
cp $< $@
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 3e84ec0..ed58d38 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -17,49 +17,46 @@ function MatrixParam:train_init()
self.correction:fill(0)
end
-function MatrixParam:update_by_gradient(gradient)
+function MatrixParam:_update_by_gradient(gradient, alpha, beta)
local gconf = self.gconf
+ -- momentum gain
+ local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+ local n = self.gconf.batch_size * mmt_gain
+ -- perform update
if gconf.momentum > 0 then
self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
- -- momentum gain
- local mmt_gain = 1.0 / (1.0 - gconf.momentum)
- local n = self.gconf.batch_size * mmt_gain
- -- perform update
- self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+ self.trans:add(self.trans, self.correction, alpha, -gconf.lrate / n * beta)
else
- self.trans:add(self.trans, gradient, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+ self.trans:add(self.trans, gradient, alpha, -gconf.lrate / n * beta)
end
end
-function MatrixParam:update_by_err_input(err, input)
+function MatrixParam:_update_by_err_input(err, input, alpha, beta)
local gconf = self.gconf
+ -- momentum gain
+ local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+ local n = self.gconf.batch_size * mmt_gain
+ -- perform update
if gconf.momentum > 0 then
self.correction:mul(input, err, 1.0, gconf.momentum, 'T', 'N')
- -- momentum gain
- local mmt_gain = 1.0 / (1.0 - gconf.momentum)
- local n = self.gconf.batch_size * mmt_gain
- -- perform update
- self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+ self.trans:add(self.trans, self.correction, alpha, -gconf.lrate / n * beta)
else
- self.trans:mul(input, err, - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+ self.trans:mul(input, err, -gconf.lrate / n * beta, alpha, 'T', 'N')
end
end
---[[ --these updates are the same
-function LinearTransParam:update(gradient)
- MatrixParam.update(self, gradient)
- -- local gconf = self.gconf
- -- weight decay(put into MatrixParam:update)
- -- self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost / gconf.batch_size)
+function MatrixParam:update_by_gradient(gradient)
+ self:_update_by_gradient(gradient, 1.0, 1.0)
+end
+
+function MatrixParam:update_by_err_input(err, input)
+ self:_update_by_err_input(err, input, 1.0, 1.0)
end
-function BiasParam:update(gradient)
- MatrixParam.update(self, gradient)
- --local gconf = self.gconf
- -- weight decay
- -- self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost / gconf.batch_size)
+function LinearTransParam:update_by_err_input(err, input)
+ local l2 = 1 - self.gconf.lrate * self.gconf.wcost
+ self:_update_by_err_input(err, input, l2, l2)
end
-]]--
function AffineLayer:__init(id, global_conf, layer_conf)
self.id = id
@@ -69,7 +66,7 @@ function AffineLayer:__init(id, global_conf, layer_conf)
self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
self.gconf = global_conf
self:check_dim_len(1, 1) -- exactly one input and one output
- self.direct_update = layer_conf.direct_update or global_conf.direct_update
+ -- self.direct_update = layer_conf.direct_update or global_conf.direct_update
end
function AffineLayer:init(batch_size)
@@ -92,25 +89,8 @@ function AffineLayer:batch_resize(batch_size)
end
function AffineLayer:update(bp_err, input, output)
- if self.direct_update == true then
- local gconf = self.gconf
- if gconf.momentum > 0 then
- self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
- self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
- -- momentum gain
- local mmt_gain = 1.0 / (1.0 - gconf.momentum)
- local n = self.gconf.batch_size * mmt_gain
- -- perform update
- self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
- self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
- else
- self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
- self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
- end
- else
- self.ltp:update_by_err_input(bp_err[1], input[1])
- self.bp:update_by_gradient(bp_err[1]:colsum())
- end
+ self.ltp:update_by_err_input(bp_err[1], input[1])
+ self.bp:update_by_gradient(bp_err[1]:colsum())
end
function AffineLayer:propagate(input, output)
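
Reading the refactor off this file: both private helpers now apply the same update,

    n     = batch_size / (1 - momentum)
    trans <- alpha * trans - (lrate / n) * beta * delta

where delta is the momentum-accumulated correction (or the raw gradient / err-input product when momentum is 0). The plain MatrixParam wrappers pass alpha = beta = 1.0, while LinearTransParam:update_by_err_input passes alpha = beta = 1 - lrate * wcost, so L2 weight decay is applied only to the linear transform and is no longer divided by batch_size as in the old inline formulas.
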
diff --git a/nerv/lib/matrix/cukernel.cu b/nerv/lib/matrix/cukernel.cu
index 1e856b9..210e6bf 100644
--- a/nerv/lib/matrix/cukernel.cu
+++ b/nerv/lib/matrix/cukernel.cu
@@ -2,34 +2,38 @@
#include "cumatrix.h"
-__device__ double atomicAdd_nvidia(double* address, double val) {
- //nvidia provided this implementation on the net
- //atmoicAdd is not included in CUDA for double
+#ifdef __NERV_FUTURE_CUDA_7
+__device__ double atomicAdd_nvidia(double* address, double val) {
+ /* nvidia provided this implementation
+ atomicAdd is not included in CUDA for double */
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
- old = atomicCAS(address_as_ull, assumed,
- __double_as_longlong(val +
+ old = atomicCAS(address_as_ull, assumed,
+ __double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
-__device__ float atomicAdd_nvidia(float* address, float val) {
- //nvidia provided this implementation on the net
- //I tried the included atomocAdd, but the select_liner layer result seems unreproduceable, but sadly, even if I used this implementation, the select_linear layer result is still unreproduceable
+__device__ float atomicAdd_nvidia(float* address, float val) {
+ /* nvidia provided this implementation;
+ I tried the built-in atomicAdd, but the select_linear layer result was
+ not reproducible, and sadly, even with this implementation, the
+ select_linear layer result is still not reproducible */
int* address_as_ull = (int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
- old = atomicCAS(address_as_ull, assumed,
- __float_as_int(val +
+ old = atomicCAS(address_as_ull, assumed,
+ __float_as_int(val +
__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
+#endif
#define cudak_(NAME) cudak_float_ ## NAME
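
The wrappers above provide a CAS-based atomic add for element types without a usable native overload. A hypothetical kernel (not part of the patch) showing how such a wrapper is typically called, assuming a 1-D launch and a build with -D__NERV_FUTURE_CUDA_7:

    /* Accumulate n doubles into a single result using the CAS loop above. */
    __global__ void sum_into(double *acc, const double *x, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            atomicAdd_nvidia(acc, x[i]);
    }
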
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index e1063af..e58c488 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -225,6 +225,7 @@ __global__ void cudak_(clip)(MATRIX_ELEM *a,
a[j + i * stride] = val_1;
}
+#ifdef __NERV_FUTURE_CUDA_7
__global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
int nrow_a, int ncol_a, int stride_c, int stride_a, double alpha, double beta) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
@@ -235,6 +236,7 @@ __global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a,
//c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
}
+#endif
__global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
int nrow, int ncol,
@@ -552,6 +554,7 @@ extern "C" {
cudaStreamSynchronize(0);
}
+#ifdef __NERV_FUTURE_CUDA_7
void cudak_(cuda_update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
@@ -562,6 +565,7 @@ extern "C" {
a->stride / sizeof(MATRIX_ELEM), alpha, beta);
cudaStreamSynchronize(0);
}
+#endif
void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
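
For reference, the guarded update_select_rows kernel realizes the commented formula by atomically adding a delta: per element, adding

    (-beta * alpha) * c[idx[i]][j] + alpha * a[i][j]

to c[idx[i]][j] yields

    c[idx[i]][j] = (1 - beta * alpha) * c[idx[i]][j] + alpha * a[i][j]

and the atomic form is presumably used because several rows of a can map to the same target row of c, so their contributions must accumulate without racing.
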
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 2dc5899..00af895 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -359,6 +359,7 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+#ifdef __NERV_FUTURE_CUDA_7
void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
long nrow = a->nrow;
if (idx->nrow != 1)
@@ -370,6 +371,7 @@ void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+#endif
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
int context, Status *status) {
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index f675149..e1519b0 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -291,8 +291,10 @@ static int nerv_matrix_(lua_scale_rows_by_row)(lua_State *L) {
return 0;
}
+#ifdef __NERV_FUTURE_CUDA_7
static int nerv_matrix_(lua_update_select_rows)(lua_State *L) {
- //Update c's select rows, i.e. c[idx[i]] = c[idx[i]] * (1 - beta * alpha) + a[i] * alpha
+ /* update c's select rows,
+ * i.e. c[idx[i]] = c[idx[i]] * (1 - beta * alpha) + a[i] * alpha */
Status status;
Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
@@ -303,6 +305,7 @@ static int nerv_matrix_(lua_update_select_rows)(lua_State *L) {
NERV_LUA_CHECK_STATUS(L, status);
return 0;
}
+#endif
static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"colsum", nerv_matrix_(lua_colsum)},
@@ -323,7 +326,6 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"add_row", nerv_matrix_(lua_add_row)},
{"clip", nerv_matrix_(lua_clip)},
{"fill", nerv_matrix_(lua_fill)},
- {"update_select_rows", nerv_matrix_(lua_update_select_rows)},
{"sigmoid", nerv_matrix_(lua_sigmoid)},
{"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)},
{"softmax", nerv_matrix_(lua_softmax)},
@@ -335,6 +337,9 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"rearrange_frm", nerv_matrix_(lua_rearrange_frm)},
{"scale_rows_by_row", nerv_matrix_(lua_scale_rows_by_row)},
{"scale_rows_by_col", nerv_matrix_(lua_scale_rows_by_col)},
+#ifdef __NERV_FUTURE_CUDA_7
+ {"update_select_rows", nerv_matrix_(lua_update_select_rows)},
+#endif
{NULL, NULL}
};