diff options
author:    Determinant <ted.sybil@gmail.com>  2016-05-01 00:44:43 +0800
committer: Determinant <ted.sybil@gmail.com>  2016-05-01 00:44:43 +0800
commit:    34fe366898bc48b4e1ad6fa945dad8821857a459 (patch)
tree:      1210e1a28dd4c0f6e1cefe0b98c95b333ceec158 /nerv/lib/matrix/generic/cukernel.cu
parent:    2bb64ecc5e0350f89cff1e978a1b8dcca1528a06 (diff)
parent:    1aaffa6b9a56d4d580dea7bf3f1b7df1eec5da2f (diff)
Merge branch 'master' of ssh://speechlab.sjtu.edu.cn:8022/nerv-dev/nerv
Diffstat (limited to 'nerv/lib/matrix/generic/cukernel.cu')
-rw-r--r-- | nerv/lib/matrix/generic/cukernel.cu | 44 |
1 files changed, 44 insertions, 0 deletions
/* ReLU forward: b[i][j] = a[i][j] > 0 ? a[i][j] : 0, elementwise.
 * Expects a 2D launch where the x dimension walks columns and the y
 * dimension walks rows; rows are `stride` ELEMENTS apart (row-major,
 * possibly padded).  Threads past the matrix edge return immediately,
 * so the grid need not divide nrow/ncol evenly. */
__global__ void cudak_(relu)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                             int nrow, int ncol, int stride) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    long idx;
    if (i >= nrow || j >= ncol) return;
    /* widen BEFORE multiplying: `i * stride` evaluated in 32-bit int can
     * overflow on large matrices even though idx itself is long */
    idx = j + (long)i * stride;
    b[idx] = a[idx] > 0 ? a[idx] : 0;
}

/* ReLU backward: nerr[i][j] = (output[i][j] > 0 ? 1 : 0) * err[i][j].
 * Same launch geometry as cudak_(relu); output, err and nerr are
 * assumed to share nrow/ncol/stride (stride in elements). */
__global__ void cudak_(relu_grad)(const MATRIX_ELEM *output,
                                  const MATRIX_ELEM *err,
                                  MATRIX_ELEM *nerr,
                                  int nrow, int ncol, int stride) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    long idx;
    if (i >= nrow || j >= ncol) return;
    /* same widening fix as cudak_(relu) */
    idx = j + (long)i * stride;
    /* keep the multiply form (not a bare select) so special values in
     * err propagate exactly as before */
    nerr[idx] = (output[idx] > 0 ? 1 : 0) * err[idx];
}
/* Host-side launcher for cudak_(relu): tiles b with a 2D grid of
 * CUDA_THREADS_N x CUDA_THREADS_N blocks (x: columns, y: rows) and
 * blocks on the legacy default stream before returning.
 * NOTE(review): assumes a and b share nrow/ncol/stride -- confirm
 * against callers.  Matrix::stride is in bytes; the kernel takes an
 * element stride, hence the division by sizeof(MATRIX_ELEM). */
void cudak_(cuda_relu)(const Matrix *a, Matrix *b) {
    dim3 block(CUDA_THREADS_N, CUDA_THREADS_N);
    dim3 grid(CEIL_DIV(b->ncol, block.x),
              CEIL_DIV(b->nrow, block.y));
    int estride = b->stride / sizeof(MATRIX_ELEM);
    cudak_(relu)<<<grid, block>>>(MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
                                  b->nrow, b->ncol, estride);
    cudaStreamSynchronize(0);
}

/* Host-side launcher for cudak_(relu_grad): same 2D tiling as
 * cuda_relu, sized from nerr, then a blocking sync on the legacy
 * default stream.  NOTE(review): assumes output, err and nerr share
 * geometry -- confirm against callers. */
void cudak_(cuda_relu_grad)(const Matrix *output,
                            const Matrix *err, Matrix *nerr) {
    dim3 block(CUDA_THREADS_N, CUDA_THREADS_N);
    dim3 grid(CEIL_DIV(nerr->ncol, block.x),
              CEIL_DIV(nerr->nrow, block.y));
    int estride = nerr->stride / sizeof(MATRIX_ELEM);
    cudak_(relu_grad)<<<grid, block>>>(MATRIX_ELEM_PTR(output),
                                       MATRIX_ELEM_PTR(err),
                                       MATRIX_ELEM_PTR(nerr),
                                       nerr->nrow, nerr->ncol, estride);
    cudaStreamSynchronize(0);
}