Diffstat (limited to 'nerv/lib/matrix/generic')
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu | 44 ++++
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c  | 19 ++
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h  |  5 +
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.c   | 39 +++
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.h   | 10 +
5 files changed, 117 insertions(+), 0 deletions(-)
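
This patch adds a ReLU activation (forward and gradient) to both backends: CUDA kernels plus host wrappers in cukernel.cu/cumatrix.c, and a plain-C row loop in mmatrix.c, mirroring the existing sigmoid/tanh pairs. The header hunks expose the new functions; the mmatrix.h hunk also adds the previously missing tanh/tanh_grad declarations, whose definitions already exist in mmatrix.c.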
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index cf9d213..82bea14 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -90,6 +90,27 @@ __global__ void cudak_(tanh_grad)(const MATRIX_ELEM *output,
nerr[idx] = (1.0 - output[idx] * output[idx]) * err[idx];
}
+__global__ void cudak_(relu)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ b[idx] = a[idx] > 0 ? a[idx] : 0;
+}
+
+__global__ void cudak_(relu_grad)(const MATRIX_ELEM *output,
+ const MATRIX_ELEM *err,
+ MATRIX_ELEM *nerr,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ nerr[idx] = (output[idx] > 0 ? 1 : 0) * err[idx];
+}
__global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
const MATRIX_ELEM *max, const MATRIX_ELEM *deno,
int nrow, int ncol, int stride, int mstride) {
@@ -510,6 +531,29 @@ extern "C" {
cudaStreamSynchronize(0);
}
+ void cudak_(cuda_relu)(const Matrix *a, Matrix *b) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+ CEIL_DIV(b->nrow, threadsPerBlock.y));
+ cudak_(relu)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
+ b->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
+ void cudak_(cuda_relu_grad)(const Matrix *output,
+ const Matrix *err, Matrix *nerr) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x),
+ CEIL_DIV(nerr->nrow, threadsPerBlock.y));
+ cudak_(relu_grad)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(output), MATRIX_ELEM_PTR(err),
+ MATRIX_ELEM_PTR(nerr),
+ nerr->nrow, nerr->ncol,
+ nerr->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) {
dim3 block(CUDA_THREADS_NN, 1);
int ncol = a->ncol;
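
Both new kernels use the same one-thread-per-element mapping as the existing sigmoid/tanh kernels: x indexes columns, y indexes rows, and stride is the row pitch in elements (the wrappers divide the byte stride by sizeof(MATRIX_ELEM)). A minimal standalone version of the pattern, with hypothetical names (relu_sketch, launch_relu_sketch) since none of this is part of the patch:

    #include <cuda_runtime.h>

    #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

    /* one thread per element; x covers columns, y covers rows */
    __global__ void relu_sketch(const float *a, float *b,
                                int nrow, int ncol, int stride) {
        int j = blockIdx.x * blockDim.x + threadIdx.x;
        int i = blockIdx.y * blockDim.y + threadIdx.y;
        if (i >= nrow || j >= ncol) return;   /* guard the ragged edge */
        long idx = j + (long)i * stride;      /* stride = row pitch in elements */
        b[idx] = a[idx] > 0 ? a[idx] : 0;
    }

    void launch_relu_sketch(const float *a, float *b,
                            int nrow, int ncol, int stride) {
        dim3 threads(16, 16);                 /* stands in for CUDA_THREADS_N */
        dim3 blocks(CEIL_DIV(ncol, threads.x), CEIL_DIV(nrow, threads.y));
        relu_sketch<<<blocks, threads>>>(a, b, nrow, ncol, stride);
        cudaStreamSynchronize(0);             /* match the patch's sync-per-call style */
    }

The bounds check is what lets the grid round up to whole blocks: threads that land past the matrix edge simply return.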
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index bc5f285..432222a 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -117,6 +117,25 @@ void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err, const Matrix *outp
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(relu)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ PROFILE_START
+ cudak_(cuda_relu)(b, a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(relu_grad)(Matrix *nerr, const Matrix *err, const Matrix *output,
+ CuContext *context, Status *status) {
+ CHECK_SAME_DIMENSION(nerr, err, status);
+ CHECK_SAME_DIMENSION(nerr, output, status);
+ PROFILE_START
+ cudak_(cuda_relu_grad)(output, err, nerr);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
CuContext *context, Status *status) {
Matrix *max, *max_idx;
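
The wrappers follow the established pattern: shape checks via CHECK_SAME_DIMENSION (which reports through status), kernel dispatch between PROFILE_START/PROFILE_STOP. Note the gradient is computed from the saved output, which works because relu(x) > 0 exactly when x > 0, so d/dx max(0, x) can be recovered without keeping the input around. A scalar sanity check of that identity (plain C, hypothetical names):

    #include <assert.h>

    static float relu1(float x) { return x > 0 ? x : 0; }
    /* gradient gates the incoming error by the sign of the saved output */
    static float relu1_grad(float out, float err) { return (out > 0 ? 1 : 0) * err; }

    int main(void) {
        assert(relu1_grad(relu1( 2.5f), 3.0f) == 3.0f);  /* pass-through */
        assert(relu1_grad(relu1(-1.0f), 3.0f) == 0.0f);  /* blocked */
        return 0;
    }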
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index 79bfc76..459513b 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -17,6 +17,11 @@ void nerv_matrix_(tanh)(Matrix *a, const Matrix *b,
void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
const Matrix *output,
CuContext *context, Status *status);
+void nerv_matrix_(relu)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status);
+void nerv_matrix_(relu_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output,
+ CuContext *context, Status *status);
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
CuContext *context, Status *status);
diff --git a/nerv/lib/matrix/generic/mmatrix.c b/nerv/lib/matrix/generic/mmatrix.c
index ccfb2ce..e76d4fb 100644
--- a/nerv/lib/matrix/generic/mmatrix.c
+++ b/nerv/lib/matrix/generic/mmatrix.c
@@ -460,6 +460,45 @@ void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(relu)(Matrix *b, const Matrix *a,
+ MContext *context, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ int i, j;
+ size_t astride = a->stride, bstride = b->stride;
+ const MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a);
+ MATRIX_ELEM *brow = MATRIX_ELEM_PTR(b);
+ for (i = 0; i < b->nrow; i++)
+ {
+ for (j = 0; j < b->ncol; j++)
+ brow[j] = arow[j] > 0 ? arow[j] : 0;
+ arow = MATRIX_NEXT_ROW_PTR(arow, astride);
+ brow = MATRIX_NEXT_ROW_PTR(brow, bstride);
+ }
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(relu_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output,
+ MContext *context, Status *status) {
+ CHECK_SAME_DIMENSION(nerr, err, status);
+ CHECK_SAME_DIMENSION(nerr, output, status);
+ int i, j;
+ size_t nerr_stride = nerr->stride,
+ err_stride = err->stride,
+ out_stride = output->stride;
+ MATRIX_ELEM *nerr_row = MATRIX_ELEM_PTR(nerr);
+ const MATRIX_ELEM *err_row = MATRIX_ELEM_PTR(err),
+ *out_row = MATRIX_ELEM_PTR(output);
+ for (i = 0; i < nerr->nrow; i++)
+ {
+ for (j = 0; j < nerr->ncol; j++)
+ nerr_row[j] = (out_row[j] > 0 ? 1 : 0) * err_row[j];
+ nerr_row = MATRIX_NEXT_ROW_PTR(nerr_row, nerr_stride);
+ err_row = MATRIX_NEXT_ROW_PTR(err_row, err_stride);
+ out_row = MATRIX_NEXT_ROW_PTR(out_row, out_stride);
+ }
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
int cont, MContext *context, Status *status) {
if (a->nrow != b->nrow)
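
The CPU implementation walks rows with MATRIX_NEXT_ROW_PTR because rows are laid out stride bytes apart, which can exceed ncol * sizeof(MATRIX_ELEM) when rows are padded for alignment; row i therefore does not start at offset i * ncol. A condensed sketch of the same traversal over raw float buffers (relu_rows is hypothetical):

    #include <stddef.h>

    static void relu_rows(float *b, const float *a,
                          int nrow, int ncol, size_t stride) {
        int i, j;
        for (i = 0; i < nrow; i++) {
            for (j = 0; j < ncol; j++)
                b[j] = a[j] > 0 ? a[j] : 0;
            /* advance by the byte stride, as MATRIX_NEXT_ROW_PTR does */
            a = (const float *)((const char *)a + stride);
            b = (float *)((char *)b + stride);
        }
    }

With stride == ncol * sizeof(float) this degenerates to a dense elementwise pass; the byte-level pointer arithmetic only matters for padded allocations.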
diff --git a/nerv/lib/matrix/generic/mmatrix.h b/nerv/lib/matrix/generic/mmatrix.h
index 41c39f6..7f494d6 100644
--- a/nerv/lib/matrix/generic/mmatrix.h
+++ b/nerv/lib/matrix/generic/mmatrix.h
@@ -13,6 +13,16 @@ void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
const Matrix *output,
MContext *context, Status *status);
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b,
+ MContext *context, Status *status);
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output,
+ MContext *context, Status *status);
+void nerv_matrix_(relu)(Matrix *a, const Matrix *b,
+ MContext *context, Status *status);
+void nerv_matrix_(relu_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output,
+ MContext *context, Status *status);
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
MContext *context, Status *status);
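
Across all four files the first Matrix parameter is the destination: the cumatrix.c wrapper passes (b, a) into cudak_(cuda_relu), and the mmatrix.c loop writes brow from arow, so the two backends agree on calling convention even though the parameter names are swapped between them.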