Diffstat (limited to 'nerv/lib/matrix/generic')
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu  86
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c   36
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h    3
-rw-r--r--  nerv/lib/matrix/generic/matrix.c      8
4 files changed, 132 insertions(+), 1 deletion(-)
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index d042d48..aa830b5 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -20,6 +20,19 @@ __global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
b[idx] = log(tmp);
}
+__global__ void cudak_(thres_mask)(MATRIX_ELEM *a, MATRIX_ELEM *b, double thres, double low, double high,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ if (b[idx] < thres)
+ a[idx] = low;
+ else
+ a[idx] = high;
+}
+
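The new thres_mask kernel writes a hard two-valued mask into a, driven by a reference matrix b: a[idx] = (b[idx] < thres) ? low : high. For reference, a host-side sketch of the same semantics (illustrative only, not part of this commit):

    /* CPU reference for cudak_(thres_mask): threshold b into a */
    static void thres_mask_cpu(float *a, const float *b, double thres,
                               double low, double high, int n) {
        for (int i = 0; i < n; i++)
            a[i] = b[i] < thres ? (float)low : (float)high;
    }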
__global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
MATRIX_ELEM *c,
int nrow, int ncol, int stride) {
@@ -53,6 +66,29 @@ __global__ void cudak_(sigmoid_grad)(const MATRIX_ELEM *output,
nerr[idx] = output[idx] * (1.0 - output[idx]) * err[idx];
}
+__global__ void cudak_(tanh)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+    //b[idx] = (exp(a[idx]) - exp(-a[idx])) / (exp(a[idx]) + exp(-a[idx])); //overflows to inf/inf = NaN for large |a[idx]|
+ b[idx] = tanh(a[idx]);
+}
+
+__global__ void cudak_(tanh_grad)(const MATRIX_ELEM *output,
+ const MATRIX_ELEM *err,
+ MATRIX_ELEM *nerr,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx;
+ if (i >= nrow || j >= ncol) return;
+ idx = j + i * stride;
+ nerr[idx] = (1.0 - output[idx] * output[idx]) * err[idx];
+}
+
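tanh_grad relies on the identity d/dx tanh(x) = 1 - tanh(x)^2, so the backward pass needs only the forward output, never the original input. A quick host-side check of the identity (a standalone illustration, compile with -lm):

    /* compare the analytic tanh gradient against a central difference */
    #include <math.h>
    #include <stdio.h>
    int main(void) {
        double x = 0.7, eps = 1e-6;
        double numeric  = (tanh(x + eps) - tanh(x - eps)) / (2 * eps);
        double analytic = 1.0 - tanh(x) * tanh(x);
        printf("%.9f vs %.9f\n", numeric, analytic); /* agree closely */
        return 0;
    }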
__global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
const MATRIX_ELEM *max, const MATRIX_ELEM *deno,
int nrow, int ncol, int stride, int mstride) {
@@ -225,14 +261,18 @@ __global__ void cudak_(clip)(MATRIX_ELEM *a,
a[j + i * stride] = val_1;
}
+#ifdef __NERV_FUTURE_CUDA_7
__global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
int nrow_a, int ncol_a, int stride_c, int stride_a, double alpha, double beta) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= nrow_a || j >= ncol_a) return;
int i_c = lrintf(idx[i]);
- c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+    //critical: i_c can collide across threads (duplicate entries in the idx array), so atomicAdd is used
+ //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+ atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
}
+#endif
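The rewrite recasts c <- c * (1 - beta * alpha) + a * alpha as an additive delta, c <- c + (a * alpha - c * beta * alpha), so threads whose idx entries collide accumulate through atomicAdd instead of losing updates to a plain read-modify-write. Serial equivalent of one thread's contribution (illustrative; atomicAdd_nvidia is presumably the double-precision atomicAdd shim from cuda_helper.h):

    /* delta = alpha * a - beta * alpha * c_old;  c_new = c_old + delta,
       which equals c_old * (1 - beta * alpha) + alpha * a when no other
       thread touches the same row in between */
    MATRIX_ELEM delta = c[j + i_c * stride_c] * (-beta * alpha)
                      + a[j + i * stride_a] * alpha;
    c[j + i_c * stride_c] += delta;

Note that the read of c feeding the delta still happens outside the atomic, so colliding rows get an approximate decay rather than an exactly serialized update.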
__global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
int nrow, int ncol,
@@ -349,6 +389,48 @@ extern "C" {
cudaStreamSynchronize(0);
}
+ void cudak_(cuda_rand_uniform)(const Matrix *a) {
+ #ifdef MATRIX_USE_FLOAT
+ curandGenerateUniform(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+ #endif
+ #ifdef MATRIX_USE_DOUBLE
+ curandGenerateUniformDouble(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+ #endif
+ }
+
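cuda_rand_uniform fills the entire allocation (nrow * stride bytes, padding included) in one generator call, selecting the float or double cuRAND entry point at compile time. A standalone sketch of the same cuRAND pattern (the function name, buffer, and seed are illustrative):

    /* minimal cuRAND usage mirroring cuda_rand_uniform (float path);
       d_buf must be a device pointer with room for n floats */
    #include <curand.h>
    static void fill_uniform(float *d_buf, size_t n, unsigned long long seed) {
        curandGenerator_t gen;
        curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
        curandSetPseudoRandomGeneratorSeed(gen, seed);
        curandGenerateUniform(gen, d_buf, n);  /* n floats in (0, 1] */
        curandDestroyGenerator(gen);
    }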
+ void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+ CEIL_DIV(a->nrow, threadsPerBlock.y));
+ cudak_(thres_mask)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+ thres, low, high, a->nrow, a->ncol, a->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
+ void cudak_(cuda_tanh)(const Matrix *a, Matrix *b) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+ CEIL_DIV(b->nrow, threadsPerBlock.y));
+ cudak_(tanh)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
+ b->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
+ void cudak_(cuda_tanh_grad)(const Matrix *output,
+ const Matrix *err, Matrix *nerr) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x),
+ CEIL_DIV(nerr->nrow, threadsPerBlock.y));
+ cudak_(tanh_grad)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(output), MATRIX_ELEM_PTR(err),
+ MATRIX_ELEM_PTR(nerr),
+ nerr->nrow, nerr->ncol,
+ nerr->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
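All of these wrappers share the same 2-D launch geometry: a CUDA_THREADS_N x CUDA_THREADS_N thread block and a grid rounded up with CEIL_DIV so partial tiles still get a block; the i/j bound checks at the top of each kernel discard the overhang threads. Assuming the usual definition from cuda_helper.h (not shown in this diff):

    /* presumed helper: integer ceiling division for grid sizing */
    #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
    /* e.g. a 100x30 matrix with CUDA_THREADS_N == 16 launches a
       (CEIL_DIV(30,16), CEIL_DIV(100,16)) == (2, 7) grid of 16x16 blocks */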
void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) {
dim3 block(CUDA_THREADS_NN, 1);
int ncol = a->ncol;
@@ -550,6 +632,7 @@ extern "C" {
cudaStreamSynchronize(0);
}
+#ifdef __NERV_FUTURE_CUDA_7
void cudak_(cuda_update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
@@ -560,6 +643,7 @@ extern "C" {
a->stride / sizeof(MATRIX_ELEM), alpha, beta);
cudaStreamSynchronize(0);
}
+#endif
void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 2dc5899..65e0788 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -10,6 +10,7 @@
#include "../../common.h"
#include "../cukernel.h"
#include "../cuda_helper.h"
+#include <curand.h>
void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
@@ -75,6 +76,39 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(rand_uniform)(Matrix *a, Status *status) {
+ PROFILE_START
+ cudak_(cuda_rand_uniform)(a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, double high, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ PROFILE_START
+ cudak_(cuda_thres_mask)(a, b, thres, low, high);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
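At the host level, rand_uniform and thres_mask compose into a dropout-style mask. A hypothetical call sequence (p, nrow, ncol, and the surrounding setup are illustrative, not part of this commit):

    /* build a binary keep/drop mask with keep probability p */
    Status status;
    Matrix *mask = nerv_matrix_(create)(nrow, ncol, &status);
    nerv_matrix_(rand_uniform)(mask, &status);      /* mask ~ U(0, 1) */
    /* entries below 1 - p become 0 (dropped), the rest become 1 (kept) */
    nerv_matrix_(thres_mask)(mask, mask, 1.0 - p, 0.0, 1.0, &status);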
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ PROFILE_START
+ cudak_(cuda_tanh)(b, a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output, Status *status) {
+ CHECK_SAME_DIMENSION(nerr, err, status);
+ CHECK_SAME_DIMENSION(nerr, output, status);
+ PROFILE_START
+ cudak_(cuda_tanh_grad)(output, err, nerr);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
Matrix *max, *max_idx;
Matrix *dno;
@@ -359,6 +393,7 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+#ifdef __NERV_FUTURE_CUDA_7
void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
long nrow = a->nrow;
if (idx->nrow != 1)
@@ -370,6 +405,7 @@ void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+#endif
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
int context, Status *status) {
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index 21c29b7..aa8805a 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -9,6 +9,9 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status);
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
const Matrix *output, Status *status);
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status);
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+ const Matrix *output, Status *status);
Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status);
Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status);
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index 4246751..fd5d28f 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -10,6 +10,8 @@ void nerv_matrix_(data_free)(Matrix *self, Status *status) {
{
/* free matrix data */
MATRIX_DATA_FREE(MATRIX_ELEM_PTR(self), status);
+ curandDestroyGenerator(*(self->curand_gen));
+ free(self->curand_gen);
free(self->data_ref);
free(self);
}
@@ -39,6 +41,11 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
}
self->data_ref = (long *)malloc(sizeof(long));
*self->data_ref = 0;
+
+ self->curand_gen = (curandGenerator_t*)malloc(sizeof(curandGenerator_t));
+ curandCreateGenerator(self->curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
+ curandSetPseudoRandomGeneratorSeed(*(self->curand_gen), time(NULL));
+
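Each matrix now owns its own cuRAND generator, seeded with time(NULL). One caveat: time(NULL) has one-second resolution, so matrices created within the same second receive identical random streams. A sketch of one way to decorrelate them (a hypothetical alternative, not what this commit does):

    /* hypothetical: mix a process-wide counter into the per-matrix seed */
    static unsigned long long nerv_seed_counter = 0;
    curandSetPseudoRandomGeneratorSeed(*(self->curand_gen),
        (unsigned long long)time(NULL) ^ (nerv_seed_counter++ << 32));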
nerv_matrix_(data_retain)(self);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
return self;
@@ -57,6 +64,7 @@ Matrix *nerv_matrix_(getrow)(Matrix *self, int row) {
prow->nmax = prow->ncol;
MATRIX_ELEM_PTR(prow) = MATRIX_ROW_PTR(self, row);
prow->data_ref = self->data_ref;
+ prow->curand_gen = self->curand_gen;
nerv_matrix_(data_retain)(prow);
return prow;
}