path: root/nerv/lib/matrix/generic/cukernel.cu
author    txh18 <cloudygooseg@gmail.com>    2015-12-01 21:39:16 +0800
committer txh18 <cloudygooseg@gmail.com>    2015-12-01 21:39:16 +0800
commit    0c286aa6237da9e8daa7db8ed1e3805a33312926 (patch)
tree      9f9f8619aeb717e21b7fd51c30fac1cfbd06a3cb /nerv/lib/matrix/generic/cukernel.cu
parent    910fd4a25527028414d6cc3b2b620c74e9d06e8c (diff)
added rand_uniform and thres_mask for cumatrix
Diffstat (limited to 'nerv/lib/matrix/generic/cukernel.cu')
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu  31
1 file changed, 31 insertions, 0 deletions
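
Note on usage: the two new entry points compose naturally into a dropout-style 0/1 mask, i.e. fill a matrix with uniform randoms and then threshold it into two constants (whether that is the intended use is not stated in this commit). The sketch below reproduces the same technique outside NERV's Matrix/template machinery; all names in it (thres_mask_demo, n, the seed, the 0.2 threshold) are illustrative and not part of this patch, the patch's kernel indexes a row-padded matrix in 2-D while this sketch flattens to a 1-D buffer for brevity, and it assumes a standard CUDA toolchain with cuRAND (compile with `nvcc -lcurand`).

    #include <cstdio>
    #include <cuda_runtime.h>
    #include <curand.h>

    /* Same idea as cudak_(thres_mask): entries below `thres` become `low`,
     * everything else becomes `high`. */
    __global__ void thres_mask_demo(float *a, float thres, float low, float high, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= n) return;
        a[i] = (a[i] < thres) ? low : high;
    }

    int main()
    {
        const int n = 1024;
        float *d_a;
        cudaMalloc(&d_a, n * sizeof(float));

        /* Fill with uniform randoms in (0, 1], as cuda_rand_uniform does via cuRAND. */
        curandGenerator_t gen;
        curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
        curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
        curandGenerateUniform(gen, d_a, n);

        /* Threshold into a 0/1 mask that keeps roughly 80% of the entries. */
        thres_mask_demo<<<(n + 255) / 256, 256>>>(d_a, 0.2f, 0.0f, 1.0f, n);
        cudaDeviceSynchronize();

        float h_a[8];
        cudaMemcpy(h_a, d_a, sizeof(h_a), cudaMemcpyDeviceToHost);
        for (int i = 0; i < 8; ++i)
            printf("%g ", h_a[i]);
        printf("\n");

        curandDestroyGenerator(gen);
        cudaFree(d_a);
        return 0;
    }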
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 1a20b4f..b092e4a 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -20,6 +20,19 @@ __global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
     b[idx] = log(tmp);
 }
+__global__ void cudak_(thres_mask)(MATRIX_ELEM *a, double thres, double low, double high,
+                                   int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    if (a[idx] < thres)
+        a[idx] = low;
+    else
+        a[idx] = high;
+}
+
 __global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
                                  MATRIX_ELEM *c,
                                  int nrow, int ncol, int stride) {
@@ -376,6 +389,24 @@ extern "C" {
         cudaStreamSynchronize(0);
     }
+    void cudak_(cuda_rand_uniform)(Matrix *a) {
+        #ifdef MATRIX_USE_FLOAT
+        curandGenerateUniform(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+        #endif
+        #ifdef MATRIX_USE_DOUBLE
+        curandGenerateUniformDouble(*(a->curand_gen), MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+        #endif
+    }
+
+    void cudak_(cuda_thres_mask)(const Matrix *a, double thres, double low, double high) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+                       CEIL_DIV(a->nrow, threadsPerBlock.y));
+        cudak_(thres_mask)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), thres, low, high, a->nrow, a->ncol, a->stride / sizeof(MATRIX_ELEM));
+        cudaStreamSynchronize(0);
+    }
+
     void cudak_(cuda_tanh)(const Matrix *a, Matrix *b) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
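
One detail worth noting about cuda_thres_mask (and the existing wrappers around it, such as cuda_tanh): the grid size is rounded up with CEIL_DIV, so the last row and column of blocks overhang the matrix, and the kernel's bounds check `if (i >= nrow || j >= ncol) return;` retires the surplus threads. A minimal host-side sketch of that arithmetic, under the assumptions that CEIL_DIV is the usual round-up integer division and that CUDA_THREADS_N is 16 (both are defined elsewhere in cumatrix, not in this hunk), with a hypothetical matrix shape:

    #include <stdio.h>

    /* Round-up integer division; assumed to match cukernel.cu's CEIL_DIV. */
    #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

    int main(void)
    {
        int nrow = 100, ncol = 250, threads = 16;   /* hypothetical shape, assumed CUDA_THREADS_N */
        int bx = CEIL_DIV(ncol, threads);           /* blocks along columns -> 16 */
        int by = CEIL_DIV(nrow, threads);           /* blocks along rows    -> 7  */
        printf("grid %d x %d, block %d x %d, threads launched %d for %d elements\n",
               bx, by, threads, threads, bx * by * threads * threads, nrow * ncol);
        /* 28672 threads are launched for 25000 elements; the extra 3672 fail the
         * bounds check inside the kernel and return immediately. */
        return 0;
    }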