author    TianxingHe <htx_2006@hotmail.com>    2015-12-04 15:19:44 +0800
committer TianxingHe <htx_2006@hotmail.com>    2015-12-04 15:19:44 +0800
commit    af684cb95478fc38cc3d9f284b6b518a431c87e2 (patch)
tree      faab52eb3f6507331703b656c62a9e2ebf3b3f92 /nerv/lib
parent    cbcce5ecc2864872b411eebbd307fa0f9a7e9dc0 (diff)
parent    618450eb71817ded45c422f35d8fede2d52a66b2 (diff)
Merge pull request #15 from cloudygoose/txh18/rnnlm
LSTM&TNN
Diffstat (limited to 'nerv/lib')

 nerv/lib/matrix/cukernel.cu         | 36
 nerv/lib/matrix/cukernel.h          |  4
 nerv/lib/matrix/generic/cukernel.cu | 86
 nerv/lib/matrix/generic/cumatrix.c  | 36
 nerv/lib/matrix/generic/cumatrix.h  |  3
 nerv/lib/matrix/generic/matrix.c    |  8
 nerv/lib/matrix/matrix.h            |  2

7 files changed, 174 insertions(+), 1 deletion(-)
diff --git a/nerv/lib/matrix/cukernel.cu b/nerv/lib/matrix/cukernel.cu
index a19030a..210e6bf 100644
--- a/nerv/lib/matrix/cukernel.cu
+++ b/nerv/lib/matrix/cukernel.cu
@@ -1,5 +1,41 @@
 #define NERV_GENERIC_CUKERNEL
+#include "cumatrix.h"
+
+#ifdef __NERV_FUTURE_CUDA_7
+__device__ double atomicAdd_nvidia(double* address, double val) {
+    /* implementation provided by NVIDIA;
+       atomicAdd for double is not included in CUDA */
+    unsigned long long int* address_as_ull =
+        (unsigned long long int*)address;
+    unsigned long long int old = *address_as_ull, assumed;
+    do {
+        assumed = old;
+        old = atomicCAS(address_as_ull, assumed,
+                        __double_as_longlong(val +
+                            __longlong_as_double(assumed)));
+    } while (assumed != old);
+    return __longlong_as_double(old);
+}
+
+__device__ float atomicAdd_nvidia(float* address, float val) {
+    /* implementation provided by NVIDIA;
+       I tried the built-in atomicAdd, but the select_linear layer result
+       seemed unreproducible; sadly, even with this implementation, the
+       select_linear layer result is still unreproducible */
+    int* address_as_ull = (int*)address;
+    int old = *address_as_ull, assumed;
+    do {
+        assumed = old;
+        old = atomicCAS(address_as_ull, assumed,
+                        __float_as_int(val +
+                            __int_as_float(assumed)));
+    } while (assumed != old);
+    return __int_as_float(old);
+}
+#endif
+
+
 #define cudak_(NAME) cudak_float_ ## NAME
 #define MATRIX_USE_FLOAT
 #include "generic/elem_type.h"
diff --git a/nerv/lib/matrix/cukernel.h b/nerv/lib/matrix/cukernel.h
index fffe0bc..fe682d3 100644
--- a/nerv/lib/matrix/cukernel.h
+++ b/nerv/lib/matrix/cukernel.h
@@ -3,6 +3,10 @@ void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, Matrix *c);
 void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b);
 void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
 void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
+void cudak_(cuda_rand_uniform)(const Matrix *a); // a's curand_gen may be modified
+void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high);
+void cudak_(cuda_tanh)(const Matrix *a, Matrix *b);
+void cudak_(cuda_tanh_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
 void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b);
 void cudak_(cuda_rowmax)(const Matrix *a, Matrix *b);
 void cudak_(cuda_rowmax_idx)(const Matrix *a, Matrix *b, Matrix *idx);
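Aside (not part of the patch): the CAS loop above is the standard workaround for the missing double-precision atomicAdd, which only became a CUDA built-in on compute capability 6.0 hardware; each thread re-reads the target word and retries its compare-and-swap until no other thread has modified the word in between. A minimal self-contained sketch of the same pattern; the kernel name `accumulate` and the launch sizes are illustrative, not from the repository:

```c
#include <stdio.h>
#include <cuda_runtime.h>

/* same CAS-retry pattern as atomicAdd_nvidia above */
__device__ double my_atomic_add(double *address, double val) {
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        /* retry until the word was not changed by another thread in between */
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

__global__ void accumulate(double *sum) {
    my_atomic_add(sum, 1.0);  /* every thread adds into the same location */
}

int main(void) {
    double *d_sum, h_sum = 0.0;
    cudaMalloc(&d_sum, sizeof(double));
    cudaMemcpy(d_sum, &h_sum, sizeof(double), cudaMemcpyHostToDevice);
    accumulate<<<128, 256>>>(d_sum);
    cudaMemcpy(&h_sum, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
    printf("%.0f\n", h_sum);  /* expect 128 * 256 = 32768 */
    cudaFree(d_sum);
    return 0;
}
```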
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index d042d48..aa830b5 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -20,6 +20,19 @@ __global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
     b[idx] = log(tmp);
 }
 
+__global__ void cudak_(thres_mask)(MATRIX_ELEM *a, MATRIX_ELEM *b, double thres, double low, double high,
+                                   int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    if (b[idx] < thres)
+        a[idx] = low;
+    else
+        a[idx] = high;
+}
+
 __global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
                                  MATRIX_ELEM *c,
                                  int nrow, int ncol, int stride)
@@ -53,6 +66,29 @@ __global__ void cudak_(sigmoid_grad)(const MATRIX_ELEM *output,
     nerr[idx] = output[idx] * (1.0 - output[idx]) * err[idx];
 }
 
+__global__ void cudak_(tanh)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                             int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    //b[idx] = (exp(a[idx]) - exp(-a[idx])) / (exp(a[idx]) + exp(-a[idx])); //could produce nan for large |a[idx]|
+    b[idx] = tanh(a[idx]);
+}
+
+__global__ void cudak_(tanh_grad)(const MATRIX_ELEM *output,
+                                  const MATRIX_ELEM *err,
+                                  MATRIX_ELEM *nerr,
+                                  int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    nerr[idx] = (1.0 - output[idx] * output[idx]) * err[idx];
+}
+
 __global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                       const MATRIX_ELEM *max, const MATRIX_ELEM *deno,
                                       int nrow, int ncol, int stride, int mstride)
@@ -225,14 +261,18 @@ __global__ void cudak_(clip)(MATRIX_ELEM *a,
         a[j + i * stride] = val_1;
 }
 
+#ifdef __NERV_FUTURE_CUDA_7
 __global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
                                            int nrow_a, int ncol_a, int stride_c, int stride_a,
                                            double alpha, double beta) {
     int j = blockIdx.x * blockDim.x + threadIdx.x;
     int i = blockIdx.y * blockDim.y + threadIdx.y;
     if (i >= nrow_a || j >= ncol_a) return;
     int i_c = lrintf(idx[i]);
-    c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+    //critical: i_c can conflict among threads (the same index may appear more than once in idx), so atomicAdd is used
+    //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+    atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
 }
+#endif
 
 __global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                    int nrow, int ncol,
@@ -349,6 +389,48 @@ extern "C" {
        cudaStreamSynchronize(0);
     }
 
+    void cudak_(cuda_rand_uniform)(const Matrix *a) {
+        #ifdef MATRIX_USE_FLOAT
+        curandGenerateUniform(*(a->curand_gen), MATRIX_ELEM_PTR(a),
+                              a->nrow * a->stride / sizeof(MATRIX_ELEM));
+        #endif
+        #ifdef MATRIX_USE_DOUBLE
+        curandGenerateUniformDouble(*(a->curand_gen), MATRIX_ELEM_PTR(a),
+                                    a->nrow * a->stride / sizeof(MATRIX_ELEM));
+        #endif
+    }
+
+    void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+                       CEIL_DIV(a->nrow, threadsPerBlock.y));
+        cudak_(thres_mask)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             thres, low, high, a->nrow, a->ncol, a->stride / sizeof(MATRIX_ELEM));
+        cudaStreamSynchronize(0);
+    }
+
+    void cudak_(cuda_tanh)(const Matrix *a, Matrix *b) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(tanh)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
+             b->stride / sizeof(MATRIX_ELEM));
+        cudaStreamSynchronize(0);
+    }
+
+    void cudak_(cuda_tanh_grad)(const Matrix *output,
+                                const Matrix *err, Matrix *nerr) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x),
+                       CEIL_DIV(nerr->nrow, threadsPerBlock.y));
+        cudak_(tanh_grad)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(output), MATRIX_ELEM_PTR(err),
+             MATRIX_ELEM_PTR(nerr),
+             nerr->nrow, nerr->ncol,
+             nerr->stride / sizeof(MATRIX_ELEM));
+        cudaStreamSynchronize(0);
+    }
+
     void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) {
         dim3 block(CUDA_THREADS_NN, 1);
         int ncol = a->ncol;
@@ -550,6 +632,7 @@ extern "C" {
         cudaStreamSynchronize(0);
     }
 
+#ifdef __NERV_FUTURE_CUDA_7
     void cudak_(cuda_update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx,
                                          double alpha, double beta) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
@@ -560,6 +643,7 @@ extern "C" {
                 a->stride / sizeof(MATRIX_ELEM), alpha, beta);
         cudaStreamSynchronize(0);
     }
+#endif
 
     void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
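Aside (not part of the patch): cudak_(tanh_grad) relies on the identity d/dx tanh(x) = 1 - tanh(x)^2, which is why the backward kernel needs only the forward *output*, never the original input. A small host-side numerical check of that identity, illustrative only:

```c
#include <math.h>
#include <stdio.h>

int main(void) {
    const double eps = 1e-6;
    for (double x = -3.0; x <= 3.0; x += 1.5) {
        double y = tanh(x);
        /* central finite difference vs. the closed form used by tanh_grad */
        double numeric  = (tanh(x + eps) - tanh(x - eps)) / (2 * eps);
        double analytic = 1.0 - y * y;
        printf("x=%5.2f  numeric=%.8f  analytic=%.8f\n", x, numeric, analytic);
    }
    return 0;
}
```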
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 2dc5899..65e0788 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -10,6 +10,7 @@
 #include "../../common.h"
 #include "../cukernel.h"
 #include "../cuda_helper.h"
+#include <curand.h>
 
 void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
                        MATRIX_ELEM alpha, MATRIX_ELEM beta,
@@ -75,6 +76,39 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
+void nerv_matrix_(rand_uniform)(Matrix *a, Status *status) {
+    PROFILE_START
+    cudak_(cuda_rand_uniform)(a);
+    PROFILE_STOP
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, double high, Status *status) {
+    CHECK_SAME_DIMENSION(a, b, status);
+    PROFILE_START
+    cudak_(cuda_thres_mask)(a, b, thres, low, high);
+    PROFILE_STOP
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
+    CHECK_SAME_DIMENSION(a, b, status);
+    PROFILE_START
+    cudak_(cuda_tanh)(b, a);
+    PROFILE_STOP
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+                             const Matrix *output, Status *status) {
+    CHECK_SAME_DIMENSION(nerr, err, status);
+    CHECK_SAME_DIMENSION(nerr, output, status);
+    PROFILE_START
+    cudak_(cuda_tanh_grad)(output, err, nerr);
+    PROFILE_STOP
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
 Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
     Matrix *max, *max_idx;
     Matrix *dno;
@@ -359,6 +393,7 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
+#ifdef __NERV_FUTURE_CUDA_7
 void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx,
                                       double alpha, double beta, Status *status) {
     long nrow = a->nrow;
     if (idx->nrow != 1)
@@ -370,6 +405,7 @@ void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *
     PROFILE_STOP
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
+#endif
 
 void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b, int context,
                               Status *status) {
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index 21c29b7..aa8805a 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -9,6 +9,9 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
 void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status);
 void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
                                 const Matrix *output, Status *status);
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status);
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
+                             const Matrix *output, Status *status);
 Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status);
 Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status);
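Aside (not part of the patch): together, rand_uniform and thres_mask are the building blocks of a dropout-style binary mask: fill a matrix with U(0,1) noise, then threshold it to {low, high}. The helper below is hypothetical (make_dropout_mask does not exist in the repository) and assumes it is compiled in the same generic instantiation context as cumatrix.c, so Matrix, Status, and the nerv_matrix_() macro are in scope; error handling is omitted for brevity:

```c
/* Hypothetical sketch: build a {0,1} dropout mask from the two new ops. */
static Matrix *nerv_matrix_(make_dropout_mask)(long nrow, long ncol,
                                               double drop_rate, Status *status) {
    Matrix *mask = nerv_matrix_(create)(nrow, ncol, status);
    nerv_matrix_(rand_uniform)(mask, status);   /* mask[i][j] ~ U(0,1) */
    /* thres_mask reads the noise from its second argument and writes the
       result into its first; passing mask twice thresholds it in place:
       entries below drop_rate become 0.0, the rest 1.0 */
    nerv_matrix_(thres_mask)(mask, mask, drop_rate, 0.0, 1.0, status);
    return mask;
}
```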
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index 4246751..fd5d28f 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -10,6 +10,8 @@ void nerv_matrix_(data_free)(Matrix *self, Status *status) {
     {
         /* free matrix data */
         MATRIX_DATA_FREE(MATRIX_ELEM_PTR(self), status);
+        curandDestroyGenerator(*(self->curand_gen));
+        free(self->curand_gen);
         free(self->data_ref);
         free(self);
     }
@@ -39,6 +41,11 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
     }
     self->data_ref = (long *)malloc(sizeof(long));
     *self->data_ref = 0;
+
+    self->curand_gen = (curandGenerator_t*)malloc(sizeof(curandGenerator_t));
+    curandCreateGenerator(self->curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
+    curandSetPseudoRandomGeneratorSeed(*(self->curand_gen), time(NULL));
+
     nerv_matrix_(data_retain)(self);
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
     return self;
@@ -57,6 +64,7 @@ Matrix *nerv_matrix_(getrow)(Matrix *self, int row) {
     prow->nmax = prow->ncol;
     MATRIX_ELEM_PTR(prow) = MATRIX_ROW_PTR(self, row);
     prow->data_ref = self->data_ref;
+    prow->curand_gen = self->curand_gen;
     nerv_matrix_(data_retain)(prow);
     return prow;
 }
diff --git a/nerv/lib/matrix/matrix.h b/nerv/lib/matrix/matrix.h
index 67a6e30..5a85c08 100644
--- a/nerv/lib/matrix/matrix.h
+++ b/nerv/lib/matrix/matrix.h
@@ -2,6 +2,7 @@
 #define NERV_GENERIC_MATRIX_H
 
 #include <stddef.h>
+#include <curand.h>
 
 typedef struct Matrix {
     size_t stride;              /* size of a row */
@@ -13,6 +14,7 @@ typedef struct Matrix {
         long *i;
     } data;                     /* pointer to actual storage */
     long *data_ref;
+    curandGenerator_t *curand_gen;
 } Matrix;
 
 #define MATRIX_ROW_PTR(self, row) \
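Aside (not part of the patch): the matrix.c/matrix.h changes give every matrix its own cuRAND generator: create() allocates and seeds it, getrow() shares the pointer with row views, and data_free() destroys it along with the data. A standalone sketch of that generator lifecycle; buffer size and variable names are illustrative:

```c
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <curand.h>

int main(void) {
    const size_t n = 8;
    float *d_buf, h_buf[8];
    curandGenerator_t gen;

    cudaMalloc(&d_buf, n * sizeof(float));
    /* what create() does once per matrix: make and seed the generator */
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, (unsigned long long)time(NULL));

    /* what cuda_rand_uniform does on each call: reuse the generator */
    curandGenerateUniform(gen, d_buf, n);
    cudaMemcpy(h_buf, d_buf, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < n; i++)
        printf("%f\n", h_buf[i]);

    /* what data_free() does when the last reference goes away */
    curandDestroyGenerator(gen);
    cudaFree(d_buf);
    return 0;
}
```

One design note: because getrow() copies the curand_gen pointer into the row view while data_free() only runs once the shared data_ref count drops, the generator appears to be destroyed exactly once, together with the underlying storage, rather than once per view.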