author     txh18 <[email protected]>                     2015-11-23 15:34:05 +0800
committer  txh18 <[email protected]>                     2015-11-23 15:34:05 +0800
commit     fee96e3d2cb39c6e2b4631b9d8b37b9949df27e7 (patch)
tree       18493f755fedd928f5225d435875cd440342d255 /nerv/lib
parent     cb4f01b67c3bbbfef8eee8902210b75957add62f (diff)
parent     6d66b73e49e3a3e41a1fb92d5180163f45ff6253 (diff)
Merge remote-tracking branch 'upstream/master'
Diffstat (limited to 'nerv/lib')
-rw-r--r--  nerv/lib/matrix/cukernel.cu          | 36
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu  |  8
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c   |  2
3 files changed, 45 insertions, 1 deletion
diff --git a/nerv/lib/matrix/cukernel.cu b/nerv/lib/matrix/cukernel.cu
index a19030a..210e6bf 100644
--- a/nerv/lib/matrix/cukernel.cu
+++ b/nerv/lib/matrix/cukernel.cu
@@ -1,5 +1,41 @@
 #define NERV_GENERIC_CUKERNEL
+#include "cumatrix.h"
+
+#ifdef __NERV_FUTURE_CUDA_7
+__device__ double atomicAdd_nvidia(double* address, double val) {
+    /* NVIDIA provided this implementation;
+       atomicAdd is not included in CUDA for double */
+    unsigned long long int* address_as_ull =
+        (unsigned long long int*)address;
+    unsigned long long int old = *address_as_ull, assumed;
+    do {
+        assumed = old;
+        old = atomicCAS(address_as_ull, assumed,
+                        __double_as_longlong(val +
+                            __longlong_as_double(assumed)));
+    } while (assumed != old);
+    return __longlong_as_double(old);
+}
+
+__device__ float atomicAdd_nvidia(float* address, float val) {
+    /* NVIDIA provided this implementation.
+       I tried the built-in atomicAdd, but the select_linear layer result
+       seemed unreproducible; sadly, even with this implementation, the
+       select_linear layer result is still unreproducible */
+    int* address_as_ull = (int*)address;
+    int old = *address_as_ull, assumed;
+    do {
+        assumed = old;
+        old = atomicCAS(address_as_ull, assumed,
+                        __float_as_int(val +
+                            __int_as_float(assumed)));
+    } while (assumed != old);
+    return __int_as_float(old);
+}
+#endif
+
+
 #define cudak_(NAME) cudak_float_ ## NAME
 #define MATRIX_USE_FLOAT
 #include "generic/elem_type.h"
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index d042d48..e58c488 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -225,14 +225,18 @@ __global__ void cudak_(clip)(MATRIX_ELEM *a,
         a[j + i * stride] = val_1;
 }
 
+#ifdef __NERV_FUTURE_CUDA_7
 __global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
                                            int nrow_a, int ncol_a, int stride_c, int stride_a, double alpha, double beta) {
     int j = blockIdx.x * blockDim.x + threadIdx.x;
     int i = blockIdx.y * blockDim.y + threadIdx.y;
     if (i >= nrow_a || j >= ncol_a) return;
     int i_c = lrintf(idx[i]);
-    c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+    //critical: i_c could conflict among threads (same index in the idx array), so atomicAdd is used
+    //c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+    atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
 }
+#endif
 
 __global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                    int nrow, int ncol,
@@ -550,6 +554,7 @@ extern "C" {
         cudaStreamSynchronize(0);
     }
 
+#ifdef __NERV_FUTURE_CUDA_7
     void cudak_(cuda_update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
@@ -560,6 +565,7 @@ extern "C" {
                 a->stride / sizeof(MATRIX_ELEM), alpha, beta);
         cudaStreamSynchronize(0);
     }
+#endif
 
     void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
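A quick check (not part of the commit) that the atomic rewrite in update_select_rows preserves the original arithmetic: the replaced line computed, per element,

    c_new = c * (1 - alpha * beta) + a * alpha

while the new line passes the delta a * alpha - c * alpha * beta to atomicAdd_nvidia, which yields

    c_new = c + (a * alpha - c * alpha * beta) = c * (1 - alpha * beta) + a * alpha

So an uncontended update is numerically identical, while threads that map to the same i_c now accumulate their contributions instead of overwriting one another. One caveat, which may be why the comment still reports unreproducible results: c[j + i_c * stride_c] is read outside the atomic operation, so when beta != 0 the delta can be computed from a stale value.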
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 2dc5899..00af895 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -359,6 +359,7 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
+#ifdef __NERV_FUTURE_CUDA_7
 void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
     long nrow = a->nrow;
     if (idx->nrow != 1)
@@ -370,6 +371,7 @@ void nerv_matrix_(update_select_rows)(Matrix *c, const Matrix *a, const Matrix *
     PROFILE_STOP
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
+#endif
 
 void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b, int context,
                               Status *status) {
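For readers unfamiliar with the atomicCAS idiom the patch guards behind __NERV_FUTURE_CUDA_7, the sketch below shows the same emulation pattern in a self-contained program. It is not from this repository; the file name, function names, and test values are made up for illustration, and it assumes a CUDA 7-era toolchain. On devices of compute capability 6.0 or newer, CUDA ships a native atomicAdd(double*, double) and the emulation is unnecessary.

// atomic_demo.cu (hypothetical file, for illustration only)
// Build: nvcc -o atomic_demo atomic_demo.cu
#include <cstdio>

__device__ double atomicAdd_emulated(double *address, double val) {
    // Reinterpret the 64-bit double as an unsigned long long so that
    // atomicCAS (which has no double overload) can operate on its bits.
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Try to swap in assumed + val; if another thread changed the
        // word in the meantime, atomicCAS returns the new bits and we retry.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

__global__ void accumulate(double *sum, const double *vals, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicAdd_emulated(sum, vals[i]);  // every thread targets one address
}

int main() {
    const int n = 1024;
    double h_vals[n], h_sum = 0.0;
    for (int i = 0; i < n; i++) h_vals[i] = 0.5;   // expected sum: 512
    double *d_sum, *d_vals;
    cudaMalloc(&d_sum, sizeof(double));
    cudaMalloc(&d_vals, n * sizeof(double));
    cudaMemcpy(d_vals, h_vals, n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemset(d_sum, 0, sizeof(double));
    accumulate<<<(n + 255) / 256, 256>>>(d_sum, d_vals, n);
    cudaMemcpy(&h_sum, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_sum);                   // prints 512.000000
    cudaFree(d_vals);
    cudaFree(d_sum);
    return 0;
}

The CAS loop retries until no other thread has modified the 64-bit word between the read and the swap, which makes each addition atomic; the order in which threads win, and hence the floating-point rounding of the running sum, remains nondeterministic across runs, consistent with the reproducibility remark in the commit's comments.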