path: root/nerv/lib/matrix/generic/cukernel.cu
Diffstat (limited to 'nerv/lib/matrix/generic/cukernel.cu')
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu | 72
1 file changed, 64 insertions(+), 8 deletions(-)
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index aa830b5..2b696d5 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -262,12 +262,29 @@ __global__ void cudak_(clip)(MATRIX_ELEM *a,
}
#ifdef __NERV_FUTURE_CUDA_7
-__global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
- int nrow_a, int ncol_a, int stride_c, int stride_a, double alpha, double beta) {
+__global__ void cudak_(update_select_rows_by_rowidx)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
+ int nrow_a, int ncol_a, int nrow_c, int stride_c, int stride_a, double alpha, double beta) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= nrow_a || j >= ncol_a) return;
int i_c = lrintf(idx[i]);
+ if (i_c < 0 || i_c >= nrow_c) {
+ printf("ERROR inside kernel update_select_rows, i_c(%d) out of range!", i_c);
+ }
+ // critical: i_c could collide across threads (the same index may appear more than once in idx), so atomicAdd is used
+ // c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+ atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
+}
+
+__global__ void cudak_(update_select_rows_by_colidx)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
+ int nrow_a, int ncol_a, int nrow_c, int stride_c, int stride_a, int stride_idx, double alpha, double beta) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ if (i >= nrow_a || j >= ncol_a) return;
+ int i_c = lrintf(idx[stride_idx * i]);
+ if (i_c < 0 || i_c >= nrow_c) {
+ printf("ERROR inside kernel update_select_rows, i_c(%d) out of range!", i_c);
+ }
// critical: i_c could collide across threads (the same index may appear more than once in idx), so atomicAdd is used
// c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
atomicAdd_nvidia(c + j + i_c * stride_c, c[j + i_c * stride_c] * (- beta * alpha) + a[j + i * stride_a] * alpha);
@@ -335,13 +352,31 @@ __global__ void cudak_(gen_col_idx)(MATRIX_ELEM *b,
__global__ void cudak_(copy_rows_by_idx)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
const MATRIX_ELEM *idx,
- int nrow, int ncol, int stride) {
+ int nrow, int ncol, int a_nrow, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ if (i >= nrow || j >= ncol) return;
+ int k = lrintf(idx[i]);
+ if (k < 0 || k >= a_nrow) {
+ printf("error in kernel copy_rows_by_idx k(%d) out of range\n", k);
+ }
+ b[j + i * stride] = a[j + k * stride];
+}
+
+__global__ void cudak_(copy_rows_by_colidx)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ const MATRIX_ELEM *idx,
+ int nrow, int ncol, int a_nrow, int stride, int idx_stride) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= nrow || j >= ncol) return;
- b[j + i * stride] = a[j + lrintf(idx[i]) * stride];
+ int k = lrintf(idx[i * idx_stride]);
+ if (k < 0 || k >= a_nrow) {
+ printf("error in kernel copy_rows_by_colidx k(%d) out of range\n", k);
+ }
+ b[j + i * stride] = a[j + k * stride];
}
+
extern "C" {
#include "../cukernel.h"
void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) {
@@ -633,16 +668,26 @@ extern "C" {
}
#ifdef __NERV_FUTURE_CUDA_7
- void cudak_(cuda_update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
+ void cudak_(cuda_update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
CEIL_DIV(a->nrow, threadsPerBlock.y));
- cudak_(update_select_rows)<<<numBlocks, threadsPerBlock>>> \
+ cudak_(update_select_rows_by_rowidx)<<<numBlocks, threadsPerBlock>>> \
(MATRIX_ELEM_PTR(c), MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(idx),
- a->nrow, a->ncol, c->stride / sizeof(MATRIX_ELEM),
+ a->nrow, a->ncol, c->nrow, c->stride / sizeof(MATRIX_ELEM),
a->stride / sizeof(MATRIX_ELEM), alpha, beta);
cudaStreamSynchronize(0);
}
+ void cudak_(cuda_update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+ CEIL_DIV(a->nrow, threadsPerBlock.y));
+ cudak_(update_select_rows_by_colidx)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(c), MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(idx),
+ a->nrow, a->ncol, c->nrow, c->stride / sizeof(MATRIX_ELEM),
+ a->stride / sizeof(MATRIX_ELEM), idx->stride / sizeof(MATRIX_ELEM), alpha, beta);
+ cudaStreamSynchronize(0);
+ }
#endif
void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
@@ -710,7 +755,18 @@ extern "C" {
cudak_(copy_rows_by_idx)<<<numBlocks, threadsPerBlock>>> \
(MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
MATRIX_ELEM_PTR(idx) + idx_begin,
- b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
+ b->nrow, b->ncol, a->nrow, b->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
+ void cudak_(cuda_copy_rows_by_colidx)(const Matrix *a, Matrix *b,
+ const Matrix *idx, int idx_begin) {
+ dim3 threadsPerBlock(CUDA_THREADS_NN, 1);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), b->nrow);
+ cudak_(copy_rows_by_colidx)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+ MATRIX_ELEM_PTR(idx) + idx_begin,
+ b->nrow, b->ncol, a->nrow, b->stride / sizeof(MATRIX_ELEM), idx->stride / sizeof(MATRIX_ELEM));
cudaStreamSynchronize(0);
}
}
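
For orientation, the following is a minimal standalone CUDA sketch of the scatter-update pattern that the renamed update_select_rows_by_* kernels rely on: each thread adds a scaled row of a into the row of c selected by idx, and because several rows of a may map to the same target row, the accumulation goes through atomicAdd. Everything here (the scatter_add_rows name, the plain float element type, the toy sizes, and the use of CUDA's built-in atomicAdd in place of NERV's atomicAdd_nvidia) is an illustrative assumption, not code from the patch.

// scatter_add_rows.cu -- hypothetical sketch, not part of NERV
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scatter_add_rows(float *c, const float *a, const float *idx,
                                 int nrow_a, int ncol_a, int nrow_c,
                                 int stride_c, int stride_a,
                                 float alpha, float beta) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column within a row of a
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row of a
    if (i >= nrow_a || j >= ncol_a) return;
    int i_c = lrintf(idx[i]);                       // target row of c
    if (i_c < 0 || i_c >= nrow_c) return;           // skip out-of-range indices
    // Mirrors the patch's update, c[i_c] += alpha * a[i] - alpha * beta * c[i_c],
    // done atomically because duplicate entries in idx hit the same output row.
    atomicAdd(c + j + i_c * stride_c,
              alpha * a[j + i * stride_a] - alpha * beta * c[j + i_c * stride_c]);
}

int main() {
    const int nrow_a = 4, ncol = 3, nrow_c = 2;
    float ha[nrow_a * ncol], hc[nrow_c * ncol] = {0};
    float hidx[nrow_a] = {0, 1, 0, 1};              // rows 0/2 of a both update row 0 of c
    for (int k = 0; k < nrow_a * ncol; ++k) ha[k] = 1.0f;

    float *da, *dc, *didx;
    cudaMalloc((void **)&da, sizeof(ha));
    cudaMalloc((void **)&dc, sizeof(hc));
    cudaMalloc((void **)&didx, sizeof(hidx));
    cudaMemcpy(da, ha, sizeof(ha), cudaMemcpyHostToDevice);
    cudaMemcpy(dc, hc, sizeof(hc), cudaMemcpyHostToDevice);
    cudaMemcpy(didx, hidx, sizeof(hidx), cudaMemcpyHostToDevice);

    dim3 block(16, 16);
    dim3 grid((ncol + block.x - 1) / block.x, (nrow_a + block.y - 1) / block.y);
    scatter_add_rows<<<grid, block>>>(dc, da, didx, nrow_a, ncol, nrow_c,
                                      ncol, ncol, /*alpha=*/0.1f, /*beta=*/0.0f);
    cudaDeviceSynchronize();

    cudaMemcpy(hc, dc, sizeof(hc), cudaMemcpyDeviceToHost);
    for (int r = 0; r < nrow_c; ++r) {              // expect 0.20 in every slot:
        for (int col = 0; col < ncol; ++col)        // two source rows per target row,
            printf("%.2f ", hc[r * ncol + col]);    // each contributing alpha * 1.0
        printf("\n");
    }
    cudaFree(da); cudaFree(dc); cudaFree(didx);
    return 0;
}

The only difference between the two kernels in the patch is how the index is fetched: the by_rowidx variant reads idx[i] from a row vector, while the by_colidx variant reads idx[stride_idx * i] from a column vector; the atomic accumulation itself is identical to the sketch above.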