aboutsummaryrefslogtreecommitdiff
path: root/nerv/lib/matrix/generic/cukernel.cu
diff options
context:
space:
mode:
authortxh18 <cloudygooseg@gmail.com>2015-10-27 16:24:55 +0800
committertxh18 <cloudygooseg@gmail.com>2015-10-27 16:24:55 +0800
commit7c95640c95f1cc1d84b4d49fa97fd922748b88a7 (patch)
treee78fc611b8768ddf6e9191d597bf667e83b3353b /nerv/lib/matrix/generic/cukernel.cu
parentba8a1c9d5366c22b0b631f26ae1de7c5da2cbaeb (diff)
added update_select_rows for select_linear:update speed-up
Diffstat (limited to 'nerv/lib/matrix/generic/cukernel.cu')
-rw-r--r--nerv/lib/matrix/generic/cukernel.cu20
1 file changed, 20 insertions, 0 deletions
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 08feb59..6c8e64a 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -225,6 +225,15 @@ __global__ void cudak_(clip)(MATRIX_ELEM *a,
a[j + i * stride] = val_1;
}
+/* Scatter-update kernel: for each row i of a, the row idx[i] of c is updated as
+ *   c_row = c_row * (1 - beta * alpha) + a_row * alpha
+ * i.e. a scaled accumulate with decay on the rows of c selected by idx.
+ * Expects a 2D launch covering ncol_a (x) by nrow_a (y); out-of-range threads
+ * exit via the guard below.
+ * NOTE(review): idx is rounded with lrintf, which converts through float even
+ * when MATRIX_ELEM is double — row indices above 2^24 could lose precision;
+ * consider lrint for the double instantiation. TODO confirm index range.
+ * NOTE(review): i_c is not bounds-checked against c's row count, and duplicate
+ * values in idx produce racing read-modify-writes on the same row of c —
+ * presumably callers guarantee valid, unique indices; verify at call sites.
+ */
+__global__ void cudak_(update_select_rows)(MATRIX_ELEM *c, const MATRIX_ELEM *a, const MATRIX_ELEM *idx,
+ int nrow_a, int ncol_a, int stride_c, int stride_a, double alpha, double beta) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ if (i >= nrow_a || j >= ncol_a) return;
+ int i_c = lrintf(idx[i]);
+ c[j + i_c * stride_c] = c[j + i_c * stride_c] * (1 - beta * alpha) + a[j + i * stride_a] * alpha;
+}
+
__global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
int nrow, int ncol,
int enrow, int encol,
@@ -540,6 +549,17 @@ extern "C" {
a->stride / sizeof(MATRIX_ELEM), val_1, val_2);
cudaStreamSynchronize(0);
}
+
+ /* Host wrapper: launches cudak_(update_select_rows) on the default stream
+ * with one thread per element of a (ceil-div 2D grid over a->ncol x a->nrow),
+ * then blocks on cudaStreamSynchronize(0) until the kernel finishes —
+ * matching the synchronization style of the surrounding wrappers.
+ * Strides are converted from bytes to element counts for the kernel.
+ * NOTE(review): no cudaGetLastError() after the launch, so launch-config
+ * errors go unreported here; this mirrors the file's existing wrappers,
+ * but an error check after the launch would aid debugging.
+ */
+ void cudak_(cuda_update_select_rows)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+ CEIL_DIV(a->nrow, threadsPerBlock.y));
+ cudak_(update_select_rows)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(c), MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(idx),
+ a->nrow, a->ncol, c->stride / sizeof(MATRIX_ELEM),
+ a->stride / sizeof(MATRIX_ELEM), alpha, beta);
+ cudaStreamSynchronize(0);
+ }
void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);