author    Determinant <[email protected]>    2015-05-22 12:50:13 +0800
committer Determinant <[email protected]>    2015-05-22 12:50:13 +0800
commit    6c0671929f24f296c51568ae3d5c93a3e4d5518f (patch)
tree      7f28f155498ec5cf461c7a154032204032d556e1 /matrix
parent    0b128e097d425418499ab2257c5448f14fec3215 (diff)
rename colsum to rowsum; add colsum implementation
Diffstat (limited to 'matrix')
-rw-r--r--    matrix/cukernel.h              3
-rw-r--r--    matrix/generic/cukernel.cu    64
-rw-r--r--    matrix/generic/cumatrix.c     19

3 files changed, 69 insertions(+), 17 deletions(-)
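For clarity: the functions renamed here were misnamed, not wrong. The old colsum/colmax reduce across the columns of each row, yielding an nrow x 1 result, so they become rowsum/rowmax; the newly added colsum reduces down each column, yielding a 1 x ncol result. A minimal CPU sketch of the intended semantics (hypothetical reference code, not part of the commit; assumes row-major storage with a byte stride per row, as in NERV's Matrix struct):

/* Hypothetical CPU reference for the two reductions; `elem_t`
 * stands in for MATRIX_ELEM. */
#include <string.h>

typedef float elem_t;

static void ref_rowsum(const elem_t *a, elem_t *b,
                       int nrow, int ncol, size_t stride)
{
    for (int i = 0; i < nrow; i++)
    {
        const elem_t *row = (const elem_t *)((const char *)a + i * stride);
        elem_t s = 0;
        for (int j = 0; j < ncol; j++)
            s += row[j];
        b[i] = s;                            /* b is nrow x 1 */
    }
}

static void ref_colsum(const elem_t *a, elem_t *b,
                       int nrow, int ncol, size_t stride)
{
    memset(b, 0, ncol * sizeof(elem_t));     /* b is 1 x ncol */
    for (int i = 0; i < nrow; i++)
    {
        const elem_t *row = (const elem_t *)((const char *)a + i * stride);
        for (int j = 0; j < ncol; j++)
            b[j] += row[j];
    }
}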
diff --git a/matrix/cukernel.h b/matrix/cukernel.h
index ea81e5a..67a255e 100644
--- a/matrix/cukernel.h
+++ b/matrix/cukernel.h
@@ -1,7 +1,8 @@
#ifdef NERV_GENERIC_CUKERNEL
void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
+void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b);
+void cudak_(cuda_rowmax)(const Matrix *a, Matrix *b);
void cudak_(cuda_colsum)(const Matrix *a, Matrix *b);
-void cudak_(cuda_colmax)(const Matrix *a, Matrix *b);
void cudak_(cuda_softmax_denominator)(const Matrix *a, const Matrix *max, Matrix *b);
void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max, const Matrix *deno, Matrix *b);
#endif
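Note: cudak_(...) is the name-mangling macro NERV uses for type-generic kernels; the generic source is compiled once per element type. The exact definition lives elsewhere in the tree, but it presumably expands along these lines (a sketch, not the real header):

/* Sketch only -- the real definitions are in NERV's matrix headers.
 * Including the generic .cu once per element type then emits
 * cudak_float_cuda_rowsum, cudak_double_cuda_rowsum, and so on. */
#define MATRIX_ELEM float
#define cudak_(NAME) cudak_float_##NAME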
diff --git a/matrix/generic/cukernel.cu b/matrix/generic/cukernel.cu
index a37ccf4..4b6af61 100644
--- a/matrix/generic/cukernel.cu
+++ b/matrix/generic/cukernel.cu
@@ -27,7 +27,7 @@ __global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
b[idx] = exp(a[idx] - max[0 + i * mstride]) / deno[0 + i * mstride];
}
-__global__ void cudak_(block_reduce_sum)(const MATRIX_ELEM *input,
+__global__ void cudak_(block_reduce_rowsum)(const MATRIX_ELEM *input,
MATRIX_ELEM *output,
const int istride, const int ostride,
const int n) {
@@ -45,7 +45,25 @@ __global__ void cudak_(block_reduce_sum)(const MATRIX_ELEM *input,
output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
}
-__global__ void cudak_(block_reduce_softmax_sum)(const MATRIX_ELEM *input,
+__global__ void cudak_(block_reduce_colsum)(const MATRIX_ELEM *input,
+ MATRIX_ELEM *output,
+ const int istride, const int ostride,
+ const int n) {
+ extern __shared__ MATRIX_ELEM cudak_(arr)[];
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ cudak_(arr)[threadIdx.y] = i < n ? input[blockIdx.x + istride * i] : 0;
+ __syncthreads();
+ for (int offset = blockDim.y >> 1; offset; offset >>= 1)
+ {
+ if (threadIdx.y < offset)
+ cudak_(arr)[threadIdx.y] += cudak_(arr)[threadIdx.y + offset];
+ __syncthreads();
+ }
+ if (threadIdx.y == 0)
+ output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
+}
+
+__global__ void cudak_(block_reduce_softmax_rowsum)(const MATRIX_ELEM *input,
MATRIX_ELEM *output,
const MATRIX_ELEM *max,
const int istride, const int ostride,
@@ -65,7 +83,7 @@ __global__ void cudak_(block_reduce_softmax_sum)(const MATRIX_ELEM *input,
output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
}
-__global__ void cudak_(block_reduce_max)(const MATRIX_ELEM *input,
+__global__ void cudak_(block_reduce_rowmax)(const MATRIX_ELEM *input,
MATRIX_ELEM *output,
const int istride, const int ostride,
const int n) {
@@ -99,7 +117,7 @@ extern "C" {
b->stride / sizeof(MATRIX_ELEM));
}
- void cudak_(cuda_colsum)(const Matrix *a, Matrix *b) {
+ void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) {
dim3 block(CUDA_THREADS_NN, 1);
int ncol = a->ncol;
int blocks_per_row = CEIL_DIV(ncol, block.x);
@@ -107,20 +125,42 @@ extern "C" {
MATRIX_ELEM *res;
size_t stride;
cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
- cudak_(block_reduce_sum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
+ cudak_(block_reduce_rowsum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
(MATRIX_ELEM_PTR(a), res,
a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
ncol);
ncol = blocks_per_row;
assert((unsigned long)ncol <= block.x);
grid.x = 1;
- cudak_(block_reduce_sum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
+ cudak_(block_reduce_rowsum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
(res, MATRIX_ELEM_PTR(b),
stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
ncol);
cudaFree(res);
}
+ void cudak_(cuda_colsum)(const Matrix *a, Matrix *b) {
+ dim3 block(1, CUDA_THREADS_NN);
+ int nrow = a->nrow;
+ int blocks_per_col = CEIL_DIV(nrow, block.y);
+ dim3 grid(a->ncol, blocks_per_col);
+ MATRIX_ELEM *res;
+ size_t stride;
+ cudaMallocPitch(&res, &stride, a->ncol * sizeof(MATRIX_ELEM), blocks_per_col);
+ cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \
+ (MATRIX_ELEM_PTR(a), res,
+ a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
+ nrow);
+ nrow = blocks_per_col;
+ assert((unsigned long)nrow <= block.y);
+ grid.y = 1;
+ cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \
+ (res, MATRIX_ELEM_PTR(b),
+ stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
+ nrow);
+ cudaFree(res);
+ }
+
void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max,
const Matrix *deno, Matrix *b) {
dim3 threadsPerBlock(CUDA_THREADS_N,
@@ -144,7 +184,8 @@ extern "C" {
size_t stride;
assert(max->ncol == 1);
cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
- cudak_(block_reduce_softmax_sum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
+ cudak_(block_reduce_softmax_rowsum) \
+ <<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
(MATRIX_ELEM_PTR(a), res, MATRIX_ELEM_PTR(max),
a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
max->stride / sizeof(MATRIX_ELEM),
@@ -152,14 +193,15 @@ extern "C" {
ncol = blocks_per_row;
assert((unsigned long)ncol <= block.x);
grid.x = 1;
- cudak_(block_reduce_sum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
+ cudak_(block_reduce_rowsum) \
+ <<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
(res, MATRIX_ELEM_PTR(b),
stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
ncol);
cudaFree(res);
}
- void cudak_(cuda_colmax)(const Matrix *a, Matrix *b) {
+ void cudak_(cuda_rowmax)(const Matrix *a, Matrix *b) {
dim3 block(CUDA_THREADS_NN, 1);
int ncol = a->ncol;
int blocks_per_row = CEIL_DIV(ncol, block.x);
@@ -167,14 +209,14 @@ extern "C" {
MATRIX_ELEM *res;
size_t stride;
cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
- cudak_(block_reduce_max)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
+ cudak_(block_reduce_rowmax)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
(MATRIX_ELEM_PTR(a), res,
a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
ncol);
ncol = blocks_per_row;
assert((unsigned long)ncol <= block.x);
grid.x = 1;
- cudak_(block_reduce_max)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
+ cudak_(block_reduce_rowmax)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
(res, MATRIX_ELEM_PTR(b),
stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
ncol);
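Both sum/max wrappers above follow the same two-pass pattern: the first launch reduces each row (or column) into blocks_per_row (or blocks_per_col) partial results held in a pitched scratch buffer, and the second launch, with the corresponding grid dimension collapsed to 1, reduces those partials to the final value. The assert encodes the scheme's limit: the partials from pass one must fit in a single thread block. A self-contained sketch of the same shared-memory tree reduction on a flat array (hypothetical toy code, not from this repository):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void partial_sum(const float *in, float *out, int n)
{
    extern __shared__ float arr[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    arr[threadIdx.x] = i < n ? in[i] : 0.0f;
    __syncthreads();
    /* power-of-two tree walk, as in block_reduce_rowsum above */
    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
    {
        if (threadIdx.x < offset)
            arr[threadIdx.x] += arr[threadIdx.x + offset];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = arr[0];
}

int main()
{
    const int n = 1 << 16, threads = 256;
    const int blocks = (n + threads - 1) / threads;   /* 256 */
    float *in, *tmp;
    cudaMallocManaged(&in, n * sizeof(float));
    cudaMallocManaged(&tmp, blocks * sizeof(float));
    for (int i = 0; i < n; i++) in[i] = 1.0f;
    /* pass 1: n elements -> `blocks` partial sums */
    partial_sum<<<blocks, threads, threads * sizeof(float)>>>(in, tmp, n);
    /* pass 2: partials -> one value; requires blocks <= threads,
     * the same constraint the asserts above enforce */
    partial_sum<<<1, threads, threads * sizeof(float)>>>(tmp, tmp, blocks);
    cudaDeviceSynchronize();
    printf("sum = %g (expected %d)\n", tmp[0], n);
    cudaFree(in);
    cudaFree(tmp);
    return 0;
}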
diff --git a/matrix/generic/cumatrix.c b/matrix/generic/cumatrix.c
index d98c559..f846a73 100644
--- a/matrix/generic/cumatrix.c
+++ b/matrix/generic/cumatrix.c
@@ -95,25 +95,33 @@ static int nerv_matrix_(softmax)(lua_State *L) {
Matrix *max = nerv_matrix_(new_)(a->nrow, 1);
Matrix *dno = nerv_matrix_(new_)(a->nrow, 1);
Matrix *b = nerv_matrix_(new_)(a->nrow, a->ncol);
- cudak_(cuda_colmax)(a, max);
+ cudak_(cuda_rowmax)(a, max);
cudak_(cuda_softmax_denominator)(a, max, dno);
cudak_(cuda_softmax_final)(a, max, dno, b);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
}
-static int nerv_matrix_(colsum)(lua_State *L) {
+static int nerv_matrix_(rowsum)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
Matrix *b = nerv_matrix_(new_)(a->nrow, 1);
+ cudak_(cuda_rowsum)(a, b);
+ luaT_pushudata(L, b, nerv_matrix_(tname));
+ return 1;
+}
+
+static int nerv_matrix_(colsum)(lua_State *L) {
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ Matrix *b = nerv_matrix_(new_)(1, a->ncol);
cudak_(cuda_colsum)(a, b);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
}
-static int nerv_matrix_(colmax)(lua_State *L) {
+static int nerv_matrix_(rowmax)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
Matrix *b = nerv_matrix_(new_)(a->nrow, 1);
- cudak_(cuda_colmax)(a, b);
+ cudak_(cuda_rowmax)(a, b);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
}
@@ -125,7 +133,8 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"sigmoid", nerv_matrix_(sigmoid)},
{"softmax", nerv_matrix_(softmax)},
{"colsum", nerv_matrix_(colsum)},
- {"colmax", nerv_matrix_(colmax)},
+ {"rowsum", nerv_matrix_(rowsum)},
+ {"rowmax", nerv_matrix_(rowmax)},
{NULL, NULL}
};
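One caveat worth noting for future cleanup: none of the wrappers check CUDA return codes, so a failed cudaMallocPitch or kernel launch passes silently. A hypothetical guard (not part of this commit, and not something NERV ships) would look like:

/* Hypothetical error-check macro for hardening the wrappers later. */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                           \
    do {                                                           \
        cudaError_t err_ = (call);                                 \
        if (err_ != cudaSuccess) {                                 \
            fprintf(stderr, "%s:%d: CUDA error: %s\n",             \
                    __FILE__, __LINE__, cudaGetErrorString(err_)); \
            exit(1);                                               \
        }                                                          \
    } while (0)

/* usage: CUDA_CHECK(cudaMallocPitch(&res, &stride, ..., a->nrow)); */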