about summary refs log tree commit diff
path: root/matrix
diff options
context:
space:
mode:
Diffstat (limited to 'matrix')
-rw-r--r--  matrix/cukernel.cu        62
-rw-r--r--  matrix/cukernel.h          1
-rw-r--r--  matrix/cumatrix.c         16
-rw-r--r--  matrix/generic/matrix.h    2
-rw-r--r--  matrix/matrix.c            3
5 files changed, 71 insertions, 13 deletions
diff --git a/matrix/cukernel.cu b/matrix/cukernel.cu
index 91e7e35..d6d7997 100644
--- a/matrix/cukernel.cu
+++ b/matrix/cukernel.cu
@@ -1,5 +1,9 @@
+#include <assert.h>
#include "generic/matrix.h"
+#include <stdio.h>
+#include "cuda.h"
#define CUDA_THREADS_N 16
+#define CUDA_THREADS_NN (16 * 16)
#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
__global__ void sigmoid(const float *a, float *b,
int nrow, int ncol, int stride) {
@@ -11,11 +15,55 @@ __global__ void sigmoid(const float *a, float *b,
b[idx] = 1.0 / (1.0 + exp(-a[idx]));
}
-extern "C" void cuda_sigmoid(const Matrix *a, Matrix *b) {
- dim3 threadsPerBlock(CUDA_THREADS_N,
- CUDA_THREADS_N);
- dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
- CEIL_DIV(b->nrow, threadsPerBlock.y));
- sigmoid<<<numBlocks, threadsPerBlock>>>(a->data.f, b->data.f, b->nrow, b->ncol,
- b->stride / sizeof(float));
+__global__ void block_sum(const float *input, float *output,
+ const int istride, const int ostride,
+ const int n) {
+ extern __shared__ float arr[];
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ arr[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : 0;
+ __syncthreads();
+ for (int offset = blockDim.x >> 1; offset; offset >>= 1)
+ {
+ if (threadIdx.x < offset)
+ arr[threadIdx.x] += arr[threadIdx.x + offset];
+ __syncthreads();
+ }
+ if (threadIdx.x == 0)
+ {
+ /* printf("bx: %d by: %d arr: %f\n", blockIdx.x, blockIdx.y, arr[0]); */
+ output[blockIdx.x + ostride * blockIdx.y] = arr[0];
+ }
+}
+
+extern "C" {
+ void cuda_sigmoid(const Matrix *a, Matrix *b) {
+ dim3 threadsPerBlock(CUDA_THREADS_N,
+ CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+ CEIL_DIV(b->nrow, threadsPerBlock.y));
+ sigmoid<<<numBlocks, threadsPerBlock>>>(a->data.f, b->data.f, b->nrow, b->ncol,
+ b->stride / sizeof(float));
+ }
+
+ void cuda_rowsum(const Matrix *a, Matrix *b) {
+ dim3 block(CUDA_THREADS_NN, 1);
+ int ncol = a->ncol;
+ int blocks_per_row = CEIL_DIV(ncol, block.x);
+ dim3 grid(blocks_per_row, a->nrow);
+ float *res;
+ size_t stride;
+ cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(float), a->nrow);
+ block_sum<<<grid, block, block.x * sizeof(float)>>> \
+ (a->data.f, res,
+ a->stride / sizeof(float), stride / sizeof(float),
+ ncol);
+ ncol = blocks_per_row;
+ assert(ncol <= block.x);
+ grid.x = 1;
+ block_sum<<<grid, block, block.x * sizeof(float)>>> \
+ (res, b->data.f,
+ stride / sizeof(float), b->stride / sizeof(float),
+ ncol);
+ cudaFree(res);
+ }
}
diff --git a/matrix/cukernel.h b/matrix/cukernel.h
index 5b9e3a6..f86a69b 100644
--- a/matrix/cukernel.h
+++ b/matrix/cukernel.h
@@ -1,4 +1,5 @@
#ifndef NERV_CUKERNEL_H
#define NERV_CUKERNEL_H
void cuda_sigmoid(const Matrix *a, Matrix *b);
+void cuda_rowsum(const Matrix *a, Matrix *b);
#endif
diff --git a/matrix/cumatrix.c b/matrix/cumatrix.c
index 7759ca1..49b7fbf 100644
--- a/matrix/cumatrix.c
+++ b/matrix/cumatrix.c
@@ -9,6 +9,7 @@
#include "generic/matrix.h"
#include "cukernel.h"
#include "cuda.h"
+#include "cuda_runtime.h"
#include "driver_types.h"
#include "cublas_v2.h"
@@ -65,10 +66,19 @@ static int nerv_float_matrix_(sigmoid)(lua_State *L) {
return 1;
}
+static int nerv_float_matrix_(rowsum)(lua_State *L) {
+ Matrix *a = luaT_checkudata(L, 1, nerv_float_matrix_(tname));
+ Matrix *b = nerv_float_matrix_(new_)(a->nrow, 1);
+ cuda_rowsum(a, b);
+ luaT_pushudata(L, b, nerv_float_matrix_(tname));
+ return 1;
+}
+
static const luaL_Reg nerv_float_matrix_(extra_methods)[] = {
{"__add__", nerv_float_matrix_(add)},
{"__mul__", nerv_float_matrix_(mul)},
{"sigmoid", nerv_float_matrix_(sigmoid)},
+ {"rowsum", nerv_float_matrix_(rowsum)},
{NULL, NULL}
};
@@ -77,13 +87,13 @@ static void cuda_float_init(lua_State *L) {
cublasCreate(&cublas_handle);
}
-static cuda_float_array_free(float *ptr) {
+static void cuda_float_array_free(float *ptr) {
cudaFree(ptr);
}
-static cuda_float_array_alloc(float **dptr, long *stride,
+static void cuda_float_array_alloc(float **dptr, size_t *stride,
long width, long height) {
- cudaMallocPitch(dptr, stride, width, height);
+ cudaMallocPitch((void **)dptr, stride, width, height);
}
static float cuda_float_array_read(float *data, int idx) {
diff --git a/matrix/generic/matrix.h b/matrix/generic/matrix.h
index 655ff3d..264859b 100644
--- a/matrix/generic/matrix.h
+++ b/matrix/generic/matrix.h
@@ -2,7 +2,7 @@
#define NERV_GENERIC_MATRIX_H
typedef struct Matrix {
- long stride; /* size of a row */
+ size_t stride; /* size of a row */
long ncol, nrow, nmax; /* dimension of the matrix */
union {
float *f;
diff --git a/matrix/matrix.c b/matrix/matrix.c
index ef311d6..b392f56 100644
--- a/matrix/matrix.c
+++ b/matrix/matrix.c
@@ -10,7 +10,7 @@
const char *nerv_float_matrix_(tname) = "nerv.FloatMatrix";
-static void host_float_array_alloc(float **dptr, long *stride,
+static void host_float_array_alloc(float **dptr, size_t *stride,
long width, long height) {
*dptr = (float *)malloc(width * height);
*stride = width;
@@ -29,7 +29,6 @@ int nerv_float_matrix_(set_elem)(lua_State *L) {
Matrix *self = luaT_checkudata(L, 1, nerv_float_matrix_(tname));
int idx = luaL_checkinteger(L, 2);
float v = luaL_checknumber(L, 3);
- long upper = self->nrow * self->ncol;
if (idx < 0 || idx >= self->nmax)
nerv_error(L, "index must be within range [0, %d)", self->nmax);
self->data.f[idx] = v;