author     cloudygoose <[email protected]>  2015-05-31 11:14:20 +0800
committer  cloudygoose <[email protected]>  2015-05-31 11:14:20 +0800
commit     cfd06bb974c7088837a107d721b1311a4f160572 (patch)
tree       4787a60f272e2cb9aa17ec1742a99b043487005e /matrix/generic
parent     c4a70df7297f43e3251fa5c0149e813451e52391 (diff)
parent     c6f6ac13a1cf00e440e998422f89b42c69b073a6 (diff)

Merge remote-tracking branch 'upstream/master'
Diffstat (limited to 'matrix/generic')
-rw-r--r--  matrix/generic/cumatrix.c | 100
-rw-r--r--  matrix/generic/matrix.c   |  16
-rw-r--r--  matrix/generic/mmatrix.c  |  21
3 files changed, 72 insertions(+), 65 deletions(-)
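
Note: the merge threads a lua_State *L through the MATRIX_DATA_* macros and the backend helpers so that failed CUDA and cuBLAS calls can raise Lua errors instead of being silently ignored. The local CHECK_SAME_DIMENSION macro is dropped from cumatrix.c in favour of the new "../cuda_helper.h" include, and raw cudaMemcpy2D/geam/gemm calls are wrapped in CUDA_SAFE_SYNC_CALL and CUBLAS_SAFE_CALL. The helper header itself is not part of this diff; the block below is only a plausible sketch of what it might define, assuming the wrappers translate status codes into nerv_error calls.

/* Hypothetical sketch of ../cuda_helper.h (NOT part of this patch).
 * Both wrappers assume a lua_State *L is in scope at the call site,
 * which is why every backend helper below now takes L explicitly. */
#define CUDA_SAFE_SYNC_CALL(call) \
    do { \
        cudaError_t err = (call); \
        if (err != cudaSuccess) \
            nerv_error(L, "CUDA error: %s", cudaGetErrorString(err)); \
        cudaDeviceSynchronize(); \
    } while (0)

#define CUBLAS_SAFE_CALL(call) \
    do { \
        if ((call) != CUBLAS_STATUS_SUCCESS) \
            nerv_error(L, "cuBLAS call failed"); \
    } while (0)

/* Moved verbatim from cumatrix.c (see the removed lines below). */
#define CHECK_SAME_DIMENSION(a, b) \
    do { \
        if (!((a)->nrow == (b)->nrow && (a)->ncol == (b)->ncol)) \
            nerv_error(L, "Matrices should be of the same dimension"); \
    } while (0)
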
diff --git a/matrix/generic/cumatrix.c b/matrix/generic/cumatrix.c
index c4ba937..7b0aa2a 100644
--- a/matrix/generic/cumatrix.c
+++ b/matrix/generic/cumatrix.c
@@ -2,11 +2,11 @@
#include "matrix.h"
#include "elem_type.h"
-#define MATRIX_DATA_FREE(ptr) cuda_matrix_(free)(ptr)
-#define MATRIX_DATA_ALLOC(dptr, stride, width, height) \
- cuda_matrix_(alloc)(dptr, stride, width, height)
-#define MATRIX_DATA_WRITE(data, idx, val) cuda_matrix_(write)(data, idx, val)
-#define MATRIX_DATA_READ(data, idx) cuda_matrix_(read)(data, idx)
+#define MATRIX_DATA_FREE(L, ptr) cuda_matrix_(free)(L, ptr)
+#define MATRIX_DATA_ALLOC(L, dptr, stride, width, height) \
+ cuda_matrix_(alloc)(L, dptr, stride, width, height)
+#define MATRIX_DATA_WRITE(L, data, idx, val) cuda_matrix_(write)(L, data, idx, val)
+#define MATRIX_DATA_READ(L, data, idx) cuda_matrix_(read)(L, data, idx)
#define MATRIX_INIT(L) cuda_matrix_(init)(L)
#define MATRIX_BASE_TNAME nerv_matrix_cuda_tname
#define NERV_GENERIC_MATRIX
@@ -17,28 +17,24 @@
#include "cuda_runtime.h"
#include "driver_types.h"
#include "cublas_v2.h"
-
-#define CHECK_SAME_DIMENSION(a, b) \
- do { \
- if (!(a->nrow == b->nrow && a->ncol == b->ncol)) \
- nerv_error(L, "Matrices should be of the same dimension"); \
- } while (0)
+#include "../cuda_helper.h"
static cublasHandle_t cublas_handle;
-Matrix *nerv_matrix_(new_)(long nrow, long ncol);
-void nerv_matrix_(data_free)(Matrix *self);
+Matrix *nerv_matrix_(new_)(lua_State *L, long nrow, long ncol);
+void nerv_matrix_(data_free)(lua_State *L, Matrix *self);
-static void nerv_matrix_(add_)(const Matrix *a, const Matrix *b,
+static void nerv_matrix_(add_)(lua_State *L, const Matrix *a, const Matrix *b,
const Matrix *c,
MATRIX_ELEM alpha, MATRIX_ELEM beta) {
- NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
+ CUBLAS_SAFE_CALL(
+ NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
a->ncol, a->nrow,
&alpha,
MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
&beta,
MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM),
- MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM));
+ MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM)));
}
static int nerv_matrix_(add)(lua_State *L) {
@@ -49,7 +45,7 @@ static int nerv_matrix_(add)(lua_State *L) {
MATRIX_ELEM beta = luaL_checknumber(L, 5); /* beta */
CHECK_SAME_DIMENSION(a, b);
CHECK_SAME_DIMENSION(a, c);
- nerv_matrix_(add_)(a, b, c, alpha, beta);
+ nerv_matrix_(add_)(L, a, b, c, alpha, beta);
return 0;
}
@@ -78,19 +74,20 @@ static int nerv_matrix_(mul)(lua_State *L) {
if (an != bm)
nerv_error(L, "Wrong dimension of multipliers");
/* MATRIX_ELEM alpha = 1.0f, beta = 0.0f; */
- NERV_CUBLAS_(gemm)(cublas_handle, tb, ta,
+ CUBLAS_SAFE_CALL(
+ NERV_CUBLAS_(gemm)(cublas_handle, tb, ta,
bn, am, bm,
&alpha,
MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM),
MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
&beta,
- MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM));
+ MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM)));
return 0;
}
static int nerv_matrix_(create)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
- Matrix *b = nerv_matrix_(new_)(a->nrow, a->ncol);
+ Matrix *b = nerv_matrix_(new_)(L, a->nrow, a->ncol);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
}
@@ -116,20 +113,20 @@ static int nerv_matrix_(sigmoid_grad)(lua_State *L) {
static int nerv_matrix_(softmax)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
- Matrix *max = nerv_matrix_(new_)(a->nrow, 1);
- Matrix *dno = nerv_matrix_(new_)(a->nrow, 1);
+ Matrix *max = nerv_matrix_(new_)(L, a->nrow, 1);
+ Matrix *dno = nerv_matrix_(new_)(L, a->nrow, 1);
CHECK_SAME_DIMENSION(a, b);
cudak_(cuda_rowmax)(a, max);
cudak_(cuda_softmax_denominator)(a, max, dno);
cudak_(cuda_softmax_final)(a, max, dno, b);
- nerv_matrix_(data_free)(max);
- nerv_matrix_(data_free)(dno);
+ nerv_matrix_(data_free)(L, max);
+ nerv_matrix_(data_free)(L, dno);
return 0;
}
static int nerv_matrix_(rowsum)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
- Matrix *b = nerv_matrix_(new_)(a->nrow, 1);
+ Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1);
cudak_(cuda_rowsum)(a, b);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
@@ -137,7 +134,7 @@ static int nerv_matrix_(rowsum)(lua_State *L) {
static int nerv_matrix_(colsum)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
- Matrix *b = nerv_matrix_(new_)(1, a->ncol);
+ Matrix *b = nerv_matrix_(new_)(L, 1, a->ncol);
cudak_(cuda_colsum)(a, b);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
@@ -145,7 +142,7 @@ static int nerv_matrix_(colsum)(lua_State *L) {
static int nerv_matrix_(rowmax)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
- Matrix *b = nerv_matrix_(new_)(a->nrow, 1);
+ Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1);
cudak_(cuda_rowmax)(a, b);
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
@@ -175,10 +172,11 @@ static int nerv_matrix_(copy_fromd)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
CHECK_SAME_DIMENSION(a, b);
- cudaMemcpy2D(MATRIX_ELEM_PTR(a), a->stride,
+ CUDA_SAFE_SYNC_CALL(
+ cudaMemcpy2D(MATRIX_ELEM_PTR(a), a->stride,
MATRIX_ELEM_PTR(b), b->stride,
sizeof(MATRIX_ELEM) * b->ncol, b->nrow,
- cudaMemcpyDeviceToDevice);
+ cudaMemcpyDeviceToDevice));
return 0;
}
@@ -186,10 +184,11 @@ static int nerv_matrix_(copy_tod)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
CHECK_SAME_DIMENSION(a, b);
- cudaMemcpy2D(MATRIX_ELEM_PTR(b), b->stride,
+ CUDA_SAFE_SYNC_CALL(
+ cudaMemcpy2D(MATRIX_ELEM_PTR(b), b->stride,
MATRIX_ELEM_PTR(a), a->stride,
sizeof(MATRIX_ELEM) * a->ncol, a->nrow,
- cudaMemcpyDeviceToDevice);
+ cudaMemcpyDeviceToDevice));
return 0;
}
@@ -198,10 +197,11 @@ static int nerv_matrix_(copy_fromh)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
CHECK_SAME_DIMENSION(a, b);
- cudaMemcpy2D(MATRIX_ELEM_PTR(a), a->stride,
+ CUDA_SAFE_SYNC_CALL(
+ cudaMemcpy2D(MATRIX_ELEM_PTR(a), a->stride,
MATRIX_ELEM_PTR(b), b->stride,
sizeof(MATRIX_ELEM) * b->ncol, b->nrow,
- cudaMemcpyHostToDevice);
+ cudaMemcpyHostToDevice));
return 0;
}
@@ -209,24 +209,26 @@ static int nerv_matrix_(copy_toh)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
CHECK_SAME_DIMENSION(a, b);
- cudaMemcpy2D(MATRIX_ELEM_PTR(b), b->stride,
+ CUDA_SAFE_SYNC_CALL(
+ cudaMemcpy2D(MATRIX_ELEM_PTR(b), b->stride,
MATRIX_ELEM_PTR(a), a->stride,
sizeof(MATRIX_ELEM) * a->ncol, a->nrow,
- cudaMemcpyDeviceToHost);
+ cudaMemcpyDeviceToHost));
return 0;
}
static int nerv_matrix_(trans)(lua_State *L) {
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
- Matrix *b = nerv_matrix_(new_)(a->ncol, a->nrow);
+ Matrix *b = nerv_matrix_(new_)(L, a->ncol, a->nrow);
MATRIX_ELEM alpha = 1, beta = 0;
- NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
+ CUBLAS_SAFE_CALL(
+ NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
a->nrow, a->ncol,
&alpha,
MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
&beta,
MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
- MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM));
+ MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM)));
luaT_pushudata(L, b, nerv_matrix_(tname));
return 1;
}
@@ -277,23 +279,27 @@ static void cuda_matrix_(init)(lua_State *L) {
cublasCreate(&cublas_handle);
}
-static void cuda_matrix_(free)(MATRIX_ELEM *ptr) {
- cudaFree(ptr);
+static void cuda_matrix_(free)(lua_State *L, MATRIX_ELEM *ptr) {
+ CUDA_SAFE_SYNC_CALL(cudaFree(ptr));
}
-static void cuda_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
- long width, long height) {
- cudaMallocPitch((void **)dptr, stride, width, height);
+static void cuda_matrix_(alloc)(lua_State *L, MATRIX_ELEM **dptr,
+ size_t *stride, long width, long height) {
+ CUDA_SAFE_SYNC_CALL(cudaMallocPitch((void **)dptr, stride, width, height));
}
-static MATRIX_ELEM cuda_matrix_(read)(MATRIX_ELEM *data, int idx) {
+static MATRIX_ELEM cuda_matrix_(read)(lua_State *L, MATRIX_ELEM *data,
+ int idx) {
MATRIX_ELEM res;
- cudaMemcpy(&res, data + idx, sizeof(MATRIX_ELEM), cudaMemcpyDeviceToHost);
+ CUDA_SAFE_SYNC_CALL(cudaMemcpy(&res, data + idx,
+ sizeof(MATRIX_ELEM), cudaMemcpyDeviceToHost));
return res;
}
-static void cuda_matrix_(write)(MATRIX_ELEM *data, int idx, MATRIX_ELEM val) {
- cudaMemcpy(data + idx, &val, sizeof(MATRIX_ELEM), cudaMemcpyHostToDevice);
+static void cuda_matrix_(write)(lua_State *L, MATRIX_ELEM *data,
+ int idx, MATRIX_ELEM val) {
+ CUDA_SAFE_SYNC_CALL(cudaMemcpy(data + idx, &val,
+ sizeof(MATRIX_ELEM), cudaMemcpyHostToDevice));
}
int nerv_matrix_(get_elem)(lua_State *L) {
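
Taken together, the cumatrix.c changes establish a convention for device-side methods: take the lua_State, validate shapes with CHECK_SAME_DIMENSION where needed, and route every CUDA runtime or cuBLAS call through the safe-call wrappers. A minimal sketch of a new method written against that convention (illustrative only; fill_zero and its cudaMemset2D use are not part of this patch):

static int nerv_matrix_(fill_zero)(lua_State *L) {
    /* hypothetical method: zero the device buffer, respecting the row pitch */
    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
    CUDA_SAFE_SYNC_CALL(cudaMemset2D(MATRIX_ELEM_PTR(a), a->stride,
                                     0, sizeof(MATRIX_ELEM) * a->ncol,
                                     a->nrow));
    return 0;
}
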
diff --git a/matrix/generic/matrix.c b/matrix/generic/matrix.c
index d1cde88..e0098de 100644
--- a/matrix/generic/matrix.c
+++ b/matrix/generic/matrix.c
@@ -8,12 +8,12 @@
extern const char *nerv_matrix_(tname);
extern const char *MATRIX_BASE_TNAME;
-void nerv_matrix_(data_free)(Matrix *self) {
+void nerv_matrix_(data_free)(lua_State *L, Matrix *self) {
assert(*self->data_ref > 0);
if (--(*self->data_ref) == 0)
{
/* free matrix data */
- MATRIX_DATA_FREE(MATRIX_ELEM_PTR(self));
+ MATRIX_DATA_FREE(L, MATRIX_ELEM_PTR(self));
free(self->data_ref);
free(self);
}
@@ -23,12 +23,12 @@ void nerv_matrix_(data_retain)(Matrix *self) {
(*self->data_ref)++;
}
-Matrix *nerv_matrix_(new_)(long nrow, long ncol) {
+Matrix *nerv_matrix_(new_)(lua_State *L, long nrow, long ncol) {
Matrix *self = (Matrix *)malloc(sizeof(Matrix));
self->nrow = nrow;
self->ncol = ncol;
self->nmax = self->nrow * self->ncol;
- MATRIX_DATA_ALLOC(&MATRIX_ELEM_PTR(self), &self->stride,
+ MATRIX_DATA_ALLOC(L, &MATRIX_ELEM_PTR(self), &self->stride,
sizeof(MATRIX_ELEM) * self->ncol, self->nrow);
self->data_ref = (long *)malloc(sizeof(long));
*self->data_ref = 0;
@@ -37,7 +37,7 @@ Matrix *nerv_matrix_(new_)(long nrow, long ncol) {
}
int nerv_matrix_(new)(lua_State *L) {
- luaT_pushudata(L, nerv_matrix_(new_)(luaL_checkinteger(L, 1),
+ luaT_pushudata(L, nerv_matrix_(new_)(L, luaL_checkinteger(L, 1),
luaL_checkinteger(L, 2)),
nerv_matrix_(tname));
return 1;
@@ -45,7 +45,7 @@ int nerv_matrix_(new)(lua_State *L) {
int nerv_matrix_(destroy)(lua_State *L) {
Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
- nerv_matrix_(data_free)(self);
+ nerv_matrix_(data_free)(L, self);
return 1;
}
@@ -73,7 +73,7 @@ static int nerv_matrix_(newindex)(lua_State *L) {
{
if (idx < 0 || idx >= self->ncol)
nerv_error(L, "index must be within range [0, %d)", self->ncol);
- MATRIX_DATA_WRITE(MATRIX_ELEM_PTR(self), idx,
+ MATRIX_DATA_WRITE(L, MATRIX_ELEM_PTR(self), idx,
luaL_checknumber(L, 3));
}
else
@@ -98,7 +98,7 @@ static int nerv_matrix_(index)(lua_State *L) {
{
if (idx < 0 || idx >= self->ncol)
nerv_error(L, "index must be within range [0, %d)", self->ncol);
- lua_pushnumber(L, MATRIX_DATA_READ(MATRIX_ELEM_PTR(self), idx));
+ lua_pushnumber(L, MATRIX_DATA_READ(L, MATRIX_ELEM_PTR(self), idx));
}
else
{
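
The matrix.c changes only thread L through to the backend macros; the reference-counting scheme itself is unchanged. The pattern already visible in cumatrix.c's softmax still applies: temporaries created with nerv_matrix_(new_) that are never pushed to Lua must be released explicitly, now passing L so a failing backend free can be reported. A small sketch of that pattern (the helper name is hypothetical):

static void nerv_matrix_(with_scratch)(lua_State *L, const Matrix *a) {
    /* hypothetical helper: allocate a per-row scratch column, use it,
     * then drop the reference so MATRIX_DATA_FREE runs once data_ref
     * falls back to zero */
    Matrix *tmp = nerv_matrix_(new_)(L, a->nrow, 1);
    /* ... fill and consume tmp here ... */
    nerv_matrix_(data_free)(L, tmp);
}
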
diff --git a/matrix/generic/mmatrix.c b/matrix/generic/mmatrix.c
index 4b43572..3a9ae79 100644
--- a/matrix/generic/mmatrix.c
+++ b/matrix/generic/mmatrix.c
@@ -1,21 +1,22 @@
#ifdef NERV_GENERIC_MMATRIX
#include "matrix.h"
#include "elem_type.h"
-#define MATRIX_DATA_FREE(ptr) free(ptr)
-#define MATRIX_DATA_ALLOC(dptr, stride, width, height) \
- host_matrix_(alloc)(dptr, stride, width, height)
-#define MATRIX_DATA_STRIDE(ncol) (sizeof(MATRIX_ELEM) * (ncol))
-#define MATRIX_DATA_WRITE(data, idx, val) (data[idx] = val)
-#define MATRIX_DATA_READ(data, idx) (data[idx])
+#define MATRIX_DATA_FREE(L, ptr) free(ptr)
+#define MATRIX_DATA_ALLOC(L, dptr, stride, width, height) \
+ host_matrix_(alloc)(L, dptr, stride, width, height)
+#define MATRIX_DATA_WRITE(L, data, idx, val) (data[idx] = val)
+#define MATRIX_DATA_READ(L, data, idx) (data[idx])
#define MATRIX_INIT(L) host_matrix_(init)(L)
#define MATRIX_BASE_TNAME nerv_matrix_host_tname
#define NERV_GENERIC_MATRIX
#include "../../common.h"
#include "../../io/chunk_file.h"
-static void host_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
- long width, long height) {
- *dptr = (MATRIX_ELEM *)malloc(width * height);
+static void host_matrix_(alloc)(lua_State *L,
+ MATRIX_ELEM **dptr, size_t *stride,
+ long width, long height) {
+ if ((*dptr = (MATRIX_ELEM *)malloc(width * height)) == NULL)
+ nerv_error(L, "mmatrix insufficient memory");
*stride = width;
}
@@ -53,7 +54,7 @@ int nerv_matrix_(load)(lua_State *L) {
FILE *fp = chunk->fp;
if (fscanf(fp, "%ld %ld", &nrow, &ncol) != 2)
return 0;
- self = nerv_matrix_(new_)(nrow, ncol);
+ self = nerv_matrix_(new_)(L, nrow, ncol);
for (i = 0; i < nrow; i++)
{
MATRIX_ELEM *row = MATRIX_ROW_PTR(self, i);