Diffstat (limited to 'nerv/matrix')
-rw-r--r--  nerv/matrix/cuda_helper.h        75
-rw-r--r--  nerv/matrix/cukernel.cu          17
-rw-r--r--  nerv/matrix/cukernel.h           20
-rw-r--r--  nerv/matrix/cumatrix.c           44
-rw-r--r--  nerv/matrix/generic/cumatrix.c  479
-rw-r--r--  nerv/matrix/generic/matrix.c    102
-rw-r--r--  nerv/matrix/generic/matrix.h     19
-rw-r--r--  nerv/matrix/generic/mmatrix.c    80
-rw-r--r--  nerv/matrix/init.c                9
-rw-r--r--  nerv/matrix/mmatrix.c            34
10 files changed, 247 insertions, 632 deletions
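
Every file below follows the same refactoring pattern: the computational routines (cuBLAS/CUDA kernel calls, dimension checks, profiling) move out of nerv/matrix into nerv/lib/matrix, while the Lua bindings that remain here gain a lua_ infix and shrink to thin wrappers that forward to the library and translate a Status out-parameter into a Lua error. A minimal sketch of that shape, assembled from the nerv_matrix_(lua_add) hunk below; the contents of Status and of the NERV_LUA_CHECK_STATUS macro are defined outside this diff and are only assumed here:

/* Sketch of the binding pattern used throughout this change; the argument
 * handling is taken from the lua_add hunk below, and NERV_LUA_CHECK_STATUS
 * is assumed to raise a Lua error when the library call reports failure. */
static int nerv_matrix_(lua_add)(lua_State *L) {
    Status status;
    Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
    const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
    const Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname));
    MATRIX_ELEM alpha = luaL_checknumber(L, 4);
    MATRIX_ELEM beta = luaL_checknumber(L, 5);
    /* dimension checks, PROFILE_START/STOP and the cublas geam call now
       live in nerv/lib/matrix/generic/cumatrix.c */
    nerv_matrix_(add)(c, a, b, alpha, beta, &status);
    NERV_LUA_CHECK_STATUS(L, status);
    return 0;
}
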
diff --git a/nerv/matrix/cuda_helper.h b/nerv/matrix/cuda_helper.h deleted file mode 100644 index fde6f18..0000000 --- a/nerv/matrix/cuda_helper.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef NERV_CUDA_HELPER_H -#define NERV_CUDA_HELPER_H -#include "cuda.h" -#include "cuda_runtime.h" -#include "driver_types.h" -#include "cublas_v2.h" -#define CUBLAS_SAFE_SYNC_CALL(call) \ - do { \ - cublasStatus_t err = (call); \ - if (err != CUBLAS_STATUS_SUCCESS) \ - nerv_error(L, "cumatrix cublas error: %s at %s:%d", \ - cublasGetErrorString(err), __FILE__, __LINE__); \ - cudaDeviceSynchronize(); \ - } while (0) - -#define CUDA_SAFE_CALL(call) \ - do { \ - cudaError_t err = (call); \ - if (err != cudaSuccess) \ - nerv_error(L, "cumatrix CUDA error: %s at %s:%d", \ - cudaGetErrorString(err), __FILE__, __LINE__); \ - } while (0) - -#define CUDA_SAFE_SYNC_CALL(call) \ - do { \ - CUDA_SAFE_CALL(call); \ - cudaDeviceSynchronize(); \ - } while (0) - -#define CHECK_SAME_DIMENSION(a, b) \ - do { \ - if (!(a->nrow == b->nrow && a->ncol == b->ncol)) \ - nerv_error(L, "matrices should be of the same dimension"); \ - } while (0) - -static const char *cublasGetErrorString(cublasStatus_t err) { - switch (err) - { - case CUBLAS_STATUS_SUCCESS: - return "CUBLAS_STATUS_SUCCESS"; - case CUBLAS_STATUS_NOT_INITIALIZED: - return "CUBLAS_STATUS_NOT_INITIALIZED"; - case CUBLAS_STATUS_ALLOC_FAILED: - return "CUBLAS_STATUS_ALLOC_FAILED"; - case CUBLAS_STATUS_INVALID_VALUE: - return "CUBLAS_STATUS_INVALID_VALUE"; - case CUBLAS_STATUS_ARCH_MISMATCH: - return "CUBLAS_STATUS_ARCH_MISMATCH"; - case CUBLAS_STATUS_MAPPING_ERROR: - return "CUBLAS_STATUS_MAPPING_ERROR"; - case CUBLAS_STATUS_EXECUTION_FAILED: - return "CUBLAS_STATUS_EXECUTION_FAILED"; - case CUBLAS_STATUS_INTERNAL_ERROR: - return "CUBLAS_STATUS_INTERNAL_ERROR"; -/* case CUBLAS_STATUS_NOT_SUPPORTED: - return "CUBLAS_STATUS_NOT_SUPPORTED"; - case CUBLAS_STATUS_LICENSE_ERROR: - return "CUBLAS_STATUS_LICENSE_ERROR"; */ - } - return "<unknown>"; -} - -#define PROFILE_START \ - do { \ - cudaEventRecord(profile_start, 0); -#define PROFILE_STOP \ - cudaEventRecord(profile_stop, 0); \ - cudaEventSynchronize(profile_stop); \ - float milliseconds = 0; \ - cudaEventElapsedTime(&milliseconds, profile_start, profile_stop); \ - accu_profile(__func__, milliseconds / 1000); \ - } while (0); - -#define PROFILE_END -#endif diff --git a/nerv/matrix/cukernel.cu b/nerv/matrix/cukernel.cu deleted file mode 100644 index a19030a..0000000 --- a/nerv/matrix/cukernel.cu +++ /dev/null @@ -1,17 +0,0 @@ -#define NERV_GENERIC_CUKERNEL - -#define cudak_(NAME) cudak_float_ ## NAME -#define MATRIX_USE_FLOAT -#include "generic/elem_type.h" -#include "generic/cukernel.cu" -#undef cudak_ -#undef MATRIX_USE_FLOAT -#undef MATRIX_ELEM -#undef MATRIX_ELEM_PTR -#undef MATRIX_ELEM_FMT -#undef MATRIX_ELEM_WRITE_FMT - -#define cudak_(NAME) cudak_double_ ## NAME -#define MATRIX_USE_DOUBLE -#include "generic/elem_type.h" -#include "generic/cukernel.cu" diff --git a/nerv/matrix/cukernel.h b/nerv/matrix/cukernel.h deleted file mode 100644 index 8a1494f..0000000 --- a/nerv/matrix/cukernel.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifdef NERV_GENERIC_CUKERNEL -void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, Matrix *c); -void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b); -void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b); -void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr); -void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b); -void cudak_(cuda_rowmax)(const Matrix *a, Matrix 
*b); -void cudak_(cuda_rowmax_idx)(const Matrix *a, Matrix *b, Matrix *idx); -void cudak_(cuda_colsum)(const Matrix *a, Matrix *b); -void cudak_(cuda_colsame)(const Matrix *a, const Matrix *ref, Matrix *b); -void cudak_(cuda_softmax_denominator)(const Matrix *a, const Matrix *max, Matrix *b); -void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max, const Matrix *deno, Matrix *b); -void cudak_(cuda_add_row)(const Matrix *a, Matrix *b, double beta); -void cudak_(cuda_fill)(Matrix *a, double val); -void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context); -void cudak_(cuda_rearrange_frm)(const Matrix *a, Matrix *b, int step); -void cudak_(cuda_scale_rows_by_row)(const Matrix *a, Matrix *b); -void cudak_(cuda_scale_rows_by_col)(const Matrix *a, Matrix *b); -void cudak_(cuda_decompress)(const Matrix *a, Matrix *b); -#endif diff --git a/nerv/matrix/cumatrix.c b/nerv/matrix/cumatrix.c index af34fb4..1bcb0f1 100644 --- a/nerv/matrix/cumatrix.c +++ b/nerv/matrix/cumatrix.c @@ -1,6 +1,7 @@ #define NERV_GENERIC_CUMATRIX #include "../common.h" -#include "cuda_helper.h" +#include "../lib/matrix/cumatrix.h" +#include "../lib/matrix/cuda_helper.h" #include <string.h> #define PROFILE_HASHMAP_SIZE 123457 static cublasHandle_t cublas_handle; @@ -8,54 +9,29 @@ static cudaEvent_t profile_start, profile_stop; static HashMap *profile; static int print_profile(lua_State *L) { - (void)L; - size_t i; - fprintf(stderr, "*** [nerv cumatrix profile] **\n"); - for (i = 0; i < profile->size; i++) - { - HashNode *ptr; - for (ptr = profile->bucket[i]; ptr; ptr = ptr->next) - { - fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val); - } - } + nerv_cumatrix_print_profile(); return 0; } static int clear_profile(lua_State *L) { - (void)L; - hashmap_clear(profile); + nerv_cumatrix_clear_profile(); return 0; } -void accu_profile(const char *name, float delta) { - float *val = hashmap_getval(profile, name); - if (!val) - { - val = malloc(sizeof(float)); - *val = 0; - hashmap_setval(profile, name, val); - } - *val += delta; -} - static const luaL_Reg cumatrix_methods[] = { {"print_profile", print_profile}, {"clear_profile", clear_profile}, {NULL, NULL} }; -extern void nerv_matrix_cuda_float_init(lua_State *L); -extern void nerv_matrix_cuda_double_init(lua_State *L); +extern void nerv_matrix_cuda_float_lua_init(lua_State *L); +extern void nerv_matrix_cuda_double_lua_init(lua_State *L); -void nerv_cumatrix_init(lua_State *L) { +void nerv_lua_cumatrix_init(lua_State *L) { luaL_register(L, NULL, cumatrix_methods); - cublasCreate(&cublas_handle); - cudaEventCreate(&profile_start); - cudaEventCreate(&profile_stop); - profile = hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp); - nerv_matrix_cuda_float_init(L); - nerv_matrix_cuda_double_init(L); + nerv_cumatrix_init(); + nerv_matrix_cuda_float_lua_init(L); + nerv_matrix_cuda_double_lua_init(L); } #define MATRIX_USE_FLOAT diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c index b5d1a35..859718e 100644 --- a/nerv/matrix/generic/cumatrix.c +++ b/nerv/matrix/generic/cumatrix.c @@ -1,10 +1,6 @@ #ifdef NERV_GENERIC_CUMATRIX -#include "matrix.h" +#include "../../lib/matrix/generic/matrix.h" #include "elem_type.h" - -#define MATRIX_DATA_FREE(L, ptr) cuda_matrix_(free)(L, ptr) -#define MATRIX_DATA_ALLOC(L, dptr, stride, width, height) \ - cuda_matrix_(alloc)(L, dptr, stride, width, height) #define MATRIX_DATA_WRITE(L, data, idx, val) cuda_matrix_(write)(L, data, idx, val) #define MATRIX_DATA_READ(L, data, idx) 
cuda_matrix_(read)(L, data, idx) #define MATRIX_INIT(L) cuda_matrix_(init)(L) @@ -12,443 +8,303 @@ #define NERV_GENERIC_MATRIX #define NERV_GENERIC_CUKERNEL #include "../../common.h" -#include "../cukernel.h" -#include "../cuda_helper.h" - -Matrix *nerv_matrix_(new_)(lua_State *L, long nrow, long ncol); -void nerv_matrix_(data_free)(lua_State *L, Matrix *self); - -static void nerv_matrix_(add_)(lua_State *L, const Matrix *a, const Matrix *b, - const Matrix *c, - MATRIX_ELEM alpha, MATRIX_ELEM beta) { - PROFILE_START - CUBLAS_SAFE_SYNC_CALL( - NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, - a->ncol, a->nrow, - &alpha, - MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM), - &beta, - MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM), - MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM))); - PROFILE_STOP -} +#include "../../lib/matrix/generic/cumatrix.h" -static int nerv_matrix_(add)(lua_State *L) { +static int nerv_matrix_(lua_add)(lua_State *L) { + Status status; Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); + const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); + const Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); MATRIX_ELEM alpha = luaL_checknumber(L, 4); MATRIX_ELEM beta = luaL_checknumber(L, 5); - CHECK_SAME_DIMENSION(a, b); - CHECK_SAME_DIMENSION(a, c); - nerv_matrix_(add_)(L, a, b, c, alpha, beta); + nerv_matrix_(add)(c, a, b, alpha, beta, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(get_cublas_op)(char ch) { +static int nerv_matrix_(lua_get_cublas_op)(char ch) { return (ch == 'T' || ch == 't') ? CUBLAS_OP_T : CUBLAS_OP_N; } -static int nerv_matrix_(mul)(lua_State *L) { -#define SWAP(a, b) \ - do { int t = (a); (a) = (b); (b) = t; } while (0) - +static int nerv_matrix_(lua_mul)(lua_State *L) { + Status status; Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname)); Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); MATRIX_ELEM alpha = luaL_checknumber(L, 4); MATRIX_ELEM beta = luaL_checknumber(L, 5); int nargs = lua_gettop(L); - int ta = nargs > 5 ? nerv_matrix_(get_cublas_op)(*luaL_checkstring(L, 6)) \ + int ta = nargs > 5 ? nerv_matrix_(lua_get_cublas_op)(*luaL_checkstring(L, 6)) \ : CUBLAS_OP_N; - int tb = nargs > 6 ? nerv_matrix_(get_cublas_op)(*luaL_checkstring(L, 7)) \ + int tb = nargs > 6 ? 
nerv_matrix_(lua_get_cublas_op)(*luaL_checkstring(L, 7)) \ : CUBLAS_OP_N; - int am = a->nrow, an = a->ncol; - int bm = b->nrow, bn = b->ncol; - if (ta == CUBLAS_OP_T) SWAP(am, an); - if (tb == CUBLAS_OP_T) SWAP(bm, bn); - if (an != bm) - nerv_error(L, "Wrong dimension of multipliers"); -/* MATRIX_ELEM alpha = 1.0f, beta = 0.0f; */ - /* Because matrix in Nerv is row-major, here b comes first */ - PROFILE_START - CUBLAS_SAFE_SYNC_CALL( - NERV_CUBLAS_(gemm)(cublas_handle, tb, ta, - bn, am, bm, - &alpha, - MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM), - MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM), - &beta, - MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM))); - PROFILE_STOP + nerv_matrix_(mul)(c, a, b, alpha, beta, ta, tb, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(create)(lua_State *L) { +static int nerv_matrix_(lua_create)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, a->nrow, a->ncol); + Matrix *b = nerv_matrix_(create)(a->nrow, a->ncol, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(sigmoid)(lua_State *L) { +static int nerv_matrix_(lua_sigmoid)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); - CHECK_SAME_DIMENSION(a, b); - PROFILE_START - cudak_(cuda_sigmoid)(b, a); - PROFILE_STOP + nerv_matrix_(sigmoid)(a, b, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(sigmoid_grad)(lua_State *L) { +static int nerv_matrix_(lua_sigmoid_grad)(lua_State *L) { + Status status; Matrix *nerr = luaT_checkudata(L, 1, nerv_matrix_(tname)); Matrix *err = luaT_checkudata(L, 2, nerv_matrix_(tname)); Matrix *output = luaT_checkudata(L, 3, nerv_matrix_(tname)); - CHECK_SAME_DIMENSION(nerr, err); - CHECK_SAME_DIMENSION(nerr, output); - PROFILE_START - cudak_(cuda_sigmoid_grad)(output, err, nerr); - PROFILE_STOP + nerv_matrix_(sigmoid_grad)(nerr, err, output, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(softmax)(lua_State *L) { +static int nerv_matrix_(lua_softmax)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *max, *max_idx; - Matrix *dno; - CHECK_SAME_DIMENSION(a, b); - max = nerv_matrix_(new_)(L, a->nrow, 1); - max_idx = nerv_matrix_(new_)(L, a->nrow, 1); - dno = nerv_matrix_(new_)(L, a->nrow, 1); - PROFILE_START - cudak_(cuda_rowmax_idx)(a, max, max_idx); - cudak_(cuda_softmax_denominator)(a, max, dno); - cudak_(cuda_softmax_final)(a, max, dno, b); - PROFILE_STOP - nerv_matrix_(data_free)(L, max); - nerv_matrix_(data_free)(L, dno); + Matrix *max_idx = nerv_matrix_(softmax)(b, a, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, max_idx, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(rowsum)(lua_State *L) { +static int nerv_matrix_(lua_rowsum)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1); - PROFILE_START - cudak_(cuda_rowsum)(a, b); - PROFILE_STOP + Matrix *b = nerv_matrix_(rowsum)(a, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(colsum)(lua_State *L) { +static int nerv_matrix_(lua_colsum)(lua_State *L) { + Status status; Matrix *a = 
luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, 1, a->ncol); - PROFILE_START - cudak_(cuda_colsum)(a, b); - PROFILE_STOP + Matrix *b = nerv_matrix_(colsum)(a, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(colsame)(lua_State *L) { +static int nerv_matrix_(lua_colsame)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *ref = luaT_checkudata(L, 2, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, 1, a->ncol); - CHECK_SAME_DIMENSION(a, ref); - PROFILE_START - cudak_(cuda_colsame)(a, ref, b); - PROFILE_STOP + const Matrix *ref = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(colsame)(a, ref, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(rowmax)(lua_State *L) { +static int nerv_matrix_(lua_rowmax)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1); - PROFILE_START - cudak_(cuda_rowmax)(a, b); - PROFILE_STOP + Matrix *b = nerv_matrix_(rowmax)(a, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(rowmax_idx)(lua_State *L) { +static int nerv_matrix_(lua_rowmax_idx)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1); - Matrix *idx = nerv_matrix_(new_)(L, a->nrow, 1); - PROFILE_START - cudak_(cuda_rowmax_idx)(a, b, idx); - PROFILE_STOP + Matrix *b; + Matrix *idx; + nerv_matrix_(rowmax_idx)(a, &b, &idx, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); luaT_pushudata(L, idx, nerv_matrix_(tname)); return 2; } -static int nerv_matrix_(add_row)(lua_State *L) { - Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); +static int nerv_matrix_(lua_add_row)(lua_State *L) { + Status status; + const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname)); double beta = luaL_checknumber(L, 3); - if (a->ncol != b->ncol) - nerv_error(L, "the number of columns is not the same"); - if (a->nrow != 1) - nerv_error(L, "a row vector is expected"); - PROFILE_START - cudak_(cuda_add_row)(a, b, beta); - PROFILE_STOP + nerv_matrix_(add_row)(b, a, beta, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(fill)(lua_State *L) { +static int nerv_matrix_(lua_fill)(lua_State *L) { + Status status; Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); double val = luaL_checknumber(L, 2); - PROFILE_START - cudak_(cuda_fill)(self, val); - PROFILE_STOP + nerv_matrix_(fill)(self, val, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(copy_fromd)(lua_State *L) { +static int nerv_matrix_(lua_copy_fromd)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); int nargs = lua_gettop(L); int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0; int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow; int a_begin = nargs > 4 ? 
luaL_checkinteger(L, 5) : 0; - if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow && - a_begin + b_end - b_begin <= a->nrow)) - nerv_error(L, "invalid copy interval"); - if (a->ncol != b->ncol) - nerv_error(L, "matrices should be of the same dimension"); - PROFILE_START - CUDA_SAFE_SYNC_CALL( - cudaMemcpy2D(MATRIX_ROW_PTR(a, a_begin), a->stride, - MATRIX_ROW_PTR(b, b_begin), b->stride, - sizeof(MATRIX_ELEM) * b->ncol, b_end - b_begin, - cudaMemcpyDeviceToDevice)); - PROFILE_STOP + nerv_matrix_(copy_fromd)(a, b, a_begin, b_begin, b_end, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } extern const char *MATRIX_CUMATRIX_HOST_TNAME; -static int nerv_matrix_(copy_fromh)(lua_State *L) { +static int nerv_matrix_(lua_copy_fromh)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); + const Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); int nargs = lua_gettop(L); int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0; int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow; int a_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0; - if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow && - a_begin + b_end - b_begin <= a->nrow)) - nerv_error(L, "invalid copy interval"); - if (a->ncol != b->ncol) - nerv_error(L, "matrices should be of the same dimension"); - PROFILE_START - CUDA_SAFE_SYNC_CALL( - cudaMemcpy2D(MATRIX_ROW_PTR(a, a_begin), a->stride, - MATRIX_ROW_PTR(b, b_begin), b->stride, - sizeof(MATRIX_ELEM) * b->ncol, b_end - b_begin, - cudaMemcpyHostToDevice)); - PROFILE_STOP + nerv_matrix_(copy_fromh)(a, b, a_begin, b_begin, b_end, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(copy_toh)(lua_State *L) { +static int nerv_matrix_(lua_copy_toh)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); + const Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); int nargs = lua_gettop(L); int a_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0; int a_end = nargs > 3 ? luaL_checkinteger(L, 4) : a->nrow; int b_begin = nargs > 4 ? 
luaL_checkinteger(L, 5) : 0; - if (!(0 <= a_begin && a_begin < a_end && a_end <= a->nrow && - b_begin + a_end - a_begin <= b->nrow)) - nerv_error(L, "invalid copy interval"); - if (b->ncol != a->ncol) - nerv_error(L, "matrices should be of the same dimension"); - PROFILE_START - CUDA_SAFE_SYNC_CALL( - cudaMemcpy2D(MATRIX_ROW_PTR(b, b_begin), b->stride, - MATRIX_ROW_PTR(a, a_begin), a->stride, - sizeof(MATRIX_ELEM) * a->ncol, a_end - a_begin, - cudaMemcpyDeviceToHost)); - PROFILE_STOP + nerv_matrix_(copy_toh)(a, b, a_begin, a_end, b_begin, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(trans)(lua_State *L) { +static int nerv_matrix_(lua_trans)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = nerv_matrix_(new_)(L, a->ncol, a->nrow); - MATRIX_ELEM alpha = 1, beta = 0; - /* FIXME: possible memory leak when lua error is raised */ - PROFILE_START - CUBLAS_SAFE_SYNC_CALL( - NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, - a->nrow, a->ncol, - &alpha, - MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM), - &beta, - MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM), - MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM))); - PROFILE_STOP + Matrix *b = nerv_matrix_(trans)(a, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } -static int nerv_matrix_(mul_elem)(lua_State *L) { - Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); +static int nerv_matrix_(lua_mul_elem)(lua_State *L) { + Status status; + const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); + const Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname)); - CHECK_SAME_DIMENSION(a, b); - CHECK_SAME_DIMENSION(a, c); - PROFILE_START - cudak_(cuda_mul_elem)(a, b, c); - PROFILE_STOP + nerv_matrix_(mul_elem)(c, a, b, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(log_elem)(lua_State *L) { - Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); +static int nerv_matrix_(lua_log_elem)(lua_State *L) { + Status status; + const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname)); - CHECK_SAME_DIMENSION(a, b); - PROFILE_START - cudak_(cuda_log_elem)(a, b); - PROFILE_STOP + nerv_matrix_(log_elem)(b, a, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(decompress)(lua_State *L) { - Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b; +static int nerv_matrix_(lua_decompress)(lua_State *L) { + Status status; + const Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); int orig_col = luaL_checkinteger(L, 2); - if (a->ncol != 1) - nerv_error(L, "the compressed matrix must be a column vector"); - b = nerv_matrix_(new_)(L, a->nrow, orig_col); - PROFILE_START - cudak_(cuda_fill)(b, 0.0); - cudak_(cuda_decompress)(a, b); - PROFILE_STOP + Matrix *b = nerv_matrix_(decompress)(a, orig_col, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, b, nerv_matrix_(tname)); return 1; } extern const char *nerv_matrix_host_int_tname; -static int nerv_matrix_(copy_rows_fromh_by_idx)(lua_State *L) { +static int nerv_matrix_(lua_copy_rows_fromh_by_idx)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); - Matrix *idx = luaT_checkudata(L, 3, 
nerv_matrix_host_int_tname); + const Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); + const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_host_int_tname); long nrow = a->nrow; int b_begin = lua_gettop(L) > 3 ? luaL_checkinteger(L, 4) : 0; - if (!(0 <= b_begin && b_begin + nrow <= idx->ncol)) - nerv_error(L, "invalid copy interval"); - long *idx_ptr = idx->data.i; - int i; - if (idx->nrow != 1) - nerv_error(L, "index should be a vector"); - if (a->ncol != b->ncol) - nerv_error(L, "source/destination dimension mismatch"); - cudaStream_t *streams = (cudaStream_t*)malloc(sizeof(cudaStream_t) * nrow); - for (i = 0; i < nrow; i++) - { - int src_row = idx_ptr[b_begin + i]; - if (!(0 <= src_row && src_row < b->nrow)) - nerv_error(L, "invalid index"); - CUDA_SAFE_CALL(cudaStreamCreate(streams + i)); - CUDA_SAFE_CALL(cudaMemcpyAsync(MATRIX_ROW_PTR(a, i), - MATRIX_ROW_PTR(b, src_row), - b->stride, - cudaMemcpyHostToDevice, streams[i])); - } - for (i = 0; i < nrow; i++) - { - CUDA_SAFE_CALL(cudaStreamSynchronize(streams[i])); - CUDA_SAFE_CALL(cudaStreamDestroy(streams[i])); - } - free(streams); + nerv_matrix_(copy_rows_fromh_by_idx)(a, b, idx, b_begin, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(expand_frm)(lua_State *L) { +static int nerv_matrix_(lua_expand_frm)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); int context = luaL_checkinteger(L, 3); - if (a->nrow != b->nrow) - nerv_error(L, "mismatching number of frames"); - if (a->ncol != b->ncol * (context * 2 + 1)) - nerv_error(L, "the width should be 2 * context + 1"); - PROFILE_START - cudak_(cuda_expand_frm)(b, a, context); - PROFILE_STOP + nerv_matrix_(expand_frm)(a, b, context, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(rearrange_frm)(lua_State *L) { +static int nerv_matrix_(lua_rearrange_frm)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); int step = luaL_checkinteger(L, 3); - CHECK_SAME_DIMENSION(a, b); - if (b->ncol % step) - nerv_error(L, "the dimension of columns is not divisible by step"); - PROFILE_START - cudak_(cuda_rearrange_frm)(b, a, step); - PROFILE_STOP + nerv_matrix_(rearrange_frm)(a, b, step, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(scale_rows_by_col)(lua_State *L) { +static int nerv_matrix_(lua_scale_rows_by_col)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); - if (a->nrow != b->nrow) - nerv_error(L, "the number of rows is not the same"); - if (b->ncol != 1) - nerv_error(L, "a column vector is expected"); - PROFILE_START - cudak_(cuda_scale_rows_by_col)(b, a); - PROFILE_STOP + const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + nerv_matrix_(scale_rows_by_col)(a, b, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(scale_rows_by_row)(lua_State *L) { +static int nerv_matrix_(lua_scale_rows_by_row)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); - if (a->ncol != b->ncol) - nerv_error(L, "the number of columns is not the same"); - if 
(b->nrow != 1) - nerv_error(L, "a row vector is expected"); - PROFILE_START - cudak_(cuda_scale_rows_by_row)(b, a); - PROFILE_STOP + const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + nerv_matrix_(scale_rows_by_row)(a, b, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } static const luaL_Reg nerv_matrix_(extra_methods)[] = { - {"create", nerv_matrix_(create)}, - {"colsum", nerv_matrix_(colsum)}, - {"colsame", nerv_matrix_(colsame)}, - {"rowsum", nerv_matrix_(rowsum)}, - {"rowmax", nerv_matrix_(rowmax)}, - {"rowmax_idx", nerv_matrix_(rowmax_idx)}, - {"trans", nerv_matrix_(trans)}, - {"decompress", nerv_matrix_(decompress)}, + {"create", nerv_matrix_(lua_create)}, + {"colsum", nerv_matrix_(lua_colsum)}, + {"colsame", nerv_matrix_(lua_colsame)}, + {"rowsum", nerv_matrix_(lua_rowsum)}, + {"rowmax", nerv_matrix_(lua_rowmax)}, + {"rowmax_idx", nerv_matrix_(lua_rowmax_idx)}, + {"trans", nerv_matrix_(lua_trans)}, + {"decompress", nerv_matrix_(lua_decompress)}, /* in-place calc */ - {"copy_fromh", nerv_matrix_(copy_fromh)}, - {"copy_fromd", nerv_matrix_(copy_fromd)}, - {"copy_toh", nerv_matrix_(copy_toh)}, - {"add", nerv_matrix_(add)}, - {"mul", nerv_matrix_(mul)}, - {"add_row", nerv_matrix_(add_row)}, - {"fill", nerv_matrix_(fill)}, - {"sigmoid", nerv_matrix_(sigmoid)}, - {"sigmoid_grad", nerv_matrix_(sigmoid_grad)}, - {"softmax", nerv_matrix_(softmax)}, - {"mul_elem", nerv_matrix_(mul_elem)}, - {"log_elem", nerv_matrix_(log_elem)}, - {"copy_rows_fromh_by_idx", nerv_matrix_(copy_rows_fromh_by_idx)}, - {"expand_frm", nerv_matrix_(expand_frm)}, - {"rearrange_frm", nerv_matrix_(rearrange_frm)}, - {"scale_rows_by_row", nerv_matrix_(scale_rows_by_row)}, - {"scale_rows_by_col", nerv_matrix_(scale_rows_by_col)}, + {"copy_fromh", nerv_matrix_(lua_copy_fromh)}, + {"copy_fromd", nerv_matrix_(lua_copy_fromd)}, + {"copy_toh", nerv_matrix_(lua_copy_toh)}, + {"add", nerv_matrix_(lua_add)}, + {"mul", nerv_matrix_(lua_mul)}, + {"add_row", nerv_matrix_(lua_add_row)}, + {"fill", nerv_matrix_(lua_fill)}, + {"sigmoid", nerv_matrix_(lua_sigmoid)}, + {"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)}, + {"softmax", nerv_matrix_(lua_softmax)}, + {"mul_elem", nerv_matrix_(lua_mul_elem)}, + {"log_elem", nerv_matrix_(lua_log_elem)}, + {"copy_rows_fromh_by_idx", nerv_matrix_(lua_copy_rows_fromh_by_idx)}, + {"expand_frm", nerv_matrix_(lua_expand_frm)}, + {"rearrange_frm", nerv_matrix_(lua_rearrange_frm)}, + {"scale_rows_by_row", nerv_matrix_(lua_scale_rows_by_row)}, + {"scale_rows_by_col", nerv_matrix_(lua_scale_rows_by_col)}, {NULL, NULL} }; @@ -456,37 +312,34 @@ static void cuda_matrix_(init)(lua_State *L) { luaN_append_methods(L, nerv_matrix_(extra_methods)); } -static void cuda_matrix_(free)(lua_State *L, MATRIX_ELEM *ptr) { - CUDA_SAFE_SYNC_CALL(cudaFree(ptr)); +int nerv_matrix_(lua_get_elem)(lua_State *L) { + return nerv_error_method_not_implemented(L); } -static void cuda_matrix_(alloc)(lua_State *L, MATRIX_ELEM **dptr, - size_t *stride, long width, long height) { - PROFILE_START - CUDA_SAFE_SYNC_CALL(cudaMallocPitch((void **)dptr, stride, width, height)); - PROFILE_STOP +int nerv_matrix_(lua_set_elem)(lua_State *L) { + return nerv_error_method_not_implemented(L); } static MATRIX_ELEM cuda_matrix_(read)(lua_State *L, MATRIX_ELEM *data, - int idx) { + int idx) { + cudaError_t err; MATRIX_ELEM res; - CUDA_SAFE_SYNC_CALL(cudaMemcpy(&res, data + idx, - sizeof(MATRIX_ELEM), cudaMemcpyDeviceToHost)); + err = cudaMemcpy(&res, data + idx, + sizeof(MATRIX_ELEM), cudaMemcpyDeviceToHost); + if (err != 
cudaSuccess) + nerv_error(L, "cuda error: error while reading element"); + cudaDeviceSynchronize(); return res; } static void cuda_matrix_(write)(lua_State *L, MATRIX_ELEM *data, int idx, MATRIX_ELEM val) { - CUDA_SAFE_SYNC_CALL(cudaMemcpy(data + idx, &val, - sizeof(MATRIX_ELEM), cudaMemcpyHostToDevice)); -} - -int nerv_matrix_(get_elem)(lua_State *L) { - return nerv_error_method_not_implemented(L); -} - -int nerv_matrix_(set_elem)(lua_State *L) { - return nerv_error_method_not_implemented(L); + cudaError_t err; + err = cudaMemcpy(data + idx, &val, + sizeof(MATRIX_ELEM), cudaMemcpyHostToDevice); + if (err != cudaSuccess) + nerv_error(L, "cuda error: error while writing element"); + cudaDeviceSynchronize(); } #include "matrix.c" diff --git a/nerv/matrix/generic/matrix.c b/nerv/matrix/generic/matrix.c index e17fb42..9d2521b 100644 --- a/nerv/matrix/generic/matrix.c +++ b/nerv/matrix/generic/matrix.c @@ -1,68 +1,32 @@ #ifdef NERV_GENERIC_MATRIX #include "../../common.h" -#include "matrix.h" +#include "../../lib/matrix/generic/matrix.h" extern const char *nerv_matrix_(tname); extern const char *MATRIX_BASE_TNAME; -void nerv_matrix_(data_free)(lua_State *L, Matrix *self) { - (void)L; - assert(*self->data_ref > 0); - if (--(*self->data_ref) == 0) - { - /* free matrix data */ - MATRIX_DATA_FREE(L, MATRIX_ELEM_PTR(self)); - free(self->data_ref); - free(self); - } -} - -void nerv_matrix_(data_retain)(Matrix *self) { - (*self->data_ref)++; -} - -Matrix *nerv_matrix_(new_)(lua_State *L, long nrow, long ncol) { - Matrix *self = (Matrix *)malloc(sizeof(Matrix)); - self->nrow = nrow; - self->ncol = ncol; - self->nmax = self->nrow * self->ncol; - MATRIX_DATA_ALLOC(L, &MATRIX_ELEM_PTR(self), &self->stride, - sizeof(MATRIX_ELEM) * self->ncol, self->nrow); - self->data_ref = (long *)malloc(sizeof(long)); - *self->data_ref = 0; - nerv_matrix_(data_retain)(self); - return self; -} -int nerv_matrix_(new)(lua_State *L) { - luaT_pushudata(L, nerv_matrix_(new_)(L, luaL_checkinteger(L, 1), - luaL_checkinteger(L, 2)), - nerv_matrix_(tname)); +int nerv_matrix_(lua_new)(lua_State *L) { + Status status; + Matrix *self = nerv_matrix_(create)(luaL_checkinteger(L, 1), + luaL_checkinteger(L, 2), &status); + NERV_LUA_CHECK_STATUS(L, status); + luaT_pushudata(L, self, nerv_matrix_(tname)); return 1; } -int nerv_matrix_(destroy)(lua_State *L) { +int nerv_matrix_(lua_destroy)(lua_State *L) { + Status status; Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); - nerv_matrix_(data_free)(L, self); + nerv_matrix_(destroy)(self, &status); + NERV_LUA_CHECK_STATUS(L, status); return 1; } -int nerv_matrix_(get_elem)(lua_State *L); -int nerv_matrix_(set_elem)(lua_State *L); - -static Matrix *nerv_matrix_(getrow)(Matrix *self, int row) { - Matrix *prow = (Matrix *)malloc(sizeof(Matrix)); - prow->ncol = self->ncol; - prow->nrow = 1; - prow->stride = self->stride; - prow->nmax = prow->ncol; - MATRIX_ELEM_PTR(prow) = MATRIX_ROW_PTR(self, row); - prow->data_ref = self->data_ref; - nerv_matrix_(data_retain)(prow); - return prow; -} +int nerv_matrix_(lua_get_elem)(lua_State *L); +int nerv_matrix_(lua_set_elem)(lua_State *L); -static int nerv_matrix_(newindex)(lua_State *L) { +static int nerv_matrix_(lua_newindex)(lua_State *L) { Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); if (lua_isnumber(L, 2)) { @@ -87,7 +51,8 @@ static int nerv_matrix_(newindex)(lua_State *L) { } -static int nerv_matrix_(index)(lua_State *L) { +static int nerv_matrix_(lua_index)(lua_State *L) { + Status status; Matrix *self = luaT_checkudata(L, 1, 
nerv_matrix_(tname)); if (lua_isnumber(L, 2)) { @@ -102,7 +67,8 @@ static int nerv_matrix_(index)(lua_State *L) { { if (idx < 0 || idx >= self->nrow) nerv_error(L, "index must be within range [0, %d)", self->nrow); - luaT_pushudata(L, nerv_matrix_(getrow)(self, idx), nerv_matrix_(tname)); + luaT_pushudata(L, nerv_matrix_(getrow)(self, idx), + nerv_matrix_(tname)); } lua_pushboolean(L, 1); return 2; @@ -114,38 +80,38 @@ static int nerv_matrix_(index)(lua_State *L) { } } -static int nerv_matrix_(ncol)(lua_State *L) { +static int nerv_matrix_(lua_ncol)(lua_State *L) { Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); lua_pushinteger(L, self->ncol); return 1; } -static int nerv_matrix_(nrow)(lua_State *L) { +static int nerv_matrix_(lua_nrow)(lua_State *L) { Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); lua_pushinteger(L, self->nrow); return 1; } -static int nerv_matrix_(get_dataref_value)(lua_State *L) { - Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); - lua_pushinteger(L, *(self->data_ref)); - return 1; -} +static int nerv_matrix_(lua_get_dataref_value)(lua_State *L) { + Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); + lua_pushinteger(L, *(self->data_ref)); + return 1; +} static const luaL_Reg nerv_matrix_(methods)[] = { - {"get_elem", nerv_matrix_(get_elem)}, - {"set_elem", nerv_matrix_(set_elem)}, - {"ncol", nerv_matrix_(ncol)}, - {"nrow", nerv_matrix_(nrow)}, - {"get_dataref_value", nerv_matrix_(get_dataref_value)}, - {"__index__", nerv_matrix_(index)}, - {"__newindex__", nerv_matrix_(newindex)}, + {"get_elem", nerv_matrix_(lua_get_elem)}, + {"set_elem", nerv_matrix_(lua_set_elem)}, + {"ncol", nerv_matrix_(lua_ncol)}, + {"nrow", nerv_matrix_(lua_nrow)}, + {"get_dataref_value", nerv_matrix_(lua_get_dataref_value)}, + {"__index__", nerv_matrix_(lua_index)}, + {"__newindex__", nerv_matrix_(lua_newindex)}, {NULL, NULL} }; -void nerv_matrix_(init)(lua_State *L) { +void nerv_matrix_(lua_init)(lua_State *L) { luaT_newmetatable(L, nerv_matrix_(tname), MATRIX_BASE_TNAME, - nerv_matrix_(new), nerv_matrix_(destroy), NULL); + nerv_matrix_(lua_new), nerv_matrix_(lua_destroy), NULL); luaL_register(L, NULL, nerv_matrix_(methods)); #ifdef MATRIX_INIT MATRIX_INIT(L); diff --git a/nerv/matrix/generic/matrix.h b/nerv/matrix/generic/matrix.h deleted file mode 100644 index 833724b..0000000 --- a/nerv/matrix/generic/matrix.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef NERV_GENERIC_MATRIX_H -#define NERV_GENERIC_MATRIX_H - -#include <stddef.h> -typedef struct Matrix { - size_t stride; /* size of a row */ - long ncol, nrow, nmax; /* dimension of the matrix */ - union { - float *f; - double *d; - long *i; - } data; /* pointer to actual storage */ - long *data_ref; -} Matrix; - -#define MATRIX_ROW_PTR(self, row) \ - (MATRIX_ELEM *)((char *)MATRIX_ELEM_PTR(self) + (row) * (self)->stride) - -#endif diff --git a/nerv/matrix/generic/mmatrix.c b/nerv/matrix/generic/mmatrix.c index 697c9fc..233102a 100644 --- a/nerv/matrix/generic/mmatrix.c +++ b/nerv/matrix/generic/mmatrix.c @@ -1,9 +1,6 @@ #ifdef NERV_GENERIC_MMATRIX -#include "matrix.h" +#include "../../lib/matrix/generic/matrix.h" #include "elem_type.h" -#define MATRIX_DATA_FREE(L, ptr) free(ptr) -#define MATRIX_DATA_ALLOC(L, dptr, stride, width, height) \ - host_matrix_(alloc)(L, dptr, stride, width, height) #define MATRIX_DATA_WRITE(L, data, idx, val) (data[idx] = val) #define MATRIX_DATA_READ(L, data, idx) (data[idx]) #define MATRIX_INIT(L) host_matrix_(init)(L) @@ -11,17 +8,10 @@ #define NERV_GENERIC_MATRIX #include 
"../../common.h" #include "../../io/chunk_file.h" +#include "../../lib/matrix/generic/mmatrix.h" #include "string.h" -static void host_matrix_(alloc)(lua_State *L, - MATRIX_ELEM **dptr, size_t *stride, - long width, long height) { - if ((*dptr = (MATRIX_ELEM *)malloc(width * height)) == NULL) - nerv_error(L, "mmatrix insufficient memory"); - *stride = width; -} - -int nerv_matrix_(get_elem)(lua_State *L) { +int nerv_matrix_(lua_get_elem)(lua_State *L) { Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); int idx = luaL_checkinteger(L, 2); if (idx < 0 || idx >= self->nmax) @@ -30,7 +20,7 @@ int nerv_matrix_(get_elem)(lua_State *L) { return 1; } -int nerv_matrix_(set_elem)(lua_State *L) { +int nerv_matrix_(lua_set_elem)(lua_State *L) { Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); int idx = luaL_checkinteger(L, 2); MATRIX_ELEM v = luaL_checknumber(L, 3); @@ -50,72 +40,42 @@ static void host_matrix_(init)(lua_State *L) { #include "matrix.c" -int nerv_matrix_(load)(lua_State *L) { +int nerv_matrix_(lua_load)(lua_State *L) { + Status status; ChunkData *cdp = luaT_checkudata(L, 1, nerv_chunk_data_tname); - Matrix *self; - int i, j; - long nrow, ncol; - FILE *fp = cdp->fp; - if (fscanf(fp, "%ld %ld", &nrow, &ncol) != 2) - return 0; - self = nerv_matrix_(new_)(L, nrow, ncol); - for (i = 0; i < nrow; i++) - { - MATRIX_ELEM *row = MATRIX_ROW_PTR(self, i); - for (j = 0; j < ncol; j++) - if (fscanf(fp, MATRIX_ELEM_FMT, row + j) != 1) - { - free(self); - return 0; - } - } + Matrix *self = nerv_matrix_(load)(cdp, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, self, nerv_matrix_(tname)); return 1; } -int nerv_matrix_(save)(lua_State *L) { +int nerv_matrix_(lua_save)(lua_State *L) { + Status status; ChunkFile *cfp = luaT_checkudata(L, 2, nerv_chunk_file_handle_tname); Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); - int i, j; - long nrow = self->nrow, ncol = self->ncol; - FILE *fp = cfp->fp; - if (fprintf(fp, "%ld %ld\n", nrow, ncol) < 0) - return 0; - for (i = 0; i < nrow; i++) - { - MATRIX_ELEM *row = MATRIX_ROW_PTR(self, i); - for (j = 0; j < ncol; j++) - if (fprintf(fp, MATRIX_ELEM_WRITE_FMT " ", row[j]) < 0) - return 0; - if (fprintf(fp, "\n") < 0) - return 0; - } + nerv_matrix_(save)(self, cfp, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int nerv_matrix_(copy_from)(lua_State *L) { +int nerv_matrix_(lua_copy_from)(lua_State *L) { + Status status; Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); - Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); int nargs = lua_gettop(L); int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0; int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow; int a_begin = nargs > 4 ? 
luaL_checkinteger(L, 5) : 0; - if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow && - a_begin + b_end - b_begin <= a->nrow)) - nerv_error(L, "invalid copy interval"); - if (a->ncol != b->ncol) - nerv_error(L, "matrices should be of the same dimension"); - memmove(MATRIX_ROW_PTR(a, a_begin), - MATRIX_ROW_PTR(b, b_begin), - sizeof(MATRIX_ELEM) * b->ncol * (b_end - b_begin)); + nerv_matrix_(copy_from)(a, b, a_begin, b_begin, b_end, &status); + NERV_LUA_CHECK_STATUS(L, status); return 0; } static const luaL_Reg nerv_matrix_(extra_methods)[] = { - {"load", nerv_matrix_(load)}, - {"save", nerv_matrix_(save)}, - {"copy_from", nerv_matrix_(copy_from)}, + {"load", nerv_matrix_(lua_load)}, + {"save", nerv_matrix_(lua_save)}, + {"copy_from", nerv_matrix_(lua_copy_from)}, {NULL, NULL} }; diff --git a/nerv/matrix/init.c b/nerv/matrix/init.c index c29d7e9..27472b9 100644 --- a/nerv/matrix/init.c +++ b/nerv/matrix/init.c @@ -1,12 +1,11 @@ #include "../common.h" -#include "generic/matrix.h" const char *nerv_matrix_tname = "nerv.Matrix"; const char *nerv_matrix_cuda_tname = "nerv.CuMatrix"; const char *nerv_matrix_host_tname = "nerv.MMatrix"; -void nerv_cumatrix_init(lua_State *L); -void nerv_mmatrix_init(lua_State *L); +void nerv_lua_cumatrix_init(lua_State *L); +void nerv_lua_mmatrix_init(lua_State *L); static const luaL_Reg matrix_methods[] = { {"__tostring__", nerv_error_method_not_implemented }, @@ -25,11 +24,11 @@ void nerv_matrix_init(lua_State *L) { /* CuMatrix inherits from Matrix */ luaT_newmetatable(L, nerv_matrix_cuda_tname, nerv_matrix_tname, NULL, NULL, NULL); - nerv_cumatrix_init(L); + nerv_lua_cumatrix_init(L); lua_pop(L, 1); /* MMatrix inherits from Matrix */ luaT_newmetatable(L, nerv_matrix_host_tname, nerv_matrix_tname, NULL, NULL, NULL); - nerv_mmatrix_init(L); + nerv_lua_mmatrix_init(L); lua_pop(L, 1); } diff --git a/nerv/matrix/mmatrix.c b/nerv/matrix/mmatrix.c index d1d68b9..b25af03 100644 --- a/nerv/matrix/mmatrix.c +++ b/nerv/matrix/mmatrix.c @@ -1,15 +1,15 @@ #define NERV_GENERIC_MMATRIX #include <stdlib.h> #include "../common.h" -void nerv_matrix_host_float_init(lua_State *L); -void nerv_matrix_host_double_init(lua_State *L); -void nerv_matrix_host_int_init(lua_State *L); +void nerv_matrix_host_float_lua_init(lua_State *L); +void nerv_matrix_host_double_lua_init(lua_State *L); +void nerv_matrix_host_int_lua_init(lua_State *L); -void nerv_mmatrix_init(lua_State *L) { +void nerv_lua_mmatrix_init(lua_State *L) { srand(1); - nerv_matrix_host_float_init(L); - nerv_matrix_host_double_init(L); - nerv_matrix_host_int_init(L); + nerv_matrix_host_float_lua_init(L); + nerv_matrix_host_double_lua_init(L); + nerv_matrix_host_int_lua_init(L); } #define MATRIX_USE_FLOAT @@ -52,26 +52,18 @@ static void host_matrix_(init_extra)(lua_State *L) { } #include "generic/mmatrix.c" +#include "../lib/matrix/mmatrix.h" -static int nerv_matrix_(perm_gen)(lua_State *L) { +static int nerv_matrix_(lua_perm_gen)(lua_State *L) { + Status status; int i, ncol = luaL_checkinteger(L, 1); - Matrix *self = nerv_matrix_(new_)(L, 1, ncol); - long *prow = self->data.i; - for (i = 0; i < ncol; i++) - prow[i] = i; - for (i = ncol - 1; i >= 0; i--) - { - size_t j = rand() % (i + 1); - long tmp = prow[i]; - prow[i] = prow[j]; - prow[j] = tmp; - } + Matrix *self = nerv_matrix_(perm_gen)(ncol, &status); + NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, self, nerv_matrix_(tname)); return 1; } static const luaL_Reg nerv_matrix_(extra_methods_int)[] = { - {"perm_gen", nerv_matrix_(perm_gen)}, + {"perm_gen", 
nerv_matrix_(lua_perm_gen)}, {NULL, NULL} }; -
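
The library-side half of the split is not shown above (the diffstat is limited to nerv/matrix, while the new implementations live under nerv/lib/matrix). Its rough shape can be inferred from the call sites: a plain C function that performs the checks and the cuBLAS work and reports failure through the Status out-parameter instead of raising a Lua error. The following is only an assumed sketch; NERV_SET_STATUS and the MAT_MISMATCH_DIM code are hypothetical placeholders, and only the signature is implied by the bindings above.

/* Assumed counterpart in nerv/lib/matrix/generic/cumatrix.c (not part of
 * this diff); the status-setting helper and error code are placeholders. */
void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
                       MATRIX_ELEM alpha, MATRIX_ELEM beta, Status *status) {
    if (!(a->nrow == b->nrow && a->ncol == b->ncol &&
          a->nrow == c->nrow && a->ncol == c->ncol))
    {
        /* formerly nerv_error(L, "matrices should be of the same dimension") */
        NERV_SET_STATUS(status, MAT_MISMATCH_DIM, 0);
        return;
    }
    /* cublas geam call wrapped in PROFILE_START/STOP, unchanged from the
       removed nerv_matrix_(add_) helper shown in the generic/cumatrix.c hunk */
    NERV_SET_STATUS(status, NERV_NORMAL, 0);
}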