27 files changed, 730 insertions, 319 deletions
diff --git a/nerv/Makefile b/nerv/Makefile
index c0db53a..a2155b9 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -6,14 +6,15 @@ LIB_PATH := $(LUA_BINDIR)/../lib
 INC_PATH := $(LUA_BINDIR)/../include/nerv
 LUA_DIR = $(INST_LUADIR)/nerv
 OBJ_DIR := $(BUILD_DIR)/objs
-ISUBDIR := io matrix luaT
+ISUBDIR := lib matrix lib/io lib/matrix lib/luaT
 SUBDIR := matrix io layer examples nn tnn lib/io lib/luaT lib/matrix
 
 INC_SUBDIR := $(addprefix $(INC_PATH)/,$(ISUBDIR))
 OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
 LUA_SUBDIR := $(addprefix $(LUA_DIR)/,$(SUBDIR))
-INCS := common.h matrix/matrix.h io/chunk_file.h luaT/luaT.h
+INCS := lib/common.h lib/matrix/matrix.h lib/matrix/mmatrix.h lib/io/chunk_file.h lib/luaT/luaT.h \
+        matrix/matrix.h
 CORE_OBJS := lib/common.o lib/io/chunk_file.o \
              lib/matrix/mmatrix.o lib/matrix/cumatrix.o lib/matrix/cukernel.o
 NERV_OBJS := nerv.o \
@@ -82,5 +83,5 @@ clean:
 install: $(LIBS) $(LUA_DIR) $(LUA_SUBDIR) $(LUA_LIBS) $(INC_SUBDIR) $(INCS)
 
-$(INC_PATH)/%.h: lib/%.h
+$(INC_PATH)/%.h: %.h
 	cp $< $@

diff --git a/nerv/init.lua b/nerv/init.lua
index 6312df1..e7d668c 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -54,7 +54,7 @@ end
 function nerv.warning(fmt, ...)
     nerv.printf(
         string.format("(%s)[nerv] warning: %s\n",
-            os.date("%H:%M:%S %F"), fmt), ...)
+            os.date("%H:%M:%S.%N %F"), fmt), ...)
 end
 
 --- Create a class (Torch-compatible).

diff --git a/nerv/lib/common.c b/nerv/lib/common.c
index d977f8d..879ae9d 100644
--- a/nerv/lib/common.c
+++ b/nerv/lib/common.c
@@ -56,7 +56,7 @@ void luaN_append_methods(lua_State *L, const luaL_Reg *mlist) {
     }
 }
 
-HashMap *hashmap_create(size_t size, HashKey_t hfunc, HashMapCmp_t cmp) {
+HashMap *nerv_hashmap_create(size_t size, HashKey_t hfunc, HashMapCmp_t cmp) {
     HashMap *res = (HashMap *)malloc(sizeof(HashMap));
     res->bucket = calloc(size, sizeof(HashNode));
     res->cmp = cmp;
@@ -65,7 +65,7 @@ HashMap *hashmap_create(size_t size, HashKey_t hfunc, HashMapCmp_t cmp) {
     return res;
 }
 
-void *hashmap_getval(HashMap *h, const char *key) {
+void *nerv_hashmap_getval(HashMap *h, const char *key) {
     size_t idx = h->hfunc(key) % h->size;
     HashNode *ptr;
     for (ptr = h->bucket[idx]; ptr; ptr = ptr->next)
@@ -76,7 +76,7 @@ void *hashmap_getval(HashMap *h, const char *key) {
     return NULL;
 }
 
-void hashmap_setval(HashMap *h, const char *key, void *val) {
+void nerv_hashmap_setval(HashMap *h, const char *key, void *val) {
     size_t idx = h->hfunc(key) % h->size;
     HashNode *ptr = malloc(sizeof(HashNode));
     ptr->next = h->bucket[idx];
@@ -85,7 +85,7 @@ void hashmap_setval(HashMap *h, const char *key, void *val) {
     ptr->val = val;
 }
 
-void hashmap_clear(HashMap *h) {
+void nerv_hashmap_clear(HashMap *h) {
     size_t i;
     for (i = 0; i < h->size; i++)
     {
@@ -100,6 +100,11 @@ void hashmap_clear(HashMap *h) {
     }
 }
 
+void nerv_hashmap_destroy(HashMap *h) {
+    nerv_hashmap_clear(h);
+    free(h);
+}
+
 size_t bkdr_hash(const char *key) {
     unsigned int seed = 131;
     unsigned int res = 0;

diff --git a/nerv/lib/common.h b/nerv/lib/common.h
index 1c588d1..3283ac1 100644
--- a/nerv/lib/common.h
+++ b/nerv/lib/common.h
@@ -7,6 +7,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#define PROFILE_HASHMAP_SIZE 123457
+
 typedef enum ErrCode {
     NERV_NORMAL,
     /* matrix err */
@@ -75,10 +77,11 @@ typedef struct HashMap {
     size_t size;
 } HashMap;
 
-HashMap *hashmap_create(size_t size, HashKey_t hfunc, HashMapCmp_t cmp);
-void *hashmap_getval(HashMap *h, const char *key);
-void hashmap_setval(HashMap *h, const char *key, void *val);
-void hashmap_clear(HashMap *h);
+HashMap *nerv_hashmap_create(size_t size, HashKey_t hfunc, HashMapCmp_t cmp);
+void *nerv_hashmap_getval(HashMap *h, const char *key);
+void nerv_hashmap_setval(HashMap *h, const char *key, void *val);
+void nerv_hashmap_clear(HashMap *h);
+void nerv_hashmap_destroy(HashMap *h);
 
 size_t bkdr_hash(const char *key);
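The hashmap API keeps its old semantics under the new nerv_-prefixed names; only nerv_hashmap_destroy is new, pairing nerv_hashmap_clear with free(h). A minimal sketch of the intended usage, mirroring the profile-accumulation code further down in this commit (the include path assumes the installed header layout from the Makefile hunk above):

    #include <string.h>
    #include "nerv/lib/common.h"

    /* Accumulate a float per string key, as the profiling code does;
     * bkdr_hash and strcmp are the hash/comparator used throughout this commit. */
    static void accu(HashMap *h, const char *name, float delta) {
        float *val = nerv_hashmap_getval(h, name);  /* NULL when key is absent */
        if (!val)
        {
            val = malloc(sizeof(float));
            *val = 0;
            nerv_hashmap_setval(h, name, val);
        }
        *val += delta;
    }

    int main() {
        HashMap *h = nerv_hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp);
        accu(h, "gemm", 0.5);
        accu(h, "gemm", 0.25);
        nerv_hashmap_destroy(h);  /* new in this commit: clear all buckets, then free the map */
        return 0;
    }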
diff --git a/nerv/lib/matrix/cuda_helper.h b/nerv/lib/matrix/cuda_helper.h
index 13d5728..5c75e38 100644
--- a/nerv/lib/matrix/cuda_helper.h
+++ b/nerv/lib/matrix/cuda_helper.h
@@ -54,6 +54,28 @@
         cudaDeviceSynchronize(); \
     } while (0)
 
+#define CURAND_SAFE_SYNC_CALL(call, status) \
+    do { \
+        curandStatus_t err = (call); \
+        if (err != CURAND_STATUS_SUCCESS) \
+        { \
+            NERV_SET_STATUS(status, MAT_CUBLAS_ERR, curandGetErrorString(err)); \
+            return; \
+        } \
+        cudaDeviceSynchronize(); \
+    } while (0)
+
+#define CURAND_SAFE_SYNC_CALL_RET(call, status) \
+    do { \
+        curandStatus_t err = (call); \
+        if (err != CURAND_STATUS_SUCCESS) \
+        { \
+            NERV_SET_STATUS(status, MAT_CUBLAS_ERR, curandGetErrorString(err)); \
+            return 0; \
+        } \
+        cudaDeviceSynchronize(); \
+    } while (0)
+
 #define CHECK_SAME_DIMENSION(a, b, status) \
     do { \
         if (!(a->nrow == b->nrow && a->ncol == b->ncol)) \
@@ -96,15 +118,46 @@ static const char *cublasGetErrorString(cublasStatus_t err) {
     return "<unknown>";
 }
 
+static const char *curandGetErrorString(curandStatus_t err) {
+    switch (err)
+    {
+        case CURAND_STATUS_VERSION_MISMATCH:
+            return "Header file and linked library version do not match";
+        case CURAND_STATUS_NOT_INITIALIZED:
+            return "Generator not initialized";
+        case CURAND_STATUS_ALLOCATION_FAILED:
+            return "Memory allocation failed";
+        case CURAND_STATUS_TYPE_ERROR:
+            return "Generator is wrong type";
+        case CURAND_STATUS_OUT_OF_RANGE:
+            return "Argument out of range";
+        case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
+            return "Length requested is not a multiple of dimension";
+        case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
+            return "GPU does not have double precision required by MRG32k3a";
+        case CURAND_STATUS_LAUNCH_FAILURE:
+            return "Kernel launch failure";
+        case CURAND_STATUS_PREEXISTING_FAILURE:
+            return "Preexisting failure on library entry";
+        case CURAND_STATUS_INITIALIZATION_FAILED:
+            return "Initialization of CUDA failed";
+        case CURAND_STATUS_ARCH_MISMATCH:
+            return "Architecture mismatch, GPU does not support requested feature";
+        case CURAND_STATUS_INTERNAL_ERROR:
+            return "Internal library error";
+    }
+    return "<unknown>";
+}
+
 #define PROFILE_START \
     do { \
-        cudaEventRecord(profile_start, 0);
+        cudaEventRecord(context->profile_start, 0);
 
 #define PROFILE_STOP \
-    cudaEventRecord(profile_stop, 0); \
-    cudaEventSynchronize(profile_stop); \
+    cudaEventRecord(context->profile_stop, 0); \
+    cudaEventSynchronize(context->profile_stop); \
     float milliseconds = 0; \
-    cudaEventElapsedTime(&milliseconds, profile_start, profile_stop); \
-    accu_profile(__func__, milliseconds / 1000); \
+    cudaEventElapsedTime(&milliseconds, context->profile_start, \
+                         context->profile_stop); \
+    nerv_cuda_context_accu_profile(context, __func__, milliseconds / 1000); \
 } while (0);
 
 #define PROFILE_END
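Note the contract these macros now impose: PROFILE_START/PROFILE_STOP expand to accesses of context->profile_start/profile_stop and a call to nerv_cuda_context_accu_profile(context, ...), and CURAND_SAFE_SYNC_CALL's failure path fills *status and returns from the enclosing function. Any function using them must therefore have parameters named exactly context and status in scope, as in this hypothetical wrapper (example_op is not part of the commit; it only illustrates the shape):

    /* Hypothetical op showing the contract the macros rely on. */
    static void example_op(Matrix *a, CuContext *context, Status *status) {
        PROFILE_START  /* opens a do { ... } block recording context->profile_start */
        CURAND_SAFE_SYNC_CALL(
            curandGenerateUniform(context->curand_gen, MATRIX_ELEM_PTR_F(a),
                                  a->nrow * a->stride / sizeof(float)),
            status);   /* on failure: sets *status and returns from example_op */
        PROFILE_STOP   /* records the stop event, syncs, accumulates under __func__ */
        NERV_SET_STATUS(status, NERV_NORMAL, 0);
    }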
diff --git a/nerv/lib/matrix/cukernel.h b/nerv/lib/matrix/cukernel.h
index c84200e..d59a070 100644
--- a/nerv/lib/matrix/cukernel.h
+++ b/nerv/lib/matrix/cukernel.h
@@ -3,7 +3,7 @@
 void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, Matrix *c);
 void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b);
 void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
 void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
-void cudak_(cuda_rand_uniform)(const Matrix *a); /* a's curand_gen may be modified */
+void cudak_(cuda_rand_uniform)(const Matrix *a, CuContext *context); /* a's curand_gen may be modified */
 void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high);
 void cudak_(cuda_tanh)(const Matrix *a, Matrix *b);
 void cudak_(cuda_tanh_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);

diff --git a/nerv/lib/matrix/cumatrix.c b/nerv/lib/matrix/cumatrix.c
index d998871..2fbe7d8 100644
--- a/nerv/lib/matrix/cumatrix.c
+++ b/nerv/lib/matrix/cumatrix.c
@@ -1,23 +1,12 @@
 #define NERV_GENERIC_CUMATRIX
+#define MATRIX_CONTEXT CuContext
 #include "cumatrix.h"
 #include "cuda_helper.h"
 #include <string.h>
 #include <time.h>
-#define PROFILE_HASHMAP_SIZE 123457
-static cublasHandle_t cublas_handle;
-static cudaEvent_t profile_start, profile_stop;
-curandGenerator_t curand_gen;
-static HashMap *profile;
 
-void nerv_cumatrix_select_gpu(int dev, Status *status) {
-    fprintf(stderr, "** selecting GPU %d\n", dev);
-    NERV_SET_STATUS(status, NERV_NORMAL, 0);
-    CUDA_SAFE_SYNC_CALL(cudaSetDevice(dev), status);
-    CUDA_SAFE_SYNC_CALL(cublasDestroy(cublas_handle), status);
-    CUDA_SAFE_SYNC_CALL(cublasCreate(&cublas_handle), status);
-}
-
-void nerv_cumatrix_print_profile() {
+void nerv_cuda_context_print_profile(CuContext *context) {
+    HashMap *profile = context->profile;
     size_t i;
     fprintf(stderr, "*** [nerv cumatrix profile] **\n");
     for (i = 0; i < profile->size; i++)
@@ -30,28 +19,72 @@ void nerv_cumatrix_print_profile() {
     }
 }
 
-void nerv_cumatrix_clear_profile() {
-    hashmap_clear(profile);
+void nerv_cuda_context_clear_profile(CuContext *context) {
+    nerv_hashmap_clear(context->profile);
 }
 
-void accu_profile(const char *name, float delta) {
-    float *val = hashmap_getval(profile, name);
+void nerv_cuda_context_accu_profile(CuContext *context,
+                                    const char *name, float delta) {
+    HashMap *profile = context->profile;
+    float *val = nerv_hashmap_getval(profile, name);
     if (!val)
     {
         val = malloc(sizeof(float));
         *val = 0;
-        hashmap_setval(profile, name, val);
+        nerv_hashmap_setval(profile, name, val);
     }
     *val += delta;
 }
 
-void nerv_cumatrix_init() {
-    cublasCreate(&cublas_handle);
-    curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
-    curandSetPseudoRandomGeneratorSeed(curand_gen, time(NULL));
-    cudaEventCreate(&profile_start);
-    cudaEventCreate(&profile_stop);
-    profile = hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp);
+static void new_cuda_handles(CuContext *context, Status *status) {
+    CUBLAS_SAFE_SYNC_CALL(cublasCreate(&(context->cublas_handle)), status);
+    CURAND_SAFE_SYNC_CALL(curandCreateGenerator(&(context->curand_gen),
+                                                CURAND_RNG_PSEUDO_DEFAULT), status);
+    CURAND_SAFE_SYNC_CALL(
+        curandSetPseudoRandomGeneratorSeed(context->curand_gen, time(NULL)),
+        status);
+    CUDA_SAFE_SYNC_CALL(cudaEventCreate(&(context->profile_start)), status);
+    CUDA_SAFE_SYNC_CALL(cudaEventCreate(&(context->profile_stop)), status);
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+static void free_cuda_handles(CuContext *context, Status *status) {
+    CUBLAS_SAFE_SYNC_CALL(cublasDestroy(context->cublas_handle), status);
+    CURAND_SAFE_SYNC_CALL(curandDestroyGenerator(context->curand_gen), status);
+    CUDA_SAFE_SYNC_CALL(cudaEventDestroy(context->profile_start), status);
+    CUDA_SAFE_SYNC_CALL(cudaEventDestroy(context->profile_stop), status);
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+CuContext *nerv_cuda_context_create(Status *status) {
+    CuContext *context = (CuContext *)malloc(sizeof(CuContext));
+    new_cuda_handles(context, status);
+    if (status->err_code != NERV_NORMAL)
+        return NULL;
+    context->profile = nerv_hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp);
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+    return context;
+}
+
+void nerv_cuda_context_destroy(CuContext *context, Status *status) {
+    free_cuda_handles(context, status);
+    if (status->err_code != NERV_NORMAL)
+        return;
+    nerv_hashmap_destroy(context->profile);
+    free(context);
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_cuda_context_select_gpu(CuContext *context,
+                                  int dev, Status *status) {
+    free_cuda_handles(context, status);
+    if (status->err_code != NERV_NORMAL)
+        return;
+    CUDA_SAFE_SYNC_CALL(cudaSetDevice(dev), status);
+    new_cuda_handles(context, status);
+    if (status->err_code != NERV_NORMAL)
+        return;
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
 #define MATRIX_USE_FLOAT
@@ -59,7 +92,6 @@ void nerv_cumatrix_init() {
 #define nerv_matrix_(NAME) nerv_matrix_cuda_float_##NAME
 #define cudak_(NAME) cudak_float_ ## NAME
 #define NERV_CUBLAS_(NAME) cublasS##NAME
-#define MATRIX_CUMATRIX_HOST_TNAME nerv_matrix_host_float_tname
 #include "generic/cumatrix.c"
 
 #undef NERV_CUBLAS_
@@ -72,12 +104,10 @@ void nerv_cumatrix_init() {
 #undef MATRIX_ELEM_PTR_BASE
 #undef MATRIX_ELEM_FMT
 #undef MATRIX_ELEM_WRITE_FMT
-#undef MATRIX_CUMATRIX_HOST_TNAME
 
 #define MATRIX_USE_DOUBLE
 #define cuda_matrix_(NAME) cuda_matrix_double_##NAME
 #define nerv_matrix_(NAME) nerv_matrix_cuda_double_##NAME
 #define cudak_(NAME) cudak_double_ ## NAME
 #define NERV_CUBLAS_(NAME) cublasD##NAME
-#define MATRIX_CUMATRIX_HOST_TNAME nerv_matrix_host_double_tname
 #include "generic/cumatrix.c"

diff --git a/nerv/lib/matrix/cumatrix.h b/nerv/lib/matrix/cumatrix.h
index b47e14b..280035b 100644
--- a/nerv/lib/matrix/cumatrix.h
+++ b/nerv/lib/matrix/cumatrix.h
@@ -2,8 +2,19 @@
 #define NERV_CUMATRIX_H
 #include "matrix.h"
 #include "../common.h"
-void nerv_cumatrix_print_profile();
-void nerv_cumatrix_clear_profile();
-void nerv_cumatrix_init();
-void nerv_cumatrix_select_gpu(int dev, Status *status);
+#include "cuda_helper.h"
+
+typedef struct CuContext {
+    cublasHandle_t cublas_handle;
+    cudaEvent_t profile_start, profile_stop;
+    curandGenerator_t curand_gen;
+    HashMap *profile;
+} CuContext;
+
+void nerv_cuda_context_print_profile(CuContext *context);
+void nerv_cuda_context_clear_profile(CuContext *context);
+void nerv_cuda_context_accu_profile(CuContext *context, const char *name, float delta);
+void nerv_cuda_context_select_gpu(CuContext *context, int dev, Status *status);
+CuContext *nerv_cuda_context_create(Status *status);
+void nerv_cuda_context_destroy(CuContext *context, Status *status);
 #endif

diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 51e3b6a..0e09cfa 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -445,13 +445,12 @@ extern "C" {
         cudaStreamSynchronize(0);
     }
 
-    extern curandGenerator_t curand_gen;
-    void cudak_(cuda_rand_uniform)(const Matrix *a) {
+    void cudak_(cuda_rand_uniform)(const Matrix *a, CuContext *context) {
 #ifdef MATRIX_USE_FLOAT
-        curandGenerateUniform(curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+        curandGenerateUniform(context->curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
 #endif
 #ifdef MATRIX_USE_DOUBLE
-        curandGenerateUniformDouble(curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+        curandGenerateUniformDouble(context->curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
 #endif
     }
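Taken together, these entry points replace the old implicit globals set up by nerv_cumatrix_init() with an explicit lifecycle. A minimal sketch of C host code driving it, using only the functions declared in cumatrix.h above (the include path assumes the installed header layout):

    #include "nerv/lib/matrix/cumatrix.h"

    int main() {
        Status status;
        /* one context owns the cuBLAS handle, the cuRAND generator,
           the two profiling events, and the profile hashmap */
        CuContext *ctx = nerv_cuda_context_create(&status);
        if (status.err_code != NERV_NORMAL)
            return 1;
        /* select_gpu now tears down and recreates all handles on the new device */
        nerv_cuda_context_select_gpu(ctx, 0, &status);
        if (status.err_code != NERV_NORMAL)
            return 1;
        /* ... run matrix ops, each taking ctx explicitly ... */
        nerv_cuda_context_print_profile(ctx);
        nerv_cuda_context_clear_profile(ctx);
        nerv_cuda_context_destroy(ctx, &status);
        return status.err_code != NERV_NORMAL;
    }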
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 7b70607..6342d90 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -1,10 +1,11 @@
 #ifdef NERV_GENERIC_CUMATRIX
 #include "matrix.h"
 #include "elem_type.h"
-#define MATRIX_DATA_FREE(ptr, status) cuda_matrix_(free)(ptr, status)
-#define MATRIX_DATA_ALLOC(dptr, stride, width, height, status) \
-    cuda_matrix_(alloc)(dptr, stride, width, height, status)
-
+#define MATRIX_DATA_FREE(ptr, context, status) \
+    cuda_matrix_(free)(ptr, context, status)
+#define MATRIX_DATA_ALLOC(dptr, stride, width, height, context, status) \
+    cuda_matrix_(alloc)(dptr, stride, width, height, \
+                        context, status)
 #define NERV_GENERIC_MATRIX
 #define NERV_GENERIC_CUKERNEL
 #include "../../common.h"
@@ -14,12 +15,13 @@
 void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
                        MATRIX_ELEM alpha, MATRIX_ELEM beta,
+                       CuContext *context,
                        Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     CHECK_SAME_DIMENSION(a, c, status);
     PROFILE_START
     CUBLAS_SAFE_SYNC_CALL(
-        NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
+        NERV_CUBLAS_(geam)(context->cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
                            a->ncol, a->nrow,
                            &alpha,
                            MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
@@ -33,7 +35,8 @@ void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
 
 void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
                        MATRIX_ELEM alpha, MATRIX_ELEM beta,
-                       int ta, int tb, Status *status) {
+                       int ta, int tb,
+                       CuContext *context, Status *status) {
 #define SWAP(a, b) \
     do { int t = (a); (a) = (b); (b) = t; } while (0)
 
@@ -46,7 +49,7 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
     /* Because matrix in Nerv is row-major, here b comes first */
     PROFILE_START
     CUBLAS_SAFE_SYNC_CALL(
-        NERV_CUBLAS_(gemm)(cublas_handle, tb, ta,
+        NERV_CUBLAS_(gemm)(context->cublas_handle, tb, ta,
                            bn, am, bm,
                            &alpha,
                            MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM),
@@ -58,7 +61,8 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+                           CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     PROFILE_START
     cudak_(cuda_sigmoid)(b, a);
@@ -67,7 +71,8 @@ void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
 }
 
 void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
-                                const Matrix *output, Status *status) {
+                                const Matrix *output,
+                                CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(nerr, err, status);
     CHECK_SAME_DIMENSION(nerr, output, status);
     PROFILE_START
@@ -76,14 +81,16 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(rand_uniform)(Matrix *a, Status *status) {
+void nerv_matrix_(rand_uniform)(Matrix *a, CuContext *context, Status *status) {
     PROFILE_START
-    cudak_(cuda_rand_uniform)(a);
+    cudak_(cuda_rand_uniform)(a, context);
     PROFILE_STOP
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, double high, Status *status) {
+void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres,
+                              double low, double high,
+                              CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     PROFILE_START
     cudak_(cuda_thres_mask)(a, b, thres, low, high);
@@ -91,7 +98,8 @@ void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, do
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b,
+                        CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     PROFILE_START
     cudak_(cuda_tanh)(b, a);
@@ -99,8 +107,8 @@ void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
-                             const Matrix *output, Status *status) {
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err, const Matrix *output,
+                             CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(nerr, err, status);
     CHECK_SAME_DIMENSION(nerr, output, status);
     PROFILE_START
@@ -109,24 +117,25 @@ void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+                              CuContext *context, Status *status) {
     Matrix *max, *max_idx;
     Matrix *dno;
     CHECK_SAME_DIMENSION_RET(a, b, status);
-    max = nerv_matrix_(create)(a->nrow, 1, status);
+    max = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
-    max_idx = nerv_matrix_(create)(a->nrow, 1, status);
+    max_idx = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
     {
-        nerv_matrix_(destroy)(max, status);
+        nerv_matrix_(destroy)(max, context, status);
         return NULL;
     }
-    dno = nerv_matrix_(create)(a->nrow, 1, status);
+    dno = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
     {
         /* FIXME: destroy may also fail? */
-        nerv_matrix_(destroy)(max, status);
-        nerv_matrix_(destroy)(max_idx, status);
+        nerv_matrix_(destroy)(max, context, status);
+        nerv_matrix_(destroy)(max_idx, context, status);
         return NULL;
     }
     PROFILE_START
@@ -134,14 +143,14 @@ Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
     cudak_(cuda_softmax_denominator)(a, max, dno);
     cudak_(cuda_softmax_final)(a, max, dno, b);
     PROFILE_STOP
-    nerv_matrix_(destroy)(max, status);
-    nerv_matrix_(destroy)(dno, status);
+    nerv_matrix_(destroy)(max, context, status);
+    nerv_matrix_(destroy)(dno, context, status);
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
     return max_idx;
 }
 
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, CuContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     PROFILE_START
@@ -151,8 +160,8 @@ Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
     return b;
 }
 
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, CuContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     PROFILE_START
@@ -163,8 +172,8 @@ Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
 }
 
 Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
-                              Status *status) {
-    Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+                              CuContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     CHECK_SAME_DIMENSION_RET(a, ref, status);
@@ -175,8 +184,8 @@ Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
     return b;
 }
 
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, CuContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     PROFILE_START
@@ -187,15 +196,15 @@ Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
 }
 
 void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
-                              Status *status) {
-    *b = nerv_matrix_(create)(a->nrow, 1, status);
+                              CuContext *context, Status *status) {
+    *b = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return;
-    *idx = nerv_matrix_(create)(a->nrow, 1, status);
+    *idx = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
     {
         /* FIXME: destroy may also fail? */
-        nerv_matrix_(destroy)(*b, status);
+        nerv_matrix_(destroy)(*b, context, status);
         return;
     }
     PROFILE_START
@@ -205,7 +214,7 @@ void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
 }
 
 void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
-                           Status *status) {
+                           CuContext *context, Status *status) {
     if (a->ncol != b->ncol)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     if (a->nrow != 1)
@@ -216,23 +225,25 @@ void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
+void nerv_matrix_(fill)(Matrix *self, double val,
+                        CuContext *context, Status *status) {
     PROFILE_START
     cudak_(cuda_fill)(self, val);
     PROFILE_STOP
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status) {
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+                        CuContext *context, Status *status) {
     PROFILE_START
-    cudak_(cuda_clip)(self, val_1, val_2);
+    cudak_(cuda_clip)(self, val1, val2);
     PROFILE_STOP
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
 void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
-                              Status *status) {
+                              CuContext *context, Status *status) {
     if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow &&
           a_begin + b_end - b_begin <= a->nrow))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -251,7 +262,7 @@ void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
 
 void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
-                              Status *status) {
+                              CuContext *context, Status *status) {
     if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow &&
           a_begin + b_end - b_begin <= a->nrow))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -270,7 +281,7 @@ void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
 
 void nerv_matrix_(copy_toh)(Matrix *a, const Matrix *b,
                             int a_begin, int a_end, int b_begin,
-                            Status *status) {
+                            CuContext *context, Status *status) {
     if (!(0 <= a_begin && a_begin < a_end && a_end <= a->nrow &&
           b_begin + a_end - a_begin <= b->nrow))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -287,15 +298,15 @@ void nerv_matrix_(copy_toh)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
+Matrix *nerv_matrix_(trans)(Matrix *a, CuContext *context, Status *status) {
     MATRIX_ELEM alpha = 1, beta = 0;
-    Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, status);
+    Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     /* FIXME: possible memory leak when lua error is raised */
     PROFILE_START
     CUBLAS_SAFE_SYNC_CALL_RET(
-        NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
+        NERV_CUBLAS_(geam)(context->cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
                            a->nrow, a->ncol,
                            &alpha,
                            MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
@@ -309,7 +320,7 @@ Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
 }
 
 void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
-                            Status *status) {
+                            CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     CHECK_SAME_DIMENSION(a, c, status);
     PROFILE_START
@@ -318,7 +329,8 @@ void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+                            CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     PROFILE_START
     cudak_(cuda_log_elem)(a, b);
@@ -326,14 +338,15 @@ void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status) {
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+                                 CuContext *context, Status *status) {
     Matrix *b;
     if (a->ncol != 1)
     {
         NERV_SET_STATUS(status, MAT_COL_VECTOR_EXP, 0);
         return NULL;
     }
-    b = nerv_matrix_(create)(a->nrow, orig_col, status);
+    b = nerv_matrix_(create)(a->nrow, orig_col, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     PROFILE_START
@@ -345,7 +358,8 @@ Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status)
 }
 
 void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
-                                          const Matrix *idx, int b_begin, Status *status) {
+                                          const Matrix *idx, int b_begin,
+                                          CuContext *context, Status *status) {
     long nrow = a->nrow;
     if (!(0 <= b_begin && b_begin + nrow <= idx->ncol))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -379,7 +393,8 @@ void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
-                                          const Matrix *idx, int b_begin, Status *status) {
+                                          const Matrix *idx, int b_begin,
+                                          CuContext *context, Status *status) {
     long nrow = a->nrow;
     if (!(0 <= b_begin && b_begin + nrow <= idx->ncol))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -394,7 +409,8 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
-                                             const Matrix *idx, int b_begin, Status *status) {
+                                             const Matrix *idx, int b_begin,
+                                             CuContext *context, Status *status) {
     long nrow = a->nrow;
     if (!(0 <= b_begin && b_begin + nrow <= idx->nrow))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -412,7 +428,9 @@ void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
 
 #ifdef __NERV_FUTURE_CUDA_7
-void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
+void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a,
+                                                const Matrix *idx, double alpha, double beta,
+                                                CuContext *context, Status *status) {
     long nrow = a->nrow;
     if (idx->nrow != 1 || idx->ncol != a->nrow)
         NERV_EXIT_STATUS(status, MAT_IDX_VECTOR_EXP, 0);
@@ -424,7 +442,9 @@ void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, cons
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
+void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a,
+                                                const Matrix *idx, double alpha, double beta,
+                                                CuContext *context, Status *status) {
     long nrow = a->nrow;
     if (idx->ncol != 1 || idx->nrow != a->nrow)
         NERV_EXIT_STATUS(status, MAT_IDX_VECTOR_EXP, 0);
@@ -438,20 +458,20 @@ void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, cons
 #endif
 
 void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
-                              int context, Status *status) {
+                              int cont, CuContext *context, Status *status) {
     if (a->nrow != b->nrow)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
-    if (a->ncol != b->ncol * (context * 2 + 1))
+    if (a->ncol != b->ncol * (cont * 2 + 1))
         NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
                          "the width should be 2 * context + 1");
     PROFILE_START
-    cudak_(cuda_expand_frm)(b, a, context);
+    cudak_(cuda_expand_frm)(b, a, cont);
     PROFILE_STOP
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
 void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
-                                 int step, Status *status) {
+                                 int step, CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     if (b->ncol % step)
         NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
@@ -463,7 +483,7 @@ void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
-                                     Status *status) {
+                                     CuContext *context, Status *status) {
     if (a->nrow != b->nrow)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     if (b->ncol != 1)
@@ -475,7 +495,7 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
-                                     Status *status) {
+                                     CuContext *context, Status *status) {
     if (a->ncol != b->ncol)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     if (b->nrow != 1)
@@ -486,7 +506,8 @@ void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
+                                 CuContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     PROFILE_START
     cudak_(cuda_prefixsum_row)(b, a);
@@ -494,14 +515,14 @@ void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status) {
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-static void cuda_matrix_(free)(MATRIX_ELEM *ptr, Status *status) {
+static void cuda_matrix_(free)(MATRIX_ELEM *ptr, CuContext *context, Status *status) {
     CUDA_SAFE_SYNC_CALL(cudaFree(ptr), status);
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
 static void cuda_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
                                 long width, long height,
-                                Status *status) {
+                                CuContext *context, Status *status) {
     PROFILE_START
     CUDA_SAFE_SYNC_CALL(cudaMallocPitch((void **)dptr, stride, width, height),
                         status);

diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index f3c2df8..fe83b5d 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -2,76 +2,99 @@
 void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
                        MATRIX_ELEM alpha, MATRIX_ELEM beta,
-                       Status *status);
+                       CuContext *context, Status *status);
 void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
                        MATRIX_ELEM alpha, MATRIX_ELEM beta,
-                       int ta, int tb, Status *status);
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status);
+                       int ta, int tb,
+                       CuContext *context, Status *status);
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+                           CuContext *context, Status *status);
 void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
-                                const Matrix *output, Status *status);
-void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status);
+                                const Matrix *output,
+                                CuContext *context, Status *status);
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b,
+                        CuContext *context, Status *status);
 void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
-                             const Matrix *output, Status *status);
+                             const Matrix *output,
+                             CuContext *context, Status *status);
 
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status);
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status);
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status);
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+                              CuContext *context, Status *status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, CuContext *context, Status *status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, CuContext *context, Status *status);
 Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
-                              Status *status);
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status);
+                              CuContext *context, Status *status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, CuContext *context, Status *status);
 void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
-                              Status *status);
+                              CuContext *context, Status *status);
 void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
-                           Status *status);
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status);
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status);
+                           CuContext *context, Status *status);
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+                        CuContext *context, Status *status);
+void nerv_matrix_(fill)(Matrix *self, double val,
+                        CuContext *context, Status *status);
 void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
-                              Status *status);
+                              CuContext *context, Status *status);
 void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
-                              Status *status);
+                              CuContext *context, Status *status);
 void nerv_matrix_(copy_toh)(Matrix *a, const Matrix *b,
                             int a_begin, int a_end, int b_begin,
-                            Status *status);
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status);
+                            CuContext *context, Status *status);
+Matrix *nerv_matrix_(trans)(Matrix *a, CuContext *context, Status *status);
 void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
-                            Status *status);
+                            CuContext *context, Status *status);
 
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status);
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+                            CuContext *context, Status *status);
 
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status);
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+                                 CuContext *context, Status *status);
 void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
-                                          const Matrix *idx, int b_begin, Status *status);
+                                          const Matrix *idx, int b_begin,
+                                          CuContext *context, Status *status);
 void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
-                                          const Matrix *idx, int b_begin, Status *status);
+                                          const Matrix *idx, int b_begin,
+                                          CuContext *context, Status *status);
 void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
-                                             const Matrix *idx, int b_begin, Status *status);
+                                             const Matrix *idx, int b_begin,
+                                             CuContext *context, Status *status);
 
 #ifdef __NERV_FUTURE_CUDA_7
-void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status);
-void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status);
+void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a,
+                                                const Matrix *idx, double alpha, double beta,
+                                                CuContext *context, Status *status);
+void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a,
+                                                const Matrix *idx, double alpha, double beta,
+                                                CuContext *context, Status *status);
 #endif
 
 void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
-                              int context, Status *status);
+                              int cont, CuContext *context, Status *status);
 void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
-                                 int step, Status *status);
+                                 int step, CuContext *context, Status *status);
 void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
-                                     Status *status);
+                                     CuContext *context, Status *status);
 void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
-                                     Status *status);
-void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status);
+                                     CuContext *context, Status *status);
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
+                                 CuContext *context, Status *status);
 void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres,
                               double low, double high,
-                              Status *status);
-void nerv_matrix_(rand_uniform)(Matrix *a, Status *status);
+                              CuContext *context, Status *status);
+void nerv_matrix_(rand_uniform)(Matrix *a, CuContext *context, Status *status);
 
 #ifdef __NERV_FUTURE_CUDA_7
-void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx,
-                                                double alpha, double beta, Status *status);
-void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx,
-                                                double alpha, double beta, Status *status);
+void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a,
+                                                const Matrix *idx,
+                                                double alpha, double beta,
+                                                CuContext *context, Status *status);
+void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a,
+                                                const Matrix *idx,
+                                                double alpha, double beta,
+                                                CuContext *context, Status *status);
 #endif
-void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status);
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
+                                 CuContext *context, Status *status);
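With the signatures above, a caller threads one CuContext through allocation, compute, and teardown. A sketch for the float instantiation (the concrete names follow the nerv_matrix_cuda_float_* macro expansion in lib/matrix/cumatrix.c; demo is hypothetical):

    /* Fill a device matrix with uniform noise and reduce it, all against one context. */
    void demo(CuContext *ctx, Status *status) {
        Matrix *a = nerv_matrix_cuda_float_create(128, 256, ctx, status);
        if (status->err_code != NERV_NORMAL)
            return;
        nerv_matrix_cuda_float_rand_uniform(a, ctx, status);          /* uses ctx->curand_gen */
        Matrix *sum = nerv_matrix_cuda_float_rowsum(a, ctx, status);  /* 128 x 1 */
        if (sum)
            nerv_matrix_cuda_float_destroy(sum, ctx, status);
        nerv_matrix_cuda_float_destroy(a, ctx, status);
    }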
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index 998d107..3bcc251 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -3,12 +3,12 @@
 #include "matrix.h"
 
 /* FIXME: malloc failure detection */
-void nerv_matrix_(data_free)(Matrix *self, Status *status) {
+void nerv_matrix_(data_free)(Matrix *self, MATRIX_CONTEXT *context, Status *status) {
     assert(*self->data_ref > 0);
     if (--(*self->data_ref) == 0)
     {
         /* free matrix data */
-        MATRIX_DATA_FREE(MATRIX_ELEM_PTR_BASE(self), status);
+        MATRIX_DATA_FREE(MATRIX_ELEM_PTR_BASE(self), context, status);
         free(self->data_ref);
         free(self);
     }
@@ -22,7 +22,8 @@ void nerv_matrix_(data_retain)(Matrix *self) {
     (*self->data_ref)++;
 }
 
-Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
+Matrix *nerv_matrix_(create)(long nrow, long ncol,
+                             MATRIX_CONTEXT *context, Status *status) {
     Matrix *self = (Matrix *)malloc(sizeof(Matrix));
     self->nrow = nrow;
     self->ncol = ncol;
@@ -30,7 +31,7 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
     self->dim = 2;
     MATRIX_DATA_ALLOC(&MATRIX_ELEM_PTR_BASE(self), &self->stride,
                       sizeof(MATRIX_ELEM) * self->ncol, self->nrow,
-                      status);
+                      context, status);
     if (status->err_code != NERV_NORMAL)
     {
         free(self);
@@ -44,8 +45,8 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
     return self;
 }
 
-void nerv_matrix_(destroy)(Matrix *self, Status *status) {
-    nerv_matrix_(data_free)(self, status);
+void nerv_matrix_(destroy)(Matrix *self, MATRIX_CONTEXT *context, Status *status) {
+    nerv_matrix_(data_free)(self, context, status);
 }
 
 Matrix *nerv_matrix_(getrow)(Matrix *self, int row) {

diff --git a/nerv/lib/matrix/generic/matrix.h b/nerv/lib/matrix/generic/matrix.h
index 69b4e6d..2770c3e 100644
--- a/nerv/lib/matrix/generic/matrix.h
+++ b/nerv/lib/matrix/generic/matrix.h
@@ -1,6 +1,7 @@
 #include "../matrix.h"
-Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status);
-void nerv_matrix_(destroy)(Matrix *self, Status *status);
+Matrix *nerv_matrix_(create)(long nrow, long ncol,
+                             MATRIX_CONTEXT *context, Status *status);
+void nerv_matrix_(destroy)(Matrix *self, MATRIX_CONTEXT *context, Status *status);
 Matrix *nerv_matrix_(getrow)(Matrix *self, int row);
-void nerv_matrix_(data_free)(Matrix *self, Status *status);
+void nerv_matrix_(data_free)(Matrix *self, MATRIX_CONTEXT *context, Status *status);
 void nerv_matrix_(data_retain)(Matrix *self);

diff --git a/nerv/lib/matrix/generic/mmatrix.c b/nerv/lib/matrix/generic/mmatrix.c
index fa1dc5f..ad334e3 100644
--- a/nerv/lib/matrix/generic/mmatrix.c
+++ b/nerv/lib/matrix/generic/mmatrix.c
@@ -1,9 +1,11 @@
 #ifdef NERV_GENERIC_MMATRIX
 #include "matrix.h"
 #include "elem_type.h"
-#define MATRIX_DATA_FREE(ptr, status) host_matrix_(free)(ptr, status)
-#define MATRIX_DATA_ALLOC(dptr, stride, width, height, status) \
-    host_matrix_(alloc)(dptr, stride, width, height, status)
+#define MATRIX_DATA_FREE(ptr, context, status) \
+    host_matrix_(free)(ptr, context, status)
+#define MATRIX_DATA_ALLOC(dptr, stride, width, height, context, status) \
+    host_matrix_(alloc)(dptr, stride, width, height, \
+                        context, status)
 #define NERV_GENERIC_MATRIX
 #include "../cuda_helper.h"
 #include "../../common.h"
@@ -12,8 +14,8 @@
 #include <cblas.h>
 #include <float.h>
 
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, MContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -31,8 +33,9 @@ Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
     return b;
 }
 
-Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref, Status *status) {
-    Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
+                              MContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     CHECK_SAME_DIMENSION_RET(a, ref, status);
@@ -55,8 +58,8 @@ Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref, Status *status) {
     return b;
 }
 
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, MContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -75,8 +78,8 @@ Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
     return b;
 }
 
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, MContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -96,15 +99,16 @@ Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
     return b;
 }
 
-void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx, Status *status) {
-    *b = nerv_matrix_(create)(a->nrow, 1, status);
+void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
+                              MContext *context, Status *status) {
+    *b = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return;
-    *idx = nerv_matrix_(create)(a->nrow, 1, status);
+    *idx = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
     {
         /* FIXME: destroy may also fail! */
-        nerv_matrix_(destroy)(*b, status);
+        nerv_matrix_(destroy)(*b, context, status);
         return;
     }
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -127,8 +131,8 @@ void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx, Status *statu
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
-    Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, status);
+Matrix *nerv_matrix_(trans)(Matrix *a, MContext *context, Status *status) {
+    Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a);
@@ -148,14 +152,15 @@ Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
     return b;
 }
 
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status) {
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+                                 MContext *context, Status *status) {
     Matrix *b;
     if (a->ncol != 1)
     {
         NERV_SET_STATUS(status, MAT_COL_VECTOR_EXP, 0);
         return NULL;
     }
-    b = nerv_matrix_(create)(a->nrow, orig_col, status);
+    b = nerv_matrix_(create)(a->nrow, orig_col, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     int i;
@@ -173,7 +178,9 @@ Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status)
     return b;
 }
 
-void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b, MATRIX_ELEM alpha, MATRIX_ELEM beta, Status *status) {
+void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
+                       MATRIX_ELEM alpha, MATRIX_ELEM beta,
+                       MContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     CHECK_SAME_DIMENSION(a, c, status);
     int i, j;
@@ -197,7 +204,7 @@ void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b, MATRIX_ELEM
 
 void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
                        MATRIX_ELEM alpha, MATRIX_ELEM beta,
-                       int ta, int tb, Status *status) {
+                       int ta, int tb, MContext *context, Status *status) {
 #define SWAP(a, b) \
     do { int t = (a); (a) = (b); (b) = t; } while (0)
 
@@ -218,7 +225,7 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
-                           Status *status) {
+                           MContext *context, Status *status) {
     if (a->ncol != b->ncol)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     if (a->nrow != 1)
@@ -236,23 +243,25 @@ void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status) {
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+                        MContext *context, Status *status) {
     int i, j;
     size_t astride = self->stride;
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(self);
     for (i = 0; i < self->nrow; i++)
     {
         for (j = 0; j < self->ncol; j++)
-            if (arow[j] > val_2)
-                arow[j] = val_2;
-            else if (arow[j] < val_1)
-                arow[j] = val_1;
+            if (arow[j] > val2)
+                arow[j] = val2;
+            else if (arow[j] < val1)
+                arow[j] = val1;
         arow = MATRIX_NEXT_ROW_PTR(arow, astride);
     }
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
+void nerv_matrix_(fill)(Matrix *self, double val,
+                        MContext *context, Status *status) {
     int i, j;
     size_t astride = self->stride;
     MATRIX_ELEM *arow = MATRIX_ELEM_PTR(self);
@@ -265,7 +274,8 @@ void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+                           MContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     int i, j;
     size_t astride = a->stride, bstride = b->stride;
@@ -282,7 +292,8 @@ void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
 }
 
 void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
-                                const Matrix *output, Status *status) {
+                                const Matrix *output,
+                                MContext *context, Status *status) {
     CHECK_SAME_DIMENSION(nerr, err, status);
     CHECK_SAME_DIMENSION(nerr, output, status);
     int i, j;
@@ -303,10 +314,11 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+                              MContext *context, Status *status) {
     Matrix *max_idx;
     CHECK_SAME_DIMENSION_RET(a, b, status);
-    max_idx = nerv_matrix_(create)(a->nrow, 1, status);
+    max_idx = nerv_matrix_(create)(a->nrow, 1, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     int i, j;
@@ -337,7 +349,7 @@ Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
 }
 
 void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
-                            Status *status) {
+                            MContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     CHECK_SAME_DIMENSION(a, c, status);
     int i, j;
@@ -358,7 +370,8 @@ void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+                            MContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     int i, j;
     size_t astride = a->stride, bstride = b->stride;
@@ -383,10 +396,10 @@ void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
 }
 
 void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
-                              int context, Status *status) {
+                              int cont, MContext *context, Status *status) {
     if (a->nrow != b->nrow)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
-    if (a->ncol != b->ncol * (context * 2 + 1))
+    if (a->ncol != b->ncol * (cont * 2 + 1))
         NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
                          "the width should be 2 * context + 1");
     int i, j, k;
@@ -395,10 +408,10 @@ void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
     for (i = 0; i < a->nrow; i++)
     {
         MATRIX_ELEM *a_subrow = arow;
-        int start = i - context;
+        int start = i - cont;
         if (start < 0) start = 0;
         const MATRIX_ELEM *brow = MATRIX_ROW_PTR(b, start);
-        for (j = i - context; j <= i + context; j++)
+        for (j = i - cont; j <= i + cont; j++)
         {
             for (k = 0; k < b->ncol; k++)
                 a_subrow[k] = brow[k];
@@ -412,7 +425,7 @@ void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
-                                 int step, Status *status) {
+                                 int step, MContext *context, Status *status) {
     CHECK_SAME_DIMENSION(a, b, status);
     if (b->ncol % step)
         NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
@@ -439,7 +452,7 @@ void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
-                                     Status *status) {
+                                     MContext *context, Status *status) {
     if (a->ncol != b->ncol)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     if (b->nrow != 1)
@@ -458,7 +471,7 @@ void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
-                                     Status *status) {
+                                     MContext *context, Status *status) {
     if (a->nrow != b->nrow)
         NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
     if (b->ncol != 1)
@@ -477,13 +490,14 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
-static void host_matrix_(free)(MATRIX_ELEM *ptr, Status *status) {
+static void host_matrix_(free)(MATRIX_ELEM *ptr, MContext *context, Status *status) {
     free(ptr);
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
 static void host_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
-                                long width, long height, Status *status) {
+                                long width, long height,
+                                MContext *context, Status *status) {
     if ((*dptr = (MATRIX_ELEM *)malloc(width * height)) == NULL)
         NERV_EXIT_STATUS(status, MAT_INSUF_MEM, 0);
     *stride = width;
@@ -491,7 +505,7 @@ static void host_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
 }
 
 #include "matrix.c"
-Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status) {
+Matrix *nerv_matrix_(load)(ChunkData *cdp, MContext *context, Status *status) {
     int i, j;
     long nrow, ncol;
     FILE *fp = cdp->fp;
@@ -501,7 +515,7 @@ Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status) {
         NERV_SET_STATUS(status, MAT_INVALID_FORMAT, 0);
         return 0;
     }
-    self = nerv_matrix_(create)(nrow, ncol, status);
+    self = nerv_matrix_(create)(nrow, ncol, context, status);
     if (status->err_code != NERV_NORMAL)
         return NULL;
     for (i = 0; i < nrow; i++)
@@ -519,7 +533,7 @@ Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status) {
     return self;
 }
 
-void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, Status *status) {
+void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, MContext *context, Status *status) {
     int i, j;
     long nrow = self->nrow, ncol = self->ncol;
     FILE *fp = cfp->fp;
@@ -540,7 +554,7 @@ void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, Status *status) {
 
 void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
-                              Status *status) {
+                              MContext *context, Status *status) {
     if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow &&
           a_begin + b_end - b_begin <= a->nrow))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -553,7 +567,8 @@ void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
 }
 
 void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
-                                          const Matrix *idx, int b_begin, Status *status) {
+                                          const Matrix *idx, int b_begin,
+                                          MContext *context, Status *status) {
     if (!(0 <= b_begin && b_begin + a->nrow <= idx->ncol))
         NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
     if (idx->nrow != 1)
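The generic sources stay backend-neutral by leaving MATRIX_CONTEXT open: lib/matrix/cumatrix.c defines it to CuContext and lib/matrix/mmatrix.c (below) to MContext before textually including the generic code, so one implementation of create/destroy serves both backends. A sketch of what this buys on the host side, using the MContext lifecycle declared in lib/matrix/mmatrix.h further down (the destroy name is my expansion of nerv_matrix_(destroy) under nerv_matrix_host_float_##NAME; the include path assumes the installed header layout):

    #include "nerv/lib/matrix/mmatrix.h"

    int main() {
        Status status;
        MContext *ctx = nerv_host_context_create(&status);  /* owns just the profile map */
        if (status.err_code != NERV_NORMAL)
            return 1;
        /* host-side op with the same context-threading shape as the CUDA ones:
           a 1 x 10 random permutation row vector */
        Matrix *perm = nerv_matrix_host_float_perm_gen(10, ctx, &status);
        if (status.err_code == NERV_NORMAL)
            nerv_matrix_host_float_destroy(perm, ctx, &status);
        nerv_host_context_print_profile(ctx);
        nerv_host_context_destroy(ctx, &status);
        return 0;
    }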
MContext *context, Status *status); -Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status); -Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status); -Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status); +Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, + MContext *context, Status *status); +Matrix *nerv_matrix_(rowsum)(Matrix *a, MContext *context, Status *status); +Matrix *nerv_matrix_(colsum)(Matrix *a, MContext *context, Status *status); Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref, - Status *status); -Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status); + MContext *context, Status *status); +Matrix *nerv_matrix_(rowmax)(Matrix *a, MContext *context, Status *status); void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx, - Status *status); + MContext *context, Status *status); void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta, - Status *status); -void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status); -void nerv_matrix_(fill)(Matrix *self, double val, Status *status); + MContext *context, Status *status); +void nerv_matrix_(clip)(Matrix *self, double val1, double val2, + MContext *context, Status *status); +void nerv_matrix_(fill)(Matrix *self, double val, MContext *context, Status *status); void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b, int a_begin, int b_begin, int b_end, - Status *status); -Matrix *nerv_matrix_(trans)(Matrix *a, Status *status); + MContext *context, Status *status); +Matrix *nerv_matrix_(trans)(Matrix *a, MContext *context, Status *status); void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b, - Status *status); + MContext *context, Status *status); -void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status); +void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, + MContext *context, Status *status); -Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status); +Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, + MContext *context, Status *status); void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b, - const Matrix *idx, int b_begin, Status *status); + const Matrix *idx, int b_begin, + MContext *context, Status *status); void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b, - int context, Status *status); + int cont, MContext *context, Status *status); void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b, - int step, Status *status); + int step, MContext *context, Status *status); void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b, - Status *status); + MContext *context, Status *status); void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b, - Status *status); -Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status); -void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, Status *status); + MContext *context, Status *status); +Matrix *nerv_matrix_(load)(ChunkData *cdp, MContext *context, Status *status); +void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, MContext *context, Status *status); diff --git a/nerv/lib/matrix/mmatrix.c b/nerv/lib/matrix/mmatrix.c index 3125ab6..f1cbc75 100644 --- a/nerv/lib/matrix/mmatrix.c +++ b/nerv/lib/matrix/mmatrix.c @@ -1,6 +1,8 @@ #define NERV_GENERIC_MMATRIX +#define MATRIX_CONTEXT MContext #include <stdlib.h> #include "../common.h" +#include "mmatrix.h" #define MATRIX_USE_FLOAT #define host_matrix_(NAME) host_matrix_float_##NAME @@ -10,9 +12,40 @@ #include "generic/elem_type.h" #include 
"generic/mmatrix.c" -Matrix *nerv_matrix_(perm_gen)(int ncol, Status *status) { +void nerv_host_context_print_profile(MContext *context) { + HashMap *profile = context->profile; + size_t i; + fprintf(stderr, "*** [nerv mmatrix profile] **\n"); + for (i = 0; i < profile->size; i++) + { + HashNode *ptr; + for (ptr = profile->bucket[i]; ptr; ptr = ptr->next) + { + fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val); + } + } +} + +void nerv_host_context_clear_profile(MContext *context) { + nerv_hashmap_clear(context->profile); +} + +MContext *nerv_host_context_create(Status *status) { + MContext *context = (MContext *)malloc(sizeof(MContext)); + context->profile = nerv_hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp); + NERV_SET_STATUS(status, NERV_NORMAL, 0); + return context; +} + +void nerv_host_context_destroy(MContext *context, Status *status) { + nerv_hashmap_destroy(context->profile); + free(context); + NERV_SET_STATUS(status, NERV_NORMAL, 0); +} + +Matrix *nerv_matrix_(perm_gen)(int ncol, MContext *context, Status *status) { int i; - Matrix *self = nerv_matrix_(create)(1, ncol, status); + Matrix *self = nerv_matrix_(create)(1, ncol, context, status); if (status->err_code != NERV_NORMAL) return NULL; float *prow = MATRIX_ELEM_PTR_F(self); diff --git a/nerv/lib/matrix/mmatrix.h b/nerv/lib/matrix/mmatrix.h index 31e7984..6061683 100644 --- a/nerv/lib/matrix/mmatrix.h +++ b/nerv/lib/matrix/mmatrix.h @@ -1,5 +1,15 @@ #ifndef NERV_MMATRIX_H #define NERV_MMATRIX_H #include "matrix.h" -Matrix *nerv_matrix_host_float_perm_gen(int ncol, Status *status); +#include "../common.h" + +typedef struct MContext { + HashMap *profile; +} MContext; + +Matrix *nerv_matrix_host_float_perm_gen(int ncol, MContext *context, Status *status); +void nerv_host_context_print_profile(MContext *context); +void nerv_host_context_clear_profile(MContext *context); +MContext *nerv_host_context_create(Status *status); +void nerv_host_context_destroy(MContext *contex, Status *status); #endif diff --git a/nerv/matrix/cumatrix.c b/nerv/matrix/cumatrix.c index 7f22d68..26b055b 100644 --- a/nerv/matrix/cumatrix.c +++ b/nerv/matrix/cumatrix.c @@ -4,45 +4,74 @@ #include "../lib/matrix/cuda_helper.h" #include <string.h> #define PROFILE_HASHMAP_SIZE 123457 -static cublasHandle_t cublas_handle; -static cudaEvent_t profile_start, profile_stop; -static HashMap *profile; -static int select_gpu(lua_State *L) { +const char *nerv_cuda_context_tname = "nerv.CuContext"; + +int nerv_cuda_context_lua_select_gpu(lua_State *L) { Status status; - int dev = luaL_checkinteger(L, 1); - nerv_cumatrix_select_gpu(dev, &status); + nerv_cuda_context_select_gpu(luaT_checkudata(L, 1, nerv_cuda_context_tname), + luaL_checkinteger(L, 1), &status); NERV_LUA_CHECK_STATUS(L, status); return 0; } -static int print_profile(lua_State *L) { - nerv_cumatrix_print_profile(); +int nerv_cuda_context_lua_print_profile(lua_State *L) { + nerv_cuda_context_print_profile(luaT_checkudata(L, 1, nerv_cuda_context_tname)); return 0; } -static int clear_profile(lua_State *L) { - nerv_cumatrix_clear_profile(); +int nerv_cuda_context_lua_clear_profile(lua_State *L) { + nerv_cuda_context_clear_profile(luaT_checkudata(L, 1, nerv_cuda_context_tname)); return 0; } -static const luaL_Reg cumatrix_methods[] = { - {"print_profile", print_profile}, - {"clear_profile", clear_profile}, - {"select_gpu", select_gpu}, +int nerv_cuda_context_lua_new(lua_State *L) { + Status status; + CuContext *self = nerv_cuda_context_create(&status); + NERV_LUA_CHECK_STATUS(L, status); + 
diff --git a/nerv/matrix/cumatrix.c b/nerv/matrix/cumatrix.c
index 7f22d68..26b055b 100644
--- a/nerv/matrix/cumatrix.c
+++ b/nerv/matrix/cumatrix.c
@@ -4,45 +4,74 @@
 #include "../lib/matrix/cuda_helper.h"
 #include <string.h>
 #define PROFILE_HASHMAP_SIZE 123457
-static cublasHandle_t cublas_handle;
-static cudaEvent_t profile_start, profile_stop;
-static HashMap *profile;
-static int select_gpu(lua_State *L) {
+const char *nerv_cuda_context_tname = "nerv.CuContext";
+
+int nerv_cuda_context_lua_select_gpu(lua_State *L) {
     Status status;
-    int dev = luaL_checkinteger(L, 1);
-    nerv_cumatrix_select_gpu(dev, &status);
+    nerv_cuda_context_select_gpu(luaT_checkudata(L, 1, nerv_cuda_context_tname),
+                                 luaL_checkinteger(L, 2), &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
-static int print_profile(lua_State *L) {
-    nerv_cumatrix_print_profile();
+int nerv_cuda_context_lua_print_profile(lua_State *L) {
+    nerv_cuda_context_print_profile(luaT_checkudata(L, 1, nerv_cuda_context_tname));
     return 0;
 }
 
-static int clear_profile(lua_State *L) {
-    nerv_cumatrix_clear_profile();
+int nerv_cuda_context_lua_clear_profile(lua_State *L) {
+    nerv_cuda_context_clear_profile(luaT_checkudata(L, 1, nerv_cuda_context_tname));
    return 0;
 }
 
-static const luaL_Reg cumatrix_methods[] = {
-    {"print_profile", print_profile},
-    {"clear_profile", clear_profile},
-    {"select_gpu", select_gpu},
+int nerv_cuda_context_lua_new(lua_State *L) {
+    Status status;
+    CuContext *self = nerv_cuda_context_create(&status);
+    NERV_LUA_CHECK_STATUS(L, status);
+    luaT_pushudata(L, self, nerv_cuda_context_tname);
+    return 1;
+}
+
+int nerv_cuda_context_lua_destroy(lua_State *L) {
+    Status status;
+    CuContext *self = luaT_checkudata(L, 1, nerv_cuda_context_tname);
+    nerv_cuda_context_destroy(self, &status);
+    NERV_LUA_CHECK_STATUS(L, status);
+    return 1;
+}
+
+static const luaL_Reg nerv_cuda_context_methods[] = {
+    {"print_profile", nerv_cuda_context_lua_print_profile},
+    {"clear_profile", nerv_cuda_context_lua_clear_profile},
+    {"select_gpu", nerv_cuda_context_lua_select_gpu},
     {NULL, NULL}
 };
 
+void nerv_cuda_context_lua_init(lua_State *L) {
+    luaT_newmetatable(L, nerv_cuda_context_tname, NULL,
+                      nerv_cuda_context_lua_new,
+                      nerv_cuda_context_lua_destroy, NULL);
+    luaL_register(L, NULL, nerv_cuda_context_methods);
+}
+
 extern void nerv_matrix_cuda_float_lua_init(lua_State *L);
 extern void nerv_matrix_cuda_double_lua_init(lua_State *L);
 
+static const luaL_Reg cumatrix_methods[] = {
+    {NULL, NULL}
+};
+
 void nerv_lua_cumatrix_init(lua_State *L) {
     luaL_register(L, NULL, cumatrix_methods);
-    nerv_cumatrix_init();
+    nerv_cuda_context_lua_init(L);
     nerv_matrix_cuda_float_lua_init(L);
     nerv_matrix_cuda_double_lua_init(L);
 }
 
+#define MATRIX_CONTEXT CuContext
+#define MATRIX_CONTEXT_TNAME nerv_cuda_context_tname
+
 #define MATRIX_USE_FLOAT
 #define cuda_matrix_(NAME) cuda_matrix_float_##NAME
 #define nerv_matrix_(NAME) nerv_matrix_cuda_float_##NAME
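The CUDA side mirrors the host side: the old file-level statics (cuBLAS handle, profiling events, profile hashmap) presumably move into the CuContext defined in the lib layer, and select_gpu becomes a method whose first argument is the context userdata. A hedged C sketch of the corresponding lifecycle, assuming the nerv_cuda_context_* declarations live in a cumatrix header and carry the signatures used by the bindings above:

    static CuContext *make_gpu_context(int dev)
    {
        Status status;
        CuContext *ctx = nerv_cuda_context_create(&status);
        if (status.err_code != NERV_NORMAL)
            return NULL;
        nerv_cuda_context_select_gpu(ctx, dev, &status);
        if (status.err_code != NERV_NORMAL)
        {
            nerv_cuda_context_destroy(ctx, &status);
            return NULL;
        }
        return ctx;
    }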
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index b706c21..16c0e3a 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -6,6 +6,7 @@
 #define MATRIX_BASE_TNAME nerv_matrix_cuda_tname
 #define NERV_GENERIC_MATRIX
 #define NERV_GENERIC_CUKERNEL
+#include "../matrix.h"
 #include "../../lib/common.h"
 #include "../../lib/matrix/generic/matrix.h"
 #include "../../lib/matrix/generic/cumatrix.h"
@@ -17,48 +18,58 @@ static int nerv_matrix_(lua_get_blas_op)(char ch) {
 
 static int nerv_matrix_(lua_prefixsum_row)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    nerv_matrix_(prefixsum_row)(a, b, &status);
+    nerv_matrix_(prefixsum_row)(a, b, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_thres_mask)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
     MATRIX_ELEM thres = luaL_checknumber(L, 3);
     MATRIX_ELEM low = luaL_checknumber(L, 4);
     MATRIX_ELEM high = luaL_checknumber(L, 5);
-    nerv_matrix_(thres_mask)(a, b, thres, low, high, &status);
+    nerv_matrix_(thres_mask)(a, b, thres, low, high, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_rand_uniform)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    nerv_matrix_(rand_uniform)(a, &status);
+    nerv_matrix_(rand_uniform)(a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_tanh)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    nerv_matrix_(tanh)(a, b, &status);
+    nerv_matrix_(tanh)(a, b, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_tanh_grad)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     Matrix *nerr = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *err = luaT_checkudata(L, 2, nerv_matrix_(tname));
     Matrix *output = luaT_checkudata(L, 3, nerv_matrix_(tname));
-    nerv_matrix_(tanh_grad)(nerr, err, output, &status);
+    nerv_matrix_(tanh_grad)(nerr, err, output, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
@@ -66,39 +77,45 @@ static int nerv_matrix_(lua_tanh_grad)(lua_State *L) {
 extern const char *MATRIX_CUMATRIX_HOST_TNAME;
 static int nerv_matrix_(lua_copy_fromh)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
     int nargs = lua_gettop(L);
     int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0;
     int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow;
     int a_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0;
-    nerv_matrix_(copy_fromh)(a, b, a_begin, b_begin, b_end, &status);
+    nerv_matrix_(copy_fromh)(a, b, a_begin, b_begin, b_end, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_copy_toh)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
     int nargs = lua_gettop(L);
     int a_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0;
     int a_end = nargs > 3 ? luaL_checkinteger(L, 4) : a->nrow;
     int b_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0;
-    nerv_matrix_(copy_toh)(a, b, a_begin, a_end, b_begin, &status);
+    nerv_matrix_(copy_toh)(a, b, a_begin, a_end, b_begin, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_copy_fromd)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
     int nargs = lua_gettop(L);
     int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0;
     int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow;
     int a_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0;
-    nerv_matrix_(copy_fromd)(a, b, a_begin, b_begin, b_end, &status);
+    nerv_matrix_(copy_fromd)(a, b, a_begin, b_begin, b_end, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
@@ -106,36 +123,42 @@ static int nerv_matrix_(lua_copy_fromd)(lua_State *L) {
 extern const char *nerv_matrix_host_float_tname;
 static int nerv_matrix_(lua_copy_rows_fromh_by_idx)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 5);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
     const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_host_float_tname);
     long nrow = a->nrow;
     int b_begin = lua_gettop(L) > 3 ? luaL_checkinteger(L, 4) : 0;
-    nerv_matrix_(copy_rows_fromh_by_idx)(a, b, idx, b_begin, &status);
+    nerv_matrix_(copy_rows_fromh_by_idx)(a, b, idx, b_begin, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_copy_rows_fromd_by_idx)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 5);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
     const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_(tname));
     long nrow = a->nrow;
     int idx_begin = lua_gettop(L) > 3 ? luaL_checkinteger(L, 4) : 0;
-    nerv_matrix_(copy_rows_fromd_by_idx)(a, b, idx, idx_begin, &status);
+    nerv_matrix_(copy_rows_fromd_by_idx)(a, b, idx, idx_begin, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_copy_rows_fromd_by_colidx)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 5);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
     const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_(tname));
     long nrow = a->nrow;
     int idx_begin = lua_gettop(L) > 3 ? luaL_checkinteger(L, 4) : 0;
-    nerv_matrix_(copy_rows_fromd_by_colidx)(a, b, idx, idx_begin, &status);
+    nerv_matrix_(copy_rows_fromd_by_colidx)(a, b, idx, idx_begin, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
@@ -145,12 +168,14 @@ static int nerv_matrix_(lua_update_select_rows_by_rowidx)(lua_State *L) {
     /* update c's select rows,
      * i.e. c[idx[i]] = c[idx[i]] * (1 - beta * alpha) + a[i] * alpha */
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_(tname));
     MATRIX_ELEM alpha = luaL_checknumber(L, 4);
     MATRIX_ELEM beta = luaL_checknumber(L, 5);
-    nerv_matrix_(update_select_rows_by_rowidx)(c, a, idx, alpha, beta, &status);
+    nerv_matrix_(update_select_rows_by_rowidx)(c, a, idx, alpha, beta, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
@@ -159,12 +184,14 @@ static int nerv_matrix_(lua_update_select_rows_by_colidx)(lua_State *L) {
     /* update c's select rows,
      * i.e. c[idx[i]] = c[idx[i]] * (1 - beta * alpha) + a[i] * alpha */
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_(tname));
     MATRIX_ELEM alpha = luaL_checknumber(L, 4);
     MATRIX_ELEM beta = luaL_checknumber(L, 5);
-    nerv_matrix_(update_select_rows_by_colidx)(c, a, idx, alpha, beta, &status);
+    nerv_matrix_(update_select_rows_by_colidx)(c, a, idx, alpha, beta, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
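Throughout these bindings, the position passed to MATRIX_GET_CONTEXT is one slot past the last expected argument, so an explicit context may always be appended as an optional final parameter. A hypothetical binding (lua_example is not in the patch) making the convention explicit, mirroring lua_tanh above:

    /* two mandatory matrix arguments at slots 1-2, optional context at 3 */
    static int nerv_matrix_(lua_example)(lua_State *L)
    {
        Status status;
        MATRIX_CONTEXT *context;
        MATRIX_GET_CONTEXT(L, 3);  /* falls back to _default_context */
        Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
        Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
        nerv_matrix_(sigmoid)(a, b, context, &status);
        NERV_LUA_CHECK_STATUS(L, status);
        return 0;
    }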
diff --git a/nerv/matrix/generic/matrix.c b/nerv/matrix/generic/matrix.c
index c1da774..c2e57b8 100644
--- a/nerv/matrix/generic/matrix.c
+++ b/nerv/matrix/generic/matrix.c
@@ -1,15 +1,18 @@
 #ifdef NERV_GENERIC_MATRIX
+#include "../matrix.h"
 #include "../../lib/common.h"
 #include "../../lib/matrix/generic/matrix.h"
 
 extern const char *nerv_matrix_(tname);
 extern const char *MATRIX_BASE_TNAME;
-
 int nerv_matrix_(lua_new)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *self = nerv_matrix_(create)(luaL_checkinteger(L, 1),
-                                        luaL_checkinteger(L, 2), &status);
+                                        luaL_checkinteger(L, 2),
+                                        context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, self, nerv_matrix_(tname));
     return 1;
@@ -17,8 +20,10 @@ int nerv_matrix_(lua_new)(lua_State *L) {
 
 int nerv_matrix_(lua_destroy)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    nerv_matrix_(destroy)(self, &status);
+    nerv_matrix_(destroy)(self, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 1;
 }
@@ -128,18 +133,22 @@ void nerv_matrix_(lua_init)(lua_State *L) {
 
 static int nerv_matrix_(lua_add)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname));
     MATRIX_ELEM alpha = luaL_checknumber(L, 4);
     MATRIX_ELEM beta = luaL_checknumber(L, 5);
-    nerv_matrix_(add)(c, a, b, alpha, beta, &status);
+    nerv_matrix_(add)(c, a, b, alpha, beta, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_mul)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 8);
     Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname));
@@ -150,35 +159,41 @@ static int nerv_matrix_(lua_mul)(lua_State *L) {
                 : BLAS_OP_N;
     int tb = nargs > 6 ? nerv_matrix_(lua_get_blas_op)(*luaL_checkstring(L, 7)) \
                 : BLAS_OP_N;
-    nerv_matrix_(mul)(c, a, b, alpha, beta, ta, tb, &status);
+    nerv_matrix_(mul)(c, a, b, alpha, beta, ta, tb, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_sigmoid)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    nerv_matrix_(sigmoid)(a, b, &status);
+    nerv_matrix_(sigmoid)(a, b, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_sigmoid_grad)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     Matrix *nerr = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *err = luaT_checkudata(L, 2, nerv_matrix_(tname));
     Matrix *output = luaT_checkudata(L, 3, nerv_matrix_(tname));
-    nerv_matrix_(sigmoid_grad)(nerr, err, output, &status);
+    nerv_matrix_(sigmoid_grad)(nerr, err, output, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_softmax)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    Matrix *max_idx = nerv_matrix_(softmax)(b, a, &status);
+    Matrix *max_idx = nerv_matrix_(softmax)(b, a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, max_idx, nerv_matrix_(tname));
     return 1;
@@ -186,8 +201,10 @@ static int nerv_matrix_(lua_softmax)(lua_State *L) {
 
 static int nerv_matrix_(lua_rowsum)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    Matrix *b = nerv_matrix_(rowsum)(a, &status);
+    Matrix *b = nerv_matrix_(rowsum)(a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     return 1;
@@ -195,8 +212,10 @@ static int nerv_matrix_(lua_rowsum)(lua_State *L) {
 
 static int nerv_matrix_(lua_colsum)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    Matrix *b = nerv_matrix_(colsum)(a, &status);
+    Matrix *b = nerv_matrix_(colsum)(a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     return 1;
@@ -204,9 +223,11 @@ static int nerv_matrix_(lua_colsum)(lua_State *L) {
 
 static int nerv_matrix_(lua_colsame)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *ref = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    Matrix *b = nerv_matrix_(colsame)(a, ref, &status);
+    Matrix *b = nerv_matrix_(colsame)(a, ref, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     return 1;
@@ -214,8 +235,10 @@ static int nerv_matrix_(lua_colsame)(lua_State *L) {
 
 static int nerv_matrix_(lua_rowmax)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    Matrix *b = nerv_matrix_(rowmax)(a, &status);
+    Matrix *b = nerv_matrix_(rowmax)(a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     return 1;
@@ -223,10 +246,12 @@ static int nerv_matrix_(lua_rowmax)(lua_State *L) {
 
 static int nerv_matrix_(lua_rowmax_idx)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *b;
     Matrix *idx;
-    nerv_matrix_(rowmax_idx)(a, &b, &idx, &status);
+    nerv_matrix_(rowmax_idx)(a, &b, &idx, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     luaT_pushudata(L, idx, nerv_matrix_(tname));
@@ -235,37 +260,45 @@ static int nerv_matrix_(lua_rowmax_idx)(lua_State *L) {
 
 static int nerv_matrix_(lua_add_row)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
     double beta = luaL_checknumber(L, 3);
-    nerv_matrix_(add_row)(b, a, beta, &status);
+    nerv_matrix_(add_row)(b, a, beta, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_fill)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
     double val = luaL_checknumber(L, 2);
-    nerv_matrix_(fill)(self, val, &status);
+    nerv_matrix_(fill)(self, val, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_clip)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    double val_1 = luaL_checknumber(L, 2);
-    double val_2 = luaL_checknumber(L, 3);
-    nerv_matrix_(clip)(self, val_1, val_2, &status);
+    double val1 = luaL_checknumber(L, 2);
+    double val2 = luaL_checknumber(L, 3);
+    nerv_matrix_(clip)(self, val1, val2, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_trans)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    Matrix *b = nerv_matrix_(trans)(a, &status);
+    Matrix *b = nerv_matrix_(trans)(a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     return 1;
@@ -273,28 +306,34 @@ static int nerv_matrix_(lua_trans)(lua_State *L) {
 
 static int nerv_matrix_(lua_mul_elem)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname));
     Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    nerv_matrix_(mul_elem)(c, a, b, &status);
+    nerv_matrix_(mul_elem)(c, a, b, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_log_elem)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     const Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
     Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    nerv_matrix_(log_elem)(b, a, &status);
+    nerv_matrix_(log_elem)(b, a, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_decompress)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     const Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     int orig_col = luaL_checkinteger(L, 2);
-    Matrix *b = nerv_matrix_(decompress)(a, orig_col, &status);
+    Matrix *b = nerv_matrix_(decompress)(a, orig_col, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, b, nerv_matrix_(tname));
     return 1;
@@ -302,38 +341,46 @@ static int nerv_matrix_(lua_expand_frm)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    int context = luaL_checkinteger(L, 3);
-    nerv_matrix_(expand_frm)(a, b, context, &status);
+    int cont = luaL_checkinteger(L, 3);
+    nerv_matrix_(expand_frm)(a, b, cont, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_rearrange_frm)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 4);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
     int step = luaL_checkinteger(L, 3);
-    nerv_matrix_(rearrange_frm)(a, b, step, &status);
+    nerv_matrix_(rearrange_frm)(a, b, step, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_scale_rows_by_col)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    nerv_matrix_(scale_rows_by_col)(a, b, &status);
+    nerv_matrix_(scale_rows_by_col)(a, b, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_scale_rows_by_row)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
-    nerv_matrix_(scale_rows_by_row)(a, b, &status);
+    nerv_matrix_(scale_rows_by_row)(a, b, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
diff --git a/nerv/matrix/generic/mmatrix.c b/nerv/matrix/generic/mmatrix.c
index 93562d0..69000b7 100644
--- a/nerv/matrix/generic/mmatrix.c
+++ b/nerv/matrix/generic/mmatrix.c
@@ -1,4 +1,5 @@
 #ifdef NERV_GENERIC_MMATRIX
+#include "../matrix.h"
 #include "../../lib/matrix/generic/matrix.h"
 #include "../../lib/matrix/generic/elem_type.h"
 #define MATRIX_DATA_WRITE(L, data, idx, val) (data[idx] = val)
@@ -48,8 +49,10 @@ static void host_matrix_(init)(lua_State *L) {
 
 static int nerv_matrix_(lua_load)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 2);
     ChunkData *cdp = luaT_checkudata(L, 1, nerv_chunk_data_tname);
-    Matrix *self = nerv_matrix_(load)(cdp, &status);
+    Matrix *self = nerv_matrix_(load)(cdp, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     luaT_pushudata(L, self, nerv_matrix_(tname));
     return 1;
@@ -57,23 +60,27 @@ static int nerv_matrix_(lua_load)(lua_State *L) {
 
 static int nerv_matrix_(lua_save)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 3);
     ChunkFile *cfp = luaT_checkudata(L, 2, nerv_chunk_file_handle_tname);
     Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
-    nerv_matrix_(save)(self, cfp, &status);
+    nerv_matrix_(save)(self, cfp, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
 
 static int nerv_matrix_(lua_copy_fromh)(lua_State *L) {
     Status status;
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 6);
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
     const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
     int nargs = lua_gettop(L);
     int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0;
     int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow;
     int a_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0;
-    nerv_matrix_(copy_fromh)(a, b, a_begin, b_begin, b_end, &status);
+    nerv_matrix_(copy_fromh)(a, b, a_begin, b_begin, b_end, context, &status);
     NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
@@ -81,12 +88,14 @@ static int nerv_matrix_(lua_copy_fromh)(lua_State *L) {
 
 static int nerv_matrix_(lua_copy_rows_fromh_by_idx)(lua_State *L) {
     Status status;
-    Matrix *a=luaT_checkudata(L,1,nerv_matrix_(tname));
-    const Matrix *b=luaT_checkudata(L,2,nerv_matrix_(tname));
-    const Matrix *idx=luaT_checkudata(L,3,nerv_matrix_(tname));
-    int b_begin=lua_gettop(L)>3?luaL_checkinteger(L,4):0;
-    nerv_matrix_(copy_rows_fromh_by_idx)(a,b,idx,b_begin,&status);
-    NERV_LUA_CHECK_STATUS(L,status);
+    MATRIX_CONTEXT *context;
+    MATRIX_GET_CONTEXT(L, 5);
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_(tname));
+    int b_begin = lua_gettop(L) > 3 ? luaL_checkinteger(L, 4) : 0;
+    nerv_matrix_(copy_rows_fromh_by_idx)(a, b, idx, b_begin, context, &status);
+    NERV_LUA_CHECK_STATUS(L, status);
     return 0;
 }
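Note the rename in lua_expand_frm: the integer frame-window argument was called context in the old code and would now shadow the MContext pointer, hence cont. A sketch of the disambiguated C call, assuming the host-float expansion of nerv_matrix_(expand_frm) and a hypothetical wrapper:

    static void expand_example(Matrix *dst, const Matrix *src, MContext *ctx)
    {
        Status status;
        int cont = 2;  /* expand by +/-2 neighbouring frames */
        /* 'cont' is the frame window; 'ctx' is the compute context */
        nerv_matrix_host_float_expand_frm(dst, src, cont, ctx, &status);
    }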
diff --git a/nerv/matrix/init.lua b/nerv/matrix/init.lua
index da76e1b..ef2fb6b 100644
--- a/nerv/matrix/init.lua
+++ b/nerv/matrix/init.lua
@@ -130,12 +130,3 @@ end
 function nerv.MMatrix:copy_toh(b, ...)
     b:copy_fromh(self, ...)
 end
-
---- Print profiling info of host matrices
-function nerv.MMatrix.print_profile()
-    nerv.info("mmatrix profile not available")
-end
-
---- Clear profiling info of host matrices
-function nerv.MMatrix.clear_profile()
-end
diff --git a/nerv/matrix/matrix.h b/nerv/matrix/matrix.h
new file mode 100644
index 0000000..788f596
--- /dev/null
+++ b/nerv/matrix/matrix.h
@@ -0,0 +1,24 @@
+#ifndef NERV_LUA_MATRIX_H
+#define NERV_LUA_MATRIX_H
+#include "../lib/luaT/luaT.h"
+#define _MATRIX_GET_CONTEXT(L, p, tname, ctname) \
+    do { \
+        if (lua_gettop(L) < p) \
+        { \
+            luaT_pushmetatable(L, tname); \
+            lua_getfield(L, -1, "_default_context"); \
+            context = luaT_checkudata(L, -1, ctname); \
+            lua_pop(L, 2); \
+        } \
+        else \
+        { \
+            context = luaT_checkudata(L, p, ctname); \
+        } \
+    } while (0)
+
+extern const char *nerv_cuda_context_tname;
+extern const char *nerv_host_context_tname;
+extern const char *nerv_matrix_host_tname;
+#define MATRIX_GET_CONTEXT(L, p) _MATRIX_GET_CONTEXT(L, p, nerv_matrix_(tname), MATRIX_CONTEXT_TNAME)
+#define MMATRIX_GET_CONTEXT(L, p) _MATRIX_GET_CONTEXT(L, p, nerv_matrix_host_tname, nerv_host_context_tname)
+#endif
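For a host float matrix, MATRIX_GET_CONTEXT(L, p) expands to logic equivalent to the following open-coded helper (get_context is written out here purely for illustration): when fewer than p arguments are on the stack, the context is fetched from the _default_context field stored on the matrix metatable; otherwise the userdata at slot p is checked and used.

    static MContext *get_context(lua_State *L, int p)
    {
        MContext *context;
        if (lua_gettop(L) < p)  /* no explicit context supplied */
        {
            luaT_pushmetatable(L, nerv_matrix_host_float_tname);
            lua_getfield(L, -1, "_default_context");
            context = luaT_checkudata(L, -1, nerv_host_context_tname);
            lua_pop(L, 2);  /* pop field and metatable */
        }
        else
        {
            context = luaT_checkudata(L, p, nerv_host_context_tname);
        }
        return context;
    }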
nerv_host_context_destroy(self, &status); + NERV_LUA_CHECK_STATUS(L, status); + return 1; +} + +static const luaL_Reg nerv_host_context_methods[] = { + {"print_profile", nerv_host_context_lua_print_profile}, + {"clear_profile", nerv_host_context_lua_clear_profile}, + {NULL, NULL} +}; + +void nerv_host_context_lua_init(lua_State *L) { + luaT_newmetatable(L, nerv_host_context_tname, NULL, + nerv_host_context_lua_new, + nerv_host_context_lua_destroy, NULL); + luaL_register(L, NULL, nerv_host_context_methods); +} + void nerv_matrix_host_float_lua_init(lua_State *L); void nerv_matrix_host_double_lua_init(lua_State *L); void nerv_matrix_host_int_lua_init(lua_State *L); void nerv_lua_mmatrix_init(lua_State *L) { srand(1); + nerv_host_context_lua_init(L); nerv_matrix_host_float_lua_init(L); nerv_matrix_host_double_lua_init(L); nerv_matrix_host_int_lua_init(L); } +#define MATRIX_CONTEXT MContext +#define MATRIX_CONTEXT_TNAME nerv_host_context_tname + #define MATRIX_USE_FLOAT #define host_matrix_(NAME) host_matrix_float_##NAME #define nerv_matrix_(NAME) nerv_matrix_host_float_##NAME @@ -29,8 +76,10 @@ static void host_matrix_(init_extra)(lua_State *L) { static int nerv_matrix_(lua_perm_gen)(lua_State *L) { Status status; + MATRIX_CONTEXT *context; + MATRIX_GET_CONTEXT(L, 2); int i, ncol = luaL_checkinteger(L, 1); - Matrix *self = nerv_matrix_(perm_gen)(ncol, &status); + Matrix *self = nerv_matrix_(perm_gen)(ncol, context, &status); NERV_LUA_CHECK_STATUS(L, status); luaT_pushudata(L, self, nerv_matrix_(tname)); return 1; @@ -1,6 +1,24 @@ #! /usr/bin/env luajit require 'nerv' -print("Greetings") +nerv.printf("*** NERV: A Lua-based toolkit for high-performance deep learning (alpha) ***\n") +nerv.info("automatically initialize a default CuContext...") +nerv.CuMatrix._default_context = nerv.CuContext() +nerv.info("the default CuContext is ok") + +nerv.info("automatically initialize a default MContext...") +nerv.MMatrix._default_context = nerv.MContext() +nerv.info("the default MContext is ok") + +-- only for backward compatibilty, will be removed in the future +local function _add_profile_method(cls) + local c = cls._default_context + cls.print_profile = function () c:print_profile() end + cls.clear_profile = function () c:clear_profile() end +end +_add_profile_method(nerv.CuMatrix) +_add_profile_method(nerv.MMatrix) + + if #arg < 1 then return end @@ -11,3 +29,5 @@ for i = 2, #arg do end arg = script_arg dofile(script) +nerv.CuMatrix.print_profile() +nerv.MMatrix.print_profile() diff --git a/nerv/nerv-scm-1.rockspec b/nerv/nerv-scm-1.rockspec index 6949f54..0e1e47f 100644 --- a/nerv/nerv-scm-1.rockspec +++ b/nerv/nerv-scm-1.rockspec @@ -17,6 +17,7 @@ build = { type = "make", build_variables = { CFLAGS="$(CFLAGS) -Wall -Wextra -g -O2", + --CFLAGS="$(CFLAGS) -Wall -Wextra -g", LIBFLAG="$(LIBFLAG)", LUA_LIBDIR="$(LUA_LIBDIR)", LUA_BINDIR="$(LUA_BINDIR)", diff --git a/nerv/test/matrix_func.lua b/nerv/test/matrix_func.lua index 3750ddd..07ddf9c 100644 --- a/nerv/test/matrix_func.lua +++ b/nerv/test/matrix_func.lua @@ -128,7 +128,7 @@ function _test_all_shape(mat_type, m, n, k, fill) print(a) b:copy_rows_from_by_idx(a, idx) b = mat_type(2, m) - b:copy_rows_from_by_idx(a, idx, 2, 2) + b:copy_rows_from_by_idx(a, idx, 2) print(a) print(b) -- test expand_frm |