Diffstat (limited to 'nerv/lib/matrix')
-rw-r--r--  nerv/lib/matrix/cuda_helper.h          63
-rw-r--r--  nerv/lib/matrix/cukernel.h              2
-rw-r--r--  nerv/lib/matrix/cumatrix.c             88
-rw-r--r--  nerv/lib/matrix/cumatrix.h             19
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu     7
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c    149
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h    101
-rw-r--r--  nerv/lib/matrix/generic/matrix.c       13
-rw-r--r--  nerv/lib/matrix/generic/matrix.h        7
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.c     111
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.h      58
-rw-r--r--  nerv/lib/matrix/mmatrix.c              37
-rw-r--r--  nerv/lib/matrix/mmatrix.h              12
13 files changed, 436 insertions, 231 deletions
diff --git a/nerv/lib/matrix/cuda_helper.h b/nerv/lib/matrix/cuda_helper.h
index 13d5728..5c75e38 100644
--- a/nerv/lib/matrix/cuda_helper.h
+++ b/nerv/lib/matrix/cuda_helper.h
@@ -54,6 +54,28 @@
cudaDeviceSynchronize(); \
} while (0)
+#define CURAND_SAFE_SYNC_CALL(call, status) \
+ do { \
+ curandStatus_t err = (call); \
+ if (err != CURAND_STATUS_SUCCESS) \
+ { \
+ NERV_SET_STATUS(status, MAT_CUBLAS_ERR, curandGetErrorString(err)); \
+ return; \
+ } \
+ cudaDeviceSynchronize(); \
+ } while (0)
+
+#define CURAND_SAFE_SYNC_CALL_RET(call, status) \
+ do { \
+ curandStatus_t err = (call); \
+ if (err != CURAND_STATUS_SUCCESS) \
+ { \
+ NERV_SET_STATUS(status, MAT_CUBLAS_ERR, curandGetErrorString(err)); \
+ return 0; \
+ } \
+ cudaDeviceSynchronize(); \
+ } while (0)
+
#define CHECK_SAME_DIMENSION(a, b, status) \
do { \
if (!(a->nrow == b->nrow && a->ncol == b->ncol)) \
@@ -96,15 +118,46 @@ static const char *cublasGetErrorString(cublasStatus_t err) {
return "<unknown>";
}
+static const char *curandGetErrorString(curandStatus_t err) {
+ switch (err)
+ {
+ case CURAND_STATUS_VERSION_MISMATCH:
+ return "Header file and linked library version do not match";
+ case CURAND_STATUS_NOT_INITIALIZED:
+ return "Generator not initialized";
+ case CURAND_STATUS_ALLOCATION_FAILED:
+ return "Memory allocation failed";
+ case CURAND_STATUS_TYPE_ERROR:
+ return "Generator is wrong type";
+ case CURAND_STATUS_OUT_OF_RANGE:
+ return "Argument out of range";
+ case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
+            return "Length requested is not a multiple of dimension";
+ case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
+ return "GPU does not have double precision required by MRG32k3a";
+ case CURAND_STATUS_LAUNCH_FAILURE:
+ return "Kernel launch failure";
+ case CURAND_STATUS_PREEXISTING_FAILURE:
+ return "Preexisting failure on library entry";
+ case CURAND_STATUS_INITIALIZATION_FAILED:
+ return "Initialization of CUDA failed";
+ case CURAND_STATUS_ARCH_MISMATCH:
+ return "Architecture mismatch, GPU does not support requested feature";
+ case CURAND_STATUS_INTERNAL_ERROR:
+ return "Internal library error";
+ }
+ return "<unknown>";
+}
#define PROFILE_START \
do { \
- cudaEventRecord(profile_start, 0);
+ cudaEventRecord(context->profile_start, 0);
#define PROFILE_STOP \
- cudaEventRecord(profile_stop, 0); \
- cudaEventSynchronize(profile_stop); \
+ cudaEventRecord(context->profile_stop, 0); \
+ cudaEventSynchronize(context->profile_stop); \
float milliseconds = 0; \
- cudaEventElapsedTime(&milliseconds, profile_start, profile_stop); \
- accu_profile(__func__, milliseconds / 1000); \
+ cudaEventElapsedTime(&milliseconds, context->profile_start, \
+ context->profile_stop); \
+ nerv_cuda_context_accu_profile(context, __func__, milliseconds / 1000); \
} while (0);
#define PROFILE_END
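Note: for readers unfamiliar with the NERV error-handling macros, the sketch below shows roughly what a CURAND_SAFE_SYNC_CALL invocation expands to after preprocessing; the generator, buffer and count names (gen, buf, n) are illustrative, not taken from this diff.

    /* Illustrative expansion of
     *   CURAND_SAFE_SYNC_CALL(curandGenerateUniform(gen, buf, n), status);
     * inside a function that takes Status *status and returns void.
     */
    do {
        curandStatus_t err = (curandGenerateUniform(gen, buf, n));
        if (err != CURAND_STATUS_SUCCESS)
        {
            NERV_SET_STATUS(status, MAT_CUBLAS_ERR, curandGetErrorString(err));
            return;                  /* the _RET variant returns 0 instead */
        }
        cudaDeviceSynchronize();     /* force synchronous error reporting */
    } while (0);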
diff --git a/nerv/lib/matrix/cukernel.h b/nerv/lib/matrix/cukernel.h
index c84200e..d59a070 100644
--- a/nerv/lib/matrix/cukernel.h
+++ b/nerv/lib/matrix/cukernel.h
@@ -3,7 +3,7 @@ void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, Matrix *c);
void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b);
void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
-void cudak_(cuda_rand_uniform)(const Matrix *a); /* a's curand_gen may be modified */
+void cudak_(cuda_rand_uniform)(const Matrix *a, CuContext *context); /* a's curand_gen may be modified */
void cudak_(cuda_thres_mask)(const Matrix *a, const Matrix *b, double thres, double low, double high);
void cudak_(cuda_tanh)(const Matrix *a, Matrix *b);
void cudak_(cuda_tanh_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
diff --git a/nerv/lib/matrix/cumatrix.c b/nerv/lib/matrix/cumatrix.c
index d998871..2fbe7d8 100644
--- a/nerv/lib/matrix/cumatrix.c
+++ b/nerv/lib/matrix/cumatrix.c
@@ -1,23 +1,12 @@
#define NERV_GENERIC_CUMATRIX
+#define MATRIX_CONTEXT CuContext
#include "cumatrix.h"
#include "cuda_helper.h"
#include <string.h>
#include <time.h>
-#define PROFILE_HASHMAP_SIZE 123457
-static cublasHandle_t cublas_handle;
-static cudaEvent_t profile_start, profile_stop;
-curandGenerator_t curand_gen;
-static HashMap *profile;
-void nerv_cumatrix_select_gpu(int dev, Status *status) {
- fprintf(stderr, "** selecting GPU %d\n", dev);
- NERV_SET_STATUS(status, NERV_NORMAL, 0);
- CUDA_SAFE_SYNC_CALL(cudaSetDevice(dev), status);
- CUDA_SAFE_SYNC_CALL(cublasDestroy(cublas_handle), status);
- CUDA_SAFE_SYNC_CALL(cublasCreate(&cublas_handle), status);
-}
-
-void nerv_cumatrix_print_profile() {
+void nerv_cuda_context_print_profile(CuContext *context) {
+ HashMap *profile = context->profile;
size_t i;
fprintf(stderr, "*** [nerv cumatrix profile] **\n");
for (i = 0; i < profile->size; i++)
@@ -30,28 +19,72 @@ void nerv_cumatrix_print_profile() {
}
}
-void nerv_cumatrix_clear_profile() {
- hashmap_clear(profile);
+void nerv_cuda_context_clear_profile(CuContext *context) {
+ nerv_hashmap_clear(context->profile);
}
-void accu_profile(const char *name, float delta) {
- float *val = hashmap_getval(profile, name);
+void nerv_cuda_context_accu_profile(CuContext *context,
+ const char *name, float delta) {
+ HashMap *profile = context->profile;
+ float *val = nerv_hashmap_getval(profile, name);
if (!val)
{
val = malloc(sizeof(float));
*val = 0;
- hashmap_setval(profile, name, val);
+ nerv_hashmap_setval(profile, name, val);
}
*val += delta;
}
-void nerv_cumatrix_init() {
- cublasCreate(&cublas_handle);
- curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
- curandSetPseudoRandomGeneratorSeed(curand_gen, time(NULL));
- cudaEventCreate(&profile_start);
- cudaEventCreate(&profile_stop);
- profile = hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp);
+static void new_cuda_handles(CuContext *context, Status *status) {
+ CUBLAS_SAFE_SYNC_CALL(cublasCreate(&(context->cublas_handle)), status);
+ CURAND_SAFE_SYNC_CALL(curandCreateGenerator(&(context->curand_gen),
+ CURAND_RNG_PSEUDO_DEFAULT), status);
+ CURAND_SAFE_SYNC_CALL(
+ curandSetPseudoRandomGeneratorSeed(context->curand_gen, time(NULL)),
+ status);
+ CUDA_SAFE_SYNC_CALL(cudaEventCreate(&(context->profile_start)), status);
+ CUDA_SAFE_SYNC_CALL(cudaEventCreate(&(context->profile_stop)), status);
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+static void free_cuda_handles(CuContext *context, Status *status) {
+ CUBLAS_SAFE_SYNC_CALL(cublasDestroy(context->cublas_handle), status);
+ CURAND_SAFE_SYNC_CALL(curandDestroyGenerator(context->curand_gen), status);
+ CUDA_SAFE_SYNC_CALL(cudaEventDestroy(context->profile_start), status);
+ CUDA_SAFE_SYNC_CALL(cudaEventDestroy(context->profile_stop), status);
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+CuContext *nerv_cuda_context_create(Status *status) {
+ CuContext *context = (CuContext *)malloc(sizeof(CuContext));
+ new_cuda_handles(context, status);
+ if (status->err_code != NERV_NORMAL)
+ return NULL;
+ context->profile = nerv_hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp);
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+ return context;
+}
+
+void nerv_cuda_context_destroy(CuContext *context, Status *status) {
+ free_cuda_handles(context, status);
+ if (status->err_code != NERV_NORMAL)
+ return;
+ nerv_hashmap_destroy(context->profile);
+ free(context);
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+void nerv_cuda_context_select_gpu(CuContext *context,
+ int dev, Status *status) {
+ free_cuda_handles(context, status);
+ if (status->err_code != NERV_NORMAL)
+ return;
+ CUDA_SAFE_SYNC_CALL(cudaSetDevice(dev), status);
+ new_cuda_handles(context, status);
+ if (status->err_code != NERV_NORMAL)
+ return;
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
#define MATRIX_USE_FLOAT
@@ -59,7 +92,6 @@ void nerv_cumatrix_init() {
#define nerv_matrix_(NAME) nerv_matrix_cuda_float_##NAME
#define cudak_(NAME) cudak_float_ ## NAME
#define NERV_CUBLAS_(NAME) cublasS##NAME
-#define MATRIX_CUMATRIX_HOST_TNAME nerv_matrix_host_float_tname
#include "generic/cumatrix.c"
#undef NERV_CUBLAS_
@@ -72,12 +104,10 @@ void nerv_cumatrix_init() {
#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
-#undef MATRIX_CUMATRIX_HOST_TNAME
#define MATRIX_USE_DOUBLE
#define cuda_matrix_(NAME) cuda_matrix_double_##NAME
#define nerv_matrix_(NAME) nerv_matrix_cuda_double_##NAME
#define cudak_(NAME) cudak_double_ ## NAME
#define NERV_CUBLAS_(NAME) cublasD##NAME
-#define MATRIX_CUMATRIX_HOST_TNAME nerv_matrix_host_double_tname
#include "generic/cumatrix.c"
diff --git a/nerv/lib/matrix/cumatrix.h b/nerv/lib/matrix/cumatrix.h
index b47e14b..280035b 100644
--- a/nerv/lib/matrix/cumatrix.h
+++ b/nerv/lib/matrix/cumatrix.h
@@ -2,8 +2,19 @@
#define NERV_CUMATRIX_H
#include "matrix.h"
#include "../common.h"
-void nerv_cumatrix_print_profile();
-void nerv_cumatrix_clear_profile();
-void nerv_cumatrix_init();
-void nerv_cumatrix_select_gpu(int dev, Status *status);
+#include "cuda_helper.h"
+
+typedef struct CuContext {
+ cublasHandle_t cublas_handle;
+ cudaEvent_t profile_start, profile_stop;
+ curandGenerator_t curand_gen;
+ HashMap *profile;
+} CuContext;
+
+void nerv_cuda_context_print_profile(CuContext *context);
+void nerv_cuda_context_clear_profile(CuContext *context);
+void nerv_cuda_context_accu_profile(CuContext *context, const char *name, float delta);
+void nerv_cuda_context_select_gpu(CuContext *context, int dev, Status *status);
+CuContext *nerv_cuda_context_create(Status *status);
+void nerv_cuda_context_destroy(CuContext *context, Status *status);
#endif
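Note: a hedged usage sketch of the context API declared above. The overall change in this diff is that handles previously kept in file-level statics (cublas_handle, curand_gen, the profile hashmap) now live in a per-caller CuContext, so every allocating or compute call gains a context parameter. The include path, the check() helper, and the assumption that the nerv_matrix_cuda_float_* instantiations are visible to the caller are all illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include "nerv/lib/matrix/cumatrix.h"

    static void check(Status *status) {
        if (status->err_code != NERV_NORMAL) {   /* same check the library itself uses */
            fprintf(stderr, "nerv: cumatrix call failed\n");
            exit(1);
        }
    }

    int main(void) {
        Status status;
        CuContext *ctx = nerv_cuda_context_create(&status);  /* replaces the old nerv_cumatrix_init() */
        check(&status);

        nerv_cuda_context_select_gpu(ctx, 0, &status);       /* optional: pick a device */
        check(&status);

        Matrix *m = nerv_matrix_cuda_float_create(16, 16, ctx, &status);
        check(&status);
        nerv_matrix_cuda_float_fill(m, 1.0, ctx, &status);
        check(&status);

        nerv_cuda_context_print_profile(ctx);                /* profiling is now per context */
        nerv_matrix_cuda_float_destroy(m, ctx, &status);
        check(&status);
        nerv_cuda_context_destroy(ctx, &status);
        return 0;
    }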
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 51e3b6a..0e09cfa 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -445,13 +445,12 @@ extern "C" {
cudaStreamSynchronize(0);
}
- extern curandGenerator_t curand_gen;
- void cudak_(cuda_rand_uniform)(const Matrix *a) {
+ void cudak_(cuda_rand_uniform)(const Matrix *a, CuContext *context) {
#ifdef MATRIX_USE_FLOAT
- curandGenerateUniform(curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+ curandGenerateUniform(context->curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
#endif
#ifdef MATRIX_USE_DOUBLE
- curandGenerateUniformDouble(curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
+ curandGenerateUniformDouble(context->curand_gen, MATRIX_ELEM_PTR(a), a->nrow * a->stride / sizeof(MATRIX_ELEM));
#endif
}
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 7b70607..6342d90 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -1,10 +1,11 @@
#ifdef NERV_GENERIC_CUMATRIX
#include "matrix.h"
#include "elem_type.h"
-#define MATRIX_DATA_FREE(ptr, status) cuda_matrix_(free)(ptr, status)
-#define MATRIX_DATA_ALLOC(dptr, stride, width, height, status) \
- cuda_matrix_(alloc)(dptr, stride, width, height, status)
-
+#define MATRIX_DATA_FREE(ptr, context, status) \
+ cuda_matrix_(free)(ptr, context, status)
+#define MATRIX_DATA_ALLOC(dptr, stride, width, height, context, status) \
+ cuda_matrix_(alloc)(dptr, stride, width, height, \
+ context, status)
#define NERV_GENERIC_MATRIX
#define NERV_GENERIC_CUKERNEL
#include "../../common.h"
@@ -14,12 +15,13 @@
void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
+ CuContext *context,
Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
CHECK_SAME_DIMENSION(a, c, status);
PROFILE_START
CUBLAS_SAFE_SYNC_CALL(
- NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
+ NERV_CUBLAS_(geam)(context->cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
a->ncol, a->nrow,
&alpha,
MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
@@ -33,7 +35,8 @@ void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
- int ta, int tb, Status *status) {
+ int ta, int tb,
+ CuContext *context, Status *status) {
#define SWAP(a, b) \
do { int t = (a); (a) = (b); (b) = t; } while (0)
@@ -46,7 +49,7 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
/* Because matrix in Nerv is row-major, here b comes first */
PROFILE_START
CUBLAS_SAFE_SYNC_CALL(
- NERV_CUBLAS_(gemm)(cublas_handle, tb, ta,
+ NERV_CUBLAS_(gemm)(context->cublas_handle, tb, ta,
bn, am, bm,
&alpha,
MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM),
@@ -58,7 +61,8 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
PROFILE_START
cudak_(cuda_sigmoid)(b, a);
@@ -67,7 +71,8 @@ void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
}
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
- const Matrix *output, Status *status) {
+ const Matrix *output,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(nerr, err, status);
CHECK_SAME_DIMENSION(nerr, output, status);
PROFILE_START
@@ -76,14 +81,16 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(rand_uniform)(Matrix *a, Status *status) {
+void nerv_matrix_(rand_uniform)(Matrix *a, CuContext *context, Status *status) {
PROFILE_START
- cudak_(cuda_rand_uniform)(a);
+ cudak_(cuda_rand_uniform)(a, context);
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, double high, Status *status) {
+void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres,
+ double low, double high,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
PROFILE_START
cudak_(cuda_thres_mask)(a, b, thres, low, high);
@@ -91,7 +98,8 @@ void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b, double thres, double low, do
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
PROFILE_START
cudak_(cuda_tanh)(b, a);
@@ -99,8 +107,8 @@ void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
- const Matrix *output, Status *status) {
+void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err, const Matrix *output,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(nerr, err, status);
CHECK_SAME_DIMENSION(nerr, output, status);
PROFILE_START
@@ -109,24 +117,25 @@ void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+ CuContext *context, Status *status) {
Matrix *max, *max_idx;
Matrix *dno;
CHECK_SAME_DIMENSION_RET(a, b, status);
- max = nerv_matrix_(create)(a->nrow, 1, status);
+ max = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
- max_idx = nerv_matrix_(create)(a->nrow, 1, status);
+ max_idx = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
{
- nerv_matrix_(destroy)(max, status);
+ nerv_matrix_(destroy)(max, context, status);
return NULL;
}
- dno = nerv_matrix_(create)(a->nrow, 1, status);
+ dno = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
{ /* FIXME: destroy may also fail? */
- nerv_matrix_(destroy)(max, status);
- nerv_matrix_(destroy)(max_idx, status);
+ nerv_matrix_(destroy)(max, context, status);
+ nerv_matrix_(destroy)(max_idx, context, status);
return NULL;
}
PROFILE_START
@@ -134,14 +143,14 @@ Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
cudak_(cuda_softmax_denominator)(a, max, dno);
cudak_(cuda_softmax_final)(a, max, dno, b);
PROFILE_STOP
- nerv_matrix_(destroy)(max, status);
- nerv_matrix_(destroy)(dno, status);
+ nerv_matrix_(destroy)(max, context, status);
+ nerv_matrix_(destroy)(dno, context, status);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
return max_idx;
}
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, CuContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
PROFILE_START
@@ -151,8 +160,8 @@ Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
return b;
}
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, CuContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
PROFILE_START
@@ -163,8 +172,8 @@ Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
}
Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
- Status *status) {
- Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+ CuContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
CHECK_SAME_DIMENSION_RET(a, ref, status);
@@ -175,8 +184,8 @@ Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
return b;
}
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, CuContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
PROFILE_START
@@ -187,15 +196,15 @@ Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
}
void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
- Status *status) {
- *b = nerv_matrix_(create)(a->nrow, 1, status);
+ CuContext *context, Status *status) {
+ *b = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return;
- *idx = nerv_matrix_(create)(a->nrow, 1, status);
+ *idx = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
{
/* FIXME: destroy may also fail? */
- nerv_matrix_(destroy)(*b, status);
+ nerv_matrix_(destroy)(*b, context, status);
return;
}
PROFILE_START
@@ -205,7 +214,7 @@ void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
}
void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
- Status *status) {
+ CuContext *context, Status *status) {
if (a->ncol != b->ncol)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
if (a->nrow != 1)
@@ -216,23 +225,25 @@ void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
+void nerv_matrix_(fill)(Matrix *self, double val,
+ CuContext *context, Status *status) {
PROFILE_START
cudak_(cuda_fill)(self, val);
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status) {
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+ CuContext *context, Status *status) {
PROFILE_START
- cudak_(cuda_clip)(self, val_1, val_2);
+ cudak_(cuda_clip)(self, val1, val2);
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
- Status *status) {
+ CuContext *context, Status *status) {
if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow &&
a_begin + b_end - b_begin <= a->nrow))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -251,7 +262,7 @@ void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
- Status *status) {
+ CuContext *context, Status *status) {
if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow &&
a_begin + b_end - b_begin <= a->nrow))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -270,7 +281,7 @@ void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
void nerv_matrix_(copy_toh)(Matrix *a, const Matrix *b,
int a_begin, int a_end, int b_begin,
- Status *status) {
+ CuContext *context, Status *status) {
if (!(0 <= a_begin && a_begin < a_end && a_end <= a->nrow &&
b_begin + a_end - a_begin <= b->nrow))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -287,15 +298,15 @@ void nerv_matrix_(copy_toh)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
+Matrix *nerv_matrix_(trans)(Matrix *a, CuContext *context, Status *status) {
MATRIX_ELEM alpha = 1, beta = 0;
- Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, status);
+ Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
/* FIXME: possible memory leak when lua error is raised */
PROFILE_START
CUBLAS_SAFE_SYNC_CALL_RET(
- NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
+ NERV_CUBLAS_(geam)(context->cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
a->nrow, a->ncol,
&alpha,
MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM),
@@ -309,7 +320,7 @@ Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
}
void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
- Status *status) {
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
CHECK_SAME_DIMENSION(a, c, status);
PROFILE_START
@@ -318,7 +329,8 @@ void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
PROFILE_START
cudak_(cuda_log_elem)(a, b);
@@ -326,14 +338,15 @@ void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status) {
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+ CuContext *context, Status *status) {
Matrix *b;
if (a->ncol != 1)
{
NERV_SET_STATUS(status, MAT_COL_VECTOR_EXP, 0);
return NULL;
}
- b = nerv_matrix_(create)(a->nrow, orig_col, status);
+ b = nerv_matrix_(create)(a->nrow, orig_col, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
PROFILE_START
@@ -345,7 +358,8 @@ Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status)
}
void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status) {
+ const Matrix *idx, int b_begin,
+ CuContext *context, Status *status) {
long nrow = a->nrow;
if (!(0 <= b_begin && b_begin + nrow <= idx->ncol))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -379,7 +393,8 @@ void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status) {
+ const Matrix *idx, int b_begin,
+ CuContext *context, Status *status) {
long nrow = a->nrow;
if (!(0 <= b_begin && b_begin + nrow <= idx->ncol))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -394,7 +409,8 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status) {
+ const Matrix *idx, int b_begin,
+ CuContext *context, Status *status) {
long nrow = a->nrow;
if (!(0 <= b_begin && b_begin + nrow <= idx->nrow))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -412,7 +428,9 @@ void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
#ifdef __NERV_FUTURE_CUDA_7
-void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
+void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a,
+ const Matrix *idx, double alpha, double beta,
+ CuContext *context, Status *status) {
long nrow = a->nrow;
if (idx->nrow != 1 || idx->ncol != a->nrow)
NERV_EXIT_STATUS(status, MAT_IDX_VECTOR_EXP, 0);
@@ -424,7 +442,9 @@ void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, cons
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
+void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a,
+ const Matrix *idx, double alpha, double beta,
+ CuContext *context, Status *status) {
long nrow = a->nrow;
if (idx->ncol != 1 || idx->nrow != a->nrow)
NERV_EXIT_STATUS(status, MAT_IDX_VECTOR_EXP, 0);
@@ -438,20 +458,20 @@ void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, cons
#endif
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
- int context, Status *status) {
+ int cont, CuContext *context, Status *status) {
if (a->nrow != b->nrow)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
- if (a->ncol != b->ncol * (context * 2 + 1))
+ if (a->ncol != b->ncol * (cont * 2 + 1))
NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
"the width should be 2 * context + 1");
PROFILE_START
- cudak_(cuda_expand_frm)(b, a, context);
+ cudak_(cuda_expand_frm)(b, a, cont);
PROFILE_STOP
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
- int step, Status *status) {
+ int step, CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
if (b->ncol % step)
NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
@@ -463,7 +483,7 @@ void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
- Status *status) {
+ CuContext *context, Status *status) {
if (a->nrow != b->nrow)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
if (b->ncol != 1)
@@ -475,7 +495,7 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
- Status *status) {
+ CuContext *context, Status *status) {
if (a->ncol != b->ncol)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
if (b->nrow != 1)
@@ -486,7 +506,8 @@ void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
PROFILE_START
cudak_(cuda_prefixsum_row)(b, a);
@@ -494,14 +515,14 @@ void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-static void cuda_matrix_(free)(MATRIX_ELEM *ptr, Status *status) {
+static void cuda_matrix_(free)(MATRIX_ELEM *ptr, CuContext *context, Status *status) {
CUDA_SAFE_SYNC_CALL(cudaFree(ptr), status);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
static void cuda_matrix_(alloc)(MATRIX_ELEM **dptr,
size_t *stride, long width, long height,
- Status *status) {
+ CuContext *context, Status *status) {
PROFILE_START
CUDA_SAFE_SYNC_CALL(cudaMallocPitch((void **)dptr, stride, width, height),
status);
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index f3c2df8..fe83b5d 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -2,76 +2,99 @@
void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
- Status *status);
+ CuContext *context, Status *status);
void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
- int ta, int tb, Status *status);
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status);
+ int ta, int tb,
+ CuContext *context, Status *status);
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status);
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
- const Matrix *output, Status *status);
-void nerv_matrix_(tanh)(Matrix *a, const Matrix *b, Status *status);
+ const Matrix *output,
+ CuContext *context, Status *status);
+void nerv_matrix_(tanh)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status);
void nerv_matrix_(tanh_grad)(Matrix *nerr, const Matrix *err,
- const Matrix *output, Status *status);
+ const Matrix *output,
+ CuContext *context, Status *status);
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status);
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status);
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status);
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+ CuContext *context, Status *status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, CuContext *context, Status *status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, CuContext *context, Status *status);
Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
- Status *status);
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status);
+ CuContext *context, Status *status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, CuContext *context, Status *status);
void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
- Status *status);
+ CuContext *context, Status *status);
void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
- Status *status);
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status);
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status);
+ CuContext *context, Status *status);
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+ CuContext *context, Status *status);
+void nerv_matrix_(fill)(Matrix *self, double val,
+ CuContext *context, Status *status);
void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
- Status *status);
+ CuContext *context, Status *status);
void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
- Status *status);
+ CuContext *context, Status *status);
void nerv_matrix_(copy_toh)(Matrix *a, const Matrix *b,
int a_begin, int a_end, int b_begin,
- Status *status);
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status);
+ CuContext *context, Status *status);
+Matrix *nerv_matrix_(trans)(Matrix *a, CuContext *context, Status *status);
void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
- Status *status);
+ CuContext *context, Status *status);
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status);
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+ CuContext *context, Status *status);
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status);
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+ CuContext *context, Status *status);
void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status);
+ const Matrix *idx, int b_begin,
+ CuContext *context, Status *status);
void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status);
+ const Matrix *idx, int b_begin,
+ CuContext *context, Status *status);
void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status);
+ const Matrix *idx, int b_begin,
+ CuContext *context, Status *status);
#ifdef __NERV_FUTURE_CUDA_7
-void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status);
-void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status);
+void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a,
+ const Matrix *idx, double alpha, double beta,
+ CuContext *context, Status *status);
+void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a,
+ const Matrix *idx, double alpha, double beta,
+ CuContext *context, Status *status);
#endif
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
- int context, Status *status);
+ int cont, CuContext *context, Status *status);
void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
- int step, Status *status);
+ int step, CuContext *context, Status *status);
void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
- Status *status);
+ CuContext *context, Status *status);
void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
- Status *status);
-void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status);
+ CuContext *context, Status *status);
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status);
void nerv_matrix_(thres_mask)(Matrix *a, Matrix *b,
double thres, double low, double high,
- Status *status);
-void nerv_matrix_(rand_uniform)(Matrix *a, Status *status);
+ CuContext *context, Status *status);
+void nerv_matrix_(rand_uniform)(Matrix *a, CuContext *context, Status *status);
#ifdef __NERV_FUTURE_CUDA_7
-void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx,
- double alpha, double beta, Status *status);
-void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a, const Matrix *idx,
- double alpha, double beta, Status *status);
+void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a,
+ const Matrix *idx,
+ double alpha, double beta,
+ CuContext *context, Status *status);
+void nerv_matrix_(update_select_rows_by_colidx)(Matrix *c, const Matrix *a,
+ const Matrix *idx,
+ double alpha, double beta,
+ CuContext *context, Status *status);
#endif
-void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status);
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
+ CuContext *context, Status *status);
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index 998d107..3bcc251 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -3,12 +3,12 @@
#include "matrix.h"
/* FIXME: malloc failure detection */
-void nerv_matrix_(data_free)(Matrix *self, Status *status) {
+void nerv_matrix_(data_free)(Matrix *self, MATRIX_CONTEXT *context, Status *status) {
assert(*self->data_ref > 0);
if (--(*self->data_ref) == 0)
{
/* free matrix data */
- MATRIX_DATA_FREE(MATRIX_ELEM_PTR_BASE(self), status);
+ MATRIX_DATA_FREE(MATRIX_ELEM_PTR_BASE(self), context, status);
free(self->data_ref);
free(self);
}
@@ -22,7 +22,8 @@ void nerv_matrix_(data_retain)(Matrix *self) {
(*self->data_ref)++;
}
-Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
+Matrix *nerv_matrix_(create)(long nrow, long ncol,
+ MATRIX_CONTEXT *context, Status *status) {
Matrix *self = (Matrix *)malloc(sizeof(Matrix));
self->nrow = nrow;
self->ncol = ncol;
@@ -30,7 +31,7 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
self->dim = 2;
MATRIX_DATA_ALLOC(&MATRIX_ELEM_PTR_BASE(self), &self->stride,
sizeof(MATRIX_ELEM) * self->ncol, self->nrow,
- status);
+ context, status);
if (status->err_code != NERV_NORMAL)
{
free(self);
@@ -44,8 +45,8 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
return self;
}
-void nerv_matrix_(destroy)(Matrix *self, Status *status) {
- nerv_matrix_(data_free)(self, status);
+void nerv_matrix_(destroy)(Matrix *self, MATRIX_CONTEXT *context, Status *status) {
+ nerv_matrix_(data_free)(self, context, status);
}
Matrix *nerv_matrix_(getrow)(Matrix *self, int row) {
diff --git a/nerv/lib/matrix/generic/matrix.h b/nerv/lib/matrix/generic/matrix.h
index 69b4e6d..2770c3e 100644
--- a/nerv/lib/matrix/generic/matrix.h
+++ b/nerv/lib/matrix/generic/matrix.h
@@ -1,6 +1,7 @@
#include "../matrix.h"
-Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status);
-void nerv_matrix_(destroy)(Matrix *self, Status *status);
+Matrix *nerv_matrix_(create)(long nrow, long ncol,
+ MATRIX_CONTEXT *context, Status *status);
+void nerv_matrix_(destroy)(Matrix *self, MATRIX_CONTEXT *context, Status *status);
Matrix *nerv_matrix_(getrow)(Matrix *self, int row);
-void nerv_matrix_(data_free)(Matrix *self, Status *status);
+void nerv_matrix_(data_free)(Matrix *self, MATRIX_CONTEXT *context, Status *status);
void nerv_matrix_(data_retain)(Matrix *self);
diff --git a/nerv/lib/matrix/generic/mmatrix.c b/nerv/lib/matrix/generic/mmatrix.c
index fa1dc5f..ad334e3 100644
--- a/nerv/lib/matrix/generic/mmatrix.c
+++ b/nerv/lib/matrix/generic/mmatrix.c
@@ -1,9 +1,11 @@
#ifdef NERV_GENERIC_MMATRIX
#include "matrix.h"
#include "elem_type.h"
-#define MATRIX_DATA_FREE(ptr, status) host_matrix_(free)(ptr, status)
-#define MATRIX_DATA_ALLOC(dptr, stride, width, height, status) \
- host_matrix_(alloc)(dptr, stride, width, height, status)
+#define MATRIX_DATA_FREE(ptr, context, status) \
+ host_matrix_(free)(ptr, context, status)
+#define MATRIX_DATA_ALLOC(dptr, stride, width, height, context, status) \
+ host_matrix_(alloc)(dptr, stride, width, height, \
+ context, status)
#define NERV_GENERIC_MATRIX
#include "../cuda_helper.h"
#include "../../common.h"
@@ -12,8 +14,8 @@
#include <cblas.h>
#include <float.h>
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, MContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -31,8 +33,9 @@ Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status) {
return b;
}
-Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref, Status *status) {
- Matrix *b = nerv_matrix_(create)(1, a->ncol, status);
+Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
+ MContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(1, a->ncol, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
CHECK_SAME_DIMENSION_RET(a, ref, status);
@@ -55,8 +58,8 @@ Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref, Status *status) {
return b;
}
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, MContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -75,8 +78,8 @@ Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status) {
return b;
}
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(a->nrow, 1, status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, MContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -96,15 +99,16 @@ Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status) {
return b;
}
-void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx, Status *status) {
- *b = nerv_matrix_(create)(a->nrow, 1, status);
+void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
+ MContext *context, Status *status) {
+ *b = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return;
- *idx = nerv_matrix_(create)(a->nrow, 1, status);
+ *idx = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
{
/* FIXME: destroy may also fail! */
- nerv_matrix_(destroy)(*b, status);
+ nerv_matrix_(destroy)(*b, context, status);
return;
}
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
@@ -127,8 +131,8 @@ void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx, Status *statu
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
- Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, status);
+Matrix *nerv_matrix_(trans)(Matrix *a, MContext *context, Status *status) {
+ Matrix *b = nerv_matrix_(create)(a->ncol, a->nrow, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a);
@@ -148,14 +152,15 @@ Matrix *nerv_matrix_(trans)(Matrix *a, Status *status) {
return b;
}
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status) {
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+ MContext *context, Status *status) {
Matrix *b;
if (a->ncol != 1)
{
NERV_SET_STATUS(status, MAT_COL_VECTOR_EXP, 0);
return NULL;
}
- b = nerv_matrix_(create)(a->nrow, orig_col, status);
+ b = nerv_matrix_(create)(a->nrow, orig_col, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
int i;
@@ -173,7 +178,9 @@ Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status)
return b;
}
-void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b, MATRIX_ELEM alpha, MATRIX_ELEM beta, Status *status) {
+void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
+ MATRIX_ELEM alpha, MATRIX_ELEM beta,
+ MContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
CHECK_SAME_DIMENSION(a, c, status);
int i, j;
@@ -197,7 +204,7 @@ void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b, MATRIX_ELEM
void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
- int ta, int tb, Status *status) {
+ int ta, int tb, MContext *context, Status *status) {
#define SWAP(a, b) \
do { int t = (a); (a) = (b); (b) = t; } while (0)
@@ -218,7 +225,7 @@ void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
}
void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
- Status *status) {
+ MContext *context, Status *status) {
if (a->ncol != b->ncol)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
if (a->nrow != 1)
@@ -236,23 +243,25 @@ void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status) {
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+ MContext *context, Status *status) {
int i, j;
size_t astride = self->stride;
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(self);
for (i = 0; i < self->nrow; i++)
{
for (j = 0; j < self->ncol; j++)
- if (arow[j] > val_2)
- arow[j] = val_2;
- else if (arow[j] < val_1)
- arow[j] = val_1;
+ if (arow[j] > val2)
+ arow[j] = val2;
+ else if (arow[j] < val1)
+ arow[j] = val1;
arow = MATRIX_NEXT_ROW_PTR(arow, astride);
}
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
+void nerv_matrix_(fill)(Matrix *self, double val,
+ MContext *context, Status *status) {
int i, j;
size_t astride = self->stride;
MATRIX_ELEM *arow = MATRIX_ELEM_PTR(self);
@@ -265,7 +274,8 @@ void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+ MContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
int i, j;
size_t astride = a->stride, bstride = b->stride;
@@ -282,7 +292,8 @@ void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
}
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
- const Matrix *output, Status *status) {
+ const Matrix *output,
+ MContext *context, Status *status) {
CHECK_SAME_DIMENSION(nerr, err, status);
CHECK_SAME_DIMENSION(nerr, output, status);
int i, j;
@@ -303,10 +314,11 @@ void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+ MContext *context, Status *status) {
Matrix *max_idx;
CHECK_SAME_DIMENSION_RET(a, b, status);
- max_idx = nerv_matrix_(create)(a->nrow, 1, status);
+ max_idx = nerv_matrix_(create)(a->nrow, 1, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
int i, j;
@@ -337,7 +349,7 @@ Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status) {
}
void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
- Status *status) {
+ MContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
CHECK_SAME_DIMENSION(a, c, status);
int i, j;
@@ -358,7 +370,8 @@ void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+ MContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
int i, j;
size_t astride = a->stride, bstride = b->stride;
@@ -383,10 +396,10 @@ void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status) {
}
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
- int context, Status *status) {
+ int cont, MContext *context, Status *status) {
if (a->nrow != b->nrow)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
- if (a->ncol != b->ncol * (context * 2 + 1))
+ if (a->ncol != b->ncol * (cont * 2 + 1))
NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
"the width should be 2 * context + 1");
int i, j, k;
@@ -395,10 +408,10 @@ void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
for (i = 0; i < a->nrow; i++)
{
MATRIX_ELEM *a_subrow = arow;
- int start = i - context;
+ int start = i - cont;
if (start < 0) start = 0;
const MATRIX_ELEM *brow = MATRIX_ROW_PTR(b, start);
- for (j = i - context; j <= i + context; j++)
+ for (j = i - cont; j <= i + cont; j++)
{
for (k = 0; k < b->ncol; k++)
a_subrow[k] = brow[k];
@@ -412,7 +425,7 @@ void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
- int step, Status *status) {
+ int step, MContext *context, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
if (b->ncol % step)
NERV_EXIT_STATUS(status, MAT_GENERAL_ERR,
@@ -439,7 +452,7 @@ void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
- Status *status) {
+ MContext *context, Status *status) {
if (a->ncol != b->ncol)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
if (b->nrow != 1)
@@ -458,7 +471,7 @@ void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
- Status *status) {
+                                     MContext *context, Status *status) {
if (a->nrow != b->nrow)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
if (b->ncol != 1)
@@ -477,13 +490,14 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-static void host_matrix_(free)(MATRIX_ELEM *ptr, Status *status) {
+static void host_matrix_(free)(MATRIX_ELEM *ptr, MContext *context, Status *status) {
free(ptr);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
static void host_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
- long width, long height, Status *status) {
+ long width, long height,
+ MContext *context, Status *status) {
if ((*dptr = (MATRIX_ELEM *)malloc(width * height)) == NULL)
NERV_EXIT_STATUS(status, MAT_INSUF_MEM, 0);
*stride = width;
@@ -491,7 +505,7 @@ static void host_matrix_(alloc)(MATRIX_ELEM **dptr, size_t *stride,
}
#include "matrix.c"
-Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status) {
+Matrix *nerv_matrix_(load)(ChunkData *cdp, MContext *context, Status *status) {
int i, j;
long nrow, ncol;
FILE *fp = cdp->fp;
@@ -501,7 +515,7 @@ Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status) {
NERV_SET_STATUS(status, MAT_INVALID_FORMAT, 0);
return 0;
}
- self = nerv_matrix_(create)(nrow, ncol, status);
+ self = nerv_matrix_(create)(nrow, ncol, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
for (i = 0; i < nrow; i++)
@@ -519,7 +533,7 @@ Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status) {
return self;
}
-void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, Status *status) {
+void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, MContext *context, Status *status) {
int i, j;
long nrow = self->nrow, ncol = self->ncol;
FILE *fp = cfp->fp;
@@ -540,7 +554,7 @@ void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, Status *status) {
void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
- Status *status) {
+ MContext *context, Status *status) {
if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow &&
a_begin + b_end - b_begin <= a->nrow))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
@@ -553,7 +567,8 @@ void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
}
void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status) {
+ const Matrix *idx, int b_begin,
+ MContext *context, Status *status) {
if (!(0 <= b_begin && b_begin + a->nrow <= idx->ncol))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
if (idx->nrow != 1)
diff --git a/nerv/lib/matrix/generic/mmatrix.h b/nerv/lib/matrix/generic/mmatrix.h
index c54c4e5..6e0589a 100644
--- a/nerv/lib/matrix/generic/mmatrix.h
+++ b/nerv/lib/matrix/generic/mmatrix.h
@@ -3,45 +3,53 @@
void nerv_matrix_(add)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
- Status *status);
+ MContext *context, Status *status);
void nerv_matrix_(mul)(Matrix *c, const Matrix *a, const Matrix *b,
MATRIX_ELEM alpha, MATRIX_ELEM beta,
- int ta, int tb, Status *status);
-void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status);
+ int ta, int tb,
+ MContext *context, Status *status);
+void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b,
+ MContext *context, Status *status);
void nerv_matrix_(sigmoid_grad)(Matrix *nerr, const Matrix *err,
- const Matrix *output, Status *status);
+ const Matrix *output,
+ MContext *context, Status *status);
-Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a, Status *status);
-Matrix *nerv_matrix_(rowsum)(Matrix *a, Status *status);
-Matrix *nerv_matrix_(colsum)(Matrix *a, Status *status);
+Matrix *nerv_matrix_(softmax)(Matrix *b, const Matrix *a,
+ MContext *context, Status *status);
+Matrix *nerv_matrix_(rowsum)(Matrix *a, MContext *context, Status *status);
+Matrix *nerv_matrix_(colsum)(Matrix *a, MContext *context, Status *status);
Matrix *nerv_matrix_(colsame)(Matrix *a, const Matrix *ref,
- Status *status);
-Matrix *nerv_matrix_(rowmax)(Matrix *a, Status *status);
+ MContext *context, Status *status);
+Matrix *nerv_matrix_(rowmax)(Matrix *a, MContext *context, Status *status);
void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
- Status *status);
+ MContext *context, Status *status);
void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
- Status *status);
-void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status);
-void nerv_matrix_(fill)(Matrix *self, double val, Status *status);
+ MContext *context, Status *status);
+void nerv_matrix_(clip)(Matrix *self, double val1, double val2,
+ MContext *context, Status *status);
+void nerv_matrix_(fill)(Matrix *self, double val, MContext *context, Status *status);
void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
- Status *status);
-Matrix *nerv_matrix_(trans)(Matrix *a, Status *status);
+ MContext *context, Status *status);
+Matrix *nerv_matrix_(trans)(Matrix *a, MContext *context, Status *status);
void nerv_matrix_(mul_elem)(Matrix *c, const Matrix *a, const Matrix *b,
- Status *status);
+ MContext *context, Status *status);
-void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a, Status *status);
+void nerv_matrix_(log_elem)(Matrix *b, const Matrix *a,
+ MContext *context, Status *status);
-Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col, Status *status);
+Matrix *nerv_matrix_(decompress)(const Matrix *a, int orig_col,
+ MContext *context, Status *status);
void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
- const Matrix *idx, int b_begin, Status *status);
+ const Matrix *idx, int b_begin,
+ MContext *context, Status *status);
void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
- int context, Status *status);
+ int cont, MContext *context, Status *status);
void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
- int step, Status *status);
+ int step, MContext *context, Status *status);
void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
- Status *status);
+ MContext *context, Status *status);
void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
- Status *status);
-Matrix *nerv_matrix_(load)(ChunkData *cdp, Status *status);
-void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, Status *status);
+ MContext *context, Status *status);
+Matrix *nerv_matrix_(load)(ChunkData *cdp, MContext *context, Status *status);
+void nerv_matrix_(save)(Matrix *self, ChunkFile *cfp, MContext *context, Status *status);
diff --git a/nerv/lib/matrix/mmatrix.c b/nerv/lib/matrix/mmatrix.c
index 3125ab6..f1cbc75 100644
--- a/nerv/lib/matrix/mmatrix.c
+++ b/nerv/lib/matrix/mmatrix.c
@@ -1,6 +1,8 @@
#define NERV_GENERIC_MMATRIX
+#define MATRIX_CONTEXT MContext
#include <stdlib.h>
#include "../common.h"
+#include "mmatrix.h"
#define MATRIX_USE_FLOAT
#define host_matrix_(NAME) host_matrix_float_##NAME
@@ -10,9 +12,40 @@
#include "generic/elem_type.h"
#include "generic/mmatrix.c"
-Matrix *nerv_matrix_(perm_gen)(int ncol, Status *status) {
+void nerv_host_context_print_profile(MContext *context) {
+ HashMap *profile = context->profile;
+ size_t i;
+ fprintf(stderr, "*** [nerv mmatrix profile] **\n");
+ for (i = 0; i < profile->size; i++)
+ {
+ HashNode *ptr;
+ for (ptr = profile->bucket[i]; ptr; ptr = ptr->next)
+ {
+ fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val);
+ }
+ }
+}
+
+void nerv_host_context_clear_profile(MContext *context) {
+ nerv_hashmap_clear(context->profile);
+}
+
+MContext *nerv_host_context_create(Status *status) {
+ MContext *context = (MContext *)malloc(sizeof(MContext));
+ context->profile = nerv_hashmap_create(PROFILE_HASHMAP_SIZE, bkdr_hash, strcmp);
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+ return context;
+}
+
+void nerv_host_context_destroy(MContext *context, Status *status) {
+ nerv_hashmap_destroy(context->profile);
+ free(context);
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+Matrix *nerv_matrix_(perm_gen)(int ncol, MContext *context, Status *status) {
int i;
- Matrix *self = nerv_matrix_(create)(1, ncol, status);
+ Matrix *self = nerv_matrix_(create)(1, ncol, context, status);
if (status->err_code != NERV_NORMAL)
return NULL;
float *prow = MATRIX_ELEM_PTR_F(self);
diff --git a/nerv/lib/matrix/mmatrix.h b/nerv/lib/matrix/mmatrix.h
index 31e7984..6061683 100644
--- a/nerv/lib/matrix/mmatrix.h
+++ b/nerv/lib/matrix/mmatrix.h
@@ -1,5 +1,15 @@
#ifndef NERV_MMATRIX_H
#define NERV_MMATRIX_H
#include "matrix.h"
-Matrix *nerv_matrix_host_float_perm_gen(int ncol, Status *status);
+#include "../common.h"
+
+typedef struct MContext {
+ HashMap *profile;
+} MContext;
+
+Matrix *nerv_matrix_host_float_perm_gen(int ncol, MContext *context, Status *status);
+void nerv_host_context_print_profile(MContext *context);
+void nerv_host_context_clear_profile(MContext *context);
+MContext *nerv_host_context_create(Status *status);
+void nerv_host_context_destroy(MContext *context, Status *status);
#endif
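Note: the host-side API mirrors the CUDA one, with MContext carrying only the profile hashmap. A hedged usage sketch follows; the include path is illustrative, and nerv_matrix_host_float_destroy is assumed to be visible through the generic headers.

    #include <stdio.h>
    #include "nerv/lib/matrix/mmatrix.h"

    int main(void) {
        Status status;
        MContext *ctx = nerv_host_context_create(&status);
        if (status.err_code != NERV_NORMAL) return 1;

        /* perm_gen, like every other allocating call, now takes the context */
        Matrix *perm = nerv_matrix_host_float_perm_gen(10, ctx, &status);
        if (status.err_code != NERV_NORMAL) return 1;

        nerv_host_context_print_profile(ctx);
        nerv_matrix_host_float_destroy(perm, ctx, &status);  /* assumed declared via generic/matrix.h */
        nerv_host_context_destroy(ctx, &status);
        return 0;
    }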