summary refs log tree commit diff
path: root/matrix
diff options
context:
space:
mode:
authorDeterminant <[email protected]>2015-05-25 17:27:29 +0800
committerDeterminant <[email protected]>2015-05-25 17:27:29 +0800
commit81c115e09f9449ae61c7352edd77c68e9029f2dc (patch)
tree284196680a794d4317d667e6a6e04fa5b3ee8c75 /matrix
parentc1e714052f0c2654ebe2a92d6961382146450b9e (diff)
add "copy from/to cumatrix" functionality
Diffstat (limited to 'matrix')
-rw-r--r--matrix/cumatrix.c3
-rw-r--r--matrix/generic/cumatrix.c28
2 files changed, 31 insertions, 0 deletions
diff --git a/matrix/cumatrix.c b/matrix/cumatrix.c
index db4c784..51a3681 100644
--- a/matrix/cumatrix.c
+++ b/matrix/cumatrix.c
@@ -5,6 +5,7 @@
#define nerv_matrix_(NAME) nerv_matrix_cuda_float_##NAME
#define cudak_(NAME) cudak_float_ ## NAME
#define NERV_CUBLAS_(NAME) cublasS##NAME
+#define MATRIX_CUMATRIX_HOST_TNAME nerv_matrix_host_float_tname
const char *nerv_matrix_(tname) = "nerv.CuMatrixFloat";
#include "generic/cumatrix.c"
#undef NERV_CUBLAS_
@@ -15,11 +16,13 @@ const char *nerv_matrix_(tname) = "nerv.CuMatrixFloat";
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
#undef MATRIX_ELEM_FMT
+#undef MATRIX_CUMATRIX_HOST_TNAME
#define MATRIX_USE_DOUBLE
#define cuda_matrix_(NAME) cuda_matrix_double_##NAME
#define nerv_matrix_(NAME) nerv_matrix_cuda_double_##NAME
#define cudak_(NAME) cudak_double_ ## NAME
#define NERV_CUBLAS_(NAME) cublasD##NAME
+#define MATRIX_CUMATRIX_HOST_TNAME nerv_matrix_host_double_tname
const char *nerv_matrix_(tname) = "nerv.CuMatrixDouble";
#include "generic/cumatrix.c"
diff --git a/matrix/generic/cumatrix.c b/matrix/generic/cumatrix.c
index f846a73..557e4c1 100644
--- a/matrix/generic/cumatrix.c
+++ b/matrix/generic/cumatrix.c
@@ -126,6 +126,32 @@ static int nerv_matrix_(rowmax)(lua_State *L) {
return 1;
}
+extern const char *MATRIX_CUMATRIX_HOST_TNAME;
+/* copy_from(cumatrix, hostmatrix): copy a host (CPU) matrix of the same
+ * element type into this CUDA matrix. Both matrices must have identical
+ * dimensions; rows may have different strides, hence the pitched 2D copy.
+ * Raises a Lua error on dimension mismatch or CUDA copy failure.
+ * Returns no values. */
+static int nerv_matrix_(copy_from)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
+    if (!(a->nrow == b->nrow && a->ncol == b->ncol))
+        nerv_error(L, "Matrices should be of the same dimension");
+    /* width is in bytes; stride (pitch) lets host and device layouts differ */
+    cudaError_t err = cudaMemcpy2D(MATRIX_ELEM_PTR(a), a->stride,
+                                   MATRIX_ELEM_PTR(b), b->stride,
+                                   sizeof(MATRIX_ELEM) * b->ncol, b->nrow,
+                                   cudaMemcpyHostToDevice);
+    /* a silently failed copy would leave the device matrix with garbage;
+     * surface the failure through the same error path used above */
+    if (err != cudaSuccess)
+        nerv_error(L, "cudaMemcpy2D failed in copy_from");
+    return 0;
+}
+
+/* copy_to(cumatrix, hostmatrix): copy this CUDA matrix back into a host
+ * (CPU) matrix of the same element type. Both matrices must have identical
+ * dimensions; rows may have different strides, hence the pitched 2D copy.
+ * Raises a Lua error on dimension mismatch or CUDA copy failure.
+ * Returns no values. */
+static int nerv_matrix_(copy_to)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME);
+    if (!(a->nrow == b->nrow && a->ncol == b->ncol))
+        nerv_error(L, "Matrices should be of the same dimension");
+    /* width is in bytes; stride (pitch) lets host and device layouts differ */
+    cudaError_t err = cudaMemcpy2D(MATRIX_ELEM_PTR(b), b->stride,
+                                   MATRIX_ELEM_PTR(a), a->stride,
+                                   sizeof(MATRIX_ELEM) * a->ncol, a->nrow,
+                                   cudaMemcpyDeviceToHost);
+    /* a silently failed copy would hand the caller stale host data;
+     * surface the failure through the same error path used above */
+    if (err != cudaSuccess)
+        nerv_error(L, "cudaMemcpy2D failed in copy_to");
+    return 0;
+}
+
+
static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"add", nerv_matrix_(add)},
{"mul", nerv_matrix_(mul)},
@@ -135,6 +161,8 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"colsum", nerv_matrix_(colsum)},
{"rowsum", nerv_matrix_(rowsum)},
{"rowmax", nerv_matrix_(rowmax)},
+ {"copy_from", nerv_matrix_(copy_from)},
+ {"copy_to", nerv_matrix_(copy_to)},
{NULL, NULL}
};