From a309ce5e33b22030bcac348c63576187676abee3 Mon Sep 17 00:00:00 2001
From: Determinant
Date: Mon, 1 Jun 2015 17:37:13 +0800
Subject: add expand_frm, rearrange_frm, scale_row

---
 io/init.lua                |  2 +-
 layer/affine.lua           |  9 ++---
 matrix/cukernel.h          |  3 ++
 matrix/generic/cukernel.cu | 87 ++++++++++++++++++++++++++++++++++++++--------
 matrix/generic/cumatrix.c  | 37 ++++++++++++++++++++
 matrix/init.lua            |  2 +-
 speech                     |  2 +-
 7 files changed, 120 insertions(+), 22 deletions(-)

diff --git a/io/init.lua b/io/init.lua
index 8c3701e..7c312f4 100644
--- a/io/init.lua
+++ b/io/init.lua
@@ -20,7 +20,7 @@ end
 function nerv.ChunkFile:read_chunk(id, global_conf)
     local metadata = self.metadata[id]
     if metadata == nil then
-        nerv_error("chunk with id %s does not exist", id)
+        nerv.error("chunk with id %s does not exist", id)
     end
     local chunk_type = assert(loadstring("return " .. metadata.type))()
     local chunk = chunk_type(id, global_conf)
diff --git a/layer/affine.lua b/layer/affine.lua
index d88813e..573b98d 100644
--- a/layer/affine.lua
+++ b/layer/affine.lua
@@ -1,13 +1,14 @@
-local LinearTransParam = nerv.class('nerv.LinearTransParam', 'nerv.Param')
-local BiasParam = nerv.class('nerv.BiasParam', 'nerv.LinearTransParam')
+local MatrixParam = nerv.class('nerv.MatrixParam', 'nerv.Param')
+local LinearTransParam = nerv.class('nerv.LinearTransParam', 'nerv.MatrixParam')
+local BiasParam = nerv.class('nerv.BiasParam', 'nerv.MatrixParam')
 local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
 
-function LinearTransParam:read(pcdata)
+function MatrixParam:read(pcdata)
     self.trans = self.gconf.mat_type.new_from_host(
                     nerv.MMatrixFloat.load(pcdata))
 end
 
-function LinearTransParam:write(pfhandle)
+function MatrixParam:write(pfhandle)
     self.trans:new_to_host():save(pfhandle)
 end
 
diff --git a/matrix/cukernel.h b/matrix/cukernel.h
index 232699d..178b7d3 100644
--- a/matrix/cukernel.h
+++ b/matrix/cukernel.h
@@ -10,4 +10,7 @@ void cudak_(cuda_softmax_denominator)(const Matrix *a, const Matrix *max, Matrix
 void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max, const Matrix *deno, Matrix *b);
 void cudak_(cuda_add_row)(const Matrix *a, Matrix *b, double beta);
 void cudak_(cuda_fill)(Matrix *a, double val);
+void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context);
+void cudak_(cuda_rearrange_frm)(const Matrix *a, Matrix *b, int step);
+void cudak_(cuda_scale_row)(const Matrix *a, Matrix *b);
 #endif
diff --git a/matrix/generic/cukernel.cu b/matrix/generic/cukernel.cu
index 0e3d3cf..1d8b983 100644
--- a/matrix/generic/cukernel.cu
+++ b/matrix/generic/cukernel.cu
@@ -4,7 +4,7 @@
 #include "matrix.h"
 #include "cuda.h"
 #define CUDA_THREADS_N 16
-#define CUDA_THREADS_NN (16 * 16)
+#define CUDA_THREADS_NN ((CUDA_THREADS_N) * (CUDA_THREADS_N))
 #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
 __global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                  int nrow, int ncol, int stride) {
@@ -154,12 +154,43 @@ __global__ void cudak_(fill)(MATRIX_ELEM *a,
     a[j + i * stride] = val;
 }
 
+__global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                   int nrow, int ncol,
+                                   int enrow, int encol,
+                                   int stride, int estride,
+                                   int context) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    int ridx;
+    if (i >= enrow || j >= encol) return;
+    ridx = i + j / ncol - context;
+    if (ridx < 0) ridx = 0;
+    else if (ridx >= nrow) ridx = nrow - 1;
+    b[j + i * estride] = a[j % ncol + ridx * stride];
+}
+
+__global__ void cudak_(rearrange_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                      int nrow, int ncol,
+                                      int stride, int step, int orig_dim) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    if (i >= nrow || j >= ncol) return;
+    b[j + i * stride] = a[j / step + (j % step) * orig_dim + i * stride];
+}
+
+__global__ void cudak_(scale_row)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                  int nrow, int ncol,
+                                  int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    if (i >= nrow || j >= ncol) return;
+    b[j + i * stride] *= a[j];
+}
 extern "C" {
 #include "../cukernel.h"
 
     void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
                        CEIL_DIV(b->nrow, threadsPerBlock.y));
         cudak_(log_elem)<<<numBlocks, threadsPerBlock>>> \
@@ -169,8 +200,7 @@ extern "C" {
     void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b,
                                Matrix *c) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
                        CEIL_DIV(b->nrow, threadsPerBlock.y));
         cudak_(mul_elem)<<<numBlocks, threadsPerBlock>>> \
             (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
@@ -180,8 +210,7 @@ extern "C" {
     }
     void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
                        CEIL_DIV(b->nrow, threadsPerBlock.y));
         cudak_(sigmoid)<<<numBlocks, threadsPerBlock>>> \
             (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
@@ -191,8 +220,7 @@ extern "C" {
 
     void cudak_(cuda_sigmoid_grad)(const Matrix *output,
                                    const Matrix *err, Matrix *nerr) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x),
                        CEIL_DIV(nerr->nrow, threadsPerBlock.y));
         cudak_(sigmoid_grad)<<<numBlocks, threadsPerBlock>>> \
@@ -248,8 +276,7 @@ extern "C" {
 
     void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max,
                                     const Matrix *deno, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
                        CEIL_DIV(b->nrow, threadsPerBlock.y));
         cudak_(softmax_final)<<<numBlocks, threadsPerBlock>>> \
@@ -310,8 +337,7 @@ extern "C" {
 
     /* in-place calc */
     void cudak_(cuda_add_row)(const Matrix *a, Matrix *b, double beta) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
                        CEIL_DIV(b->nrow, threadsPerBlock.y));
         cudak_(add_row)<<<numBlocks, threadsPerBlock>>> \
@@ -320,13 +346,44 @@ extern "C" {
     }
 
     void cudak_(cuda_fill)(Matrix *a, double val) {
-        dim3 threadsPerBlock(CUDA_THREADS_N,
-                             CUDA_THREADS_N);
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
                        CEIL_DIV(a->nrow, threadsPerBlock.y));
         cudak_(fill)<<<numBlocks, threadsPerBlock>>> \
             (MATRIX_ELEM_PTR(a), a->nrow, a->ncol,
              a->stride / sizeof(MATRIX_ELEM), val);
     }
+
+    void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(expand_frm)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             a->nrow, a->ncol,
+             b->nrow, b->ncol,
+             a->stride / sizeof(MATRIX_ELEM),
+             b->stride / sizeof(MATRIX_ELEM),
+             context);
+    }
+
+    void cudak_(cuda_rearrange_frm)(const Matrix *a, Matrix *b, int step) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(rearrange_frm)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM),
+             step, b->ncol / step);
+    }
+
+    void cudak_(cuda_scale_row)(const Matrix *a, Matrix *b) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(scale_row)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
+    }
 }
 #endif
diff --git a/matrix/generic/cumatrix.c b/matrix/generic/cumatrix.c
index 3bc58d7..58f3679 100644
--- a/matrix/generic/cumatrix.c
+++ b/matrix/generic/cumatrix.c
@@ -282,6 +282,40 @@ static int nerv_matrix_(copy_rows_fromh_by_idx)(lua_State *L) {
     return 0;
 }
 
+static int nerv_matrix_(expand_frm)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    int context = luaL_checkinteger(L, 3);
+    if (a->nrow != b->nrow)
+        nerv_error(L, "mismatching number of frames");
+    if (a->ncol != b->ncol * (context * 2 + 1))
+        nerv_error(L, "the width should be 2 * context + 1");
+    cudak_(cuda_expand_frm)(b, a, context);
+    return 0;
+}
+
+static int nerv_matrix_(rearrange_frm)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    int step = luaL_checkinteger(L, 3);
+    CHECK_SAME_DIMENSION(a, b);
+    if (b->ncol % step)
+        nerv_error(L, "the dimension of columns is not divisible by step");
+    cudak_(cuda_rearrange_frm)(b, a, step);
+    return 0;
+}
+
+static int nerv_matrix_(scale_row)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    if (a->ncol != b->ncol)
+        nerv_error(L, "the number of columns is not the same");
+    if (b->nrow != 1)
+        nerv_error(L, "a row vector is expected");
+    cudak_(cuda_scale_row)(b, a);
+    return 0;
+}
+
 static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"create", nerv_matrix_(create)},
     {"colsum", nerv_matrix_(colsum)},
@@ -303,6 +337,9 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"mul_elem", nerv_matrix_(mul_elem)},
     {"log_elem", nerv_matrix_(log_elem)},
     {"copy_rows_fromh_by_idx", nerv_matrix_(copy_rows_fromh_by_idx)},
+    {"expand_frm", nerv_matrix_(expand_frm)},
+    {"rearrange_frm", nerv_matrix_(rearrange_frm)},
+    {"scale_row", nerv_matrix_(scale_row)},
     {NULL, NULL}
 };
 
diff --git a/matrix/init.lua b/matrix/init.lua
index 057b085..f309f81 100644
--- a/matrix/init.lua
+++ b/matrix/init.lua
@@ -6,7 +6,7 @@ function nerv.Matrix:__tostring__()
     if self.fmt then
         fmt = self.fmt
     else
-        fmt = "%.10f "
+        fmt = "%.8f "
     end
     if nrow == 1 then
         for col = 0, ncol - 1 do
diff --git a/speech b/speech
index 3b156cd..d8ea67e 160000
--- a/speech
+++ b/speech
@@ -1 +1 @@
-Subproject commit 3b156cd0cd19aa7db97082161921064a0a871328
+Subproject commit d8ea67ee420c2fc73085da04de86df023acd98d7
--
cgit v1.2.3
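
Usage note (not part of the patch above): a minimal Lua sketch of how the three new CuMatrix methods added by this commit might be chained for frame splicing. The input matrix `feat`, the context size, and the scaling vector are illustrative assumptions only; `nrow`, `ncol`, `create`, and `fill` are assumed to be the existing nerv matrix helpers.

-- hypothetical example: `feat` is assumed to be an existing nerv.CuMatrixFloat
-- of size nframe x dim, holding one feature vector per row
local context = 5
local n = 2 * context + 1
local expanded = nerv.CuMatrixFloat(feat:nrow(), feat:ncol() * n)
expanded:expand_frm(feat, context)     -- each row becomes n concatenated neighbouring frames (clamped at the edges)
local rearranged = expanded:create()   -- new matrix of the same size
rearranged:rearrange_frm(expanded, n)  -- regroup columns so the n copies of each dimension sit next to each other
local scale = nerv.CuMatrixFloat(1, rearranged:ncol())
scale:fill(0.5)
rearranged:scale_row(scale)            -- multiply column j by scale[j], in place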