-rw-r--r--   Makefile                          |   1 +
-rw-r--r--   nerv/Makefile                     |   2 +-
-rw-r--r--   nerv/matrix/generic/cukernel.cu   | 592 --------------------------
m---------   speech                            |   0
4 files changed, 2 insertions(+), 593 deletions(-)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -9,6 +9,7 @@ luarocks:
 install:
 	cd nerv; $(PREFIX)/bin/luarocks make
 speech:
+	cd speech/speech_utils; $(PREFIX)/bin/luarocks make
 	cd speech/htk_io; $(PREFIX)/bin/luarocks make
 clean:
 	cd nerv && make clean
diff --git a/nerv/Makefile b/nerv/Makefile
index fdffd12..728d010 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -66,7 +66,7 @@ $(LIB_PATH)/libluaT.so: $(LUAT_OBJS)
 $(INST_LIBDIR)/libnerv.so: $(NERV_OBJS) $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
 	gcc -shared -o $@ $(NERV_OBJS) $(LDFLAGS) -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -lluaT
 
-$(OBJ_DIR)/matrix/cumatrix.o: matrix/generic/cumatrix.c matrix/generic/matrix.c matrix/generic/cukernel.cu
+$(OBJ_DIR)/matrix/cumatrix.o: matrix/generic/cumatrix.c matrix/generic/matrix.c
 $(OBJ_DIR)/matrix/mmatrix.o: matrix/generic/mmatrix.c matrix/generic/matrix.c
 
 $(OBJ_DIR)/lib/matrix/cumatrix.o: lib/matrix/generic/cumatrix.c lib/matrix/generic/matrix.c lib/matrix/generic/cukernel.cu
diff --git a/nerv/matrix/generic/cukernel.cu b/nerv/matrix/generic/cukernel.cu
deleted file mode 100644
index 2ae5e62..0000000
--- a/nerv/matrix/generic/cukernel.cu
+++ /dev/null
@@ -1,592 +0,0 @@
-#ifdef NERV_GENERIC_CUKERNEL
-#include <assert.h>
-#include <stdio.h>
-#include "matrix.h"
-#include "cuda.h"
-#include "float.h"
-#define CUDA_THREADS_N 16
-#define CUDA_THREADS_NN ((CUDA_THREADS_N) * (CUDA_THREADS_N))
-#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
-__global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                 int nrow, int ncol, int stride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    long idx;
-    MATRIX_ELEM tmp;
-    if (i >= nrow || j >= ncol) return;
-    idx = j + i * stride;
-    tmp = a[idx];
-    if(tmp < FLT_MIN) tmp = FLT_MIN;
-    b[idx] = log(tmp);
-}
-
-__global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
-                                 MATRIX_ELEM *c,
-                                 int nrow, int ncol, int stride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    long idx;
-    if (i >= nrow || j >= ncol) return;
-    idx = j + i * stride;
-    c[idx] = a[idx] * b[idx];
-}
-
-__global__ void cudak_(sigmoid)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                int nrow, int ncol, int stride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    long idx;
-    if (i >= nrow || j >= ncol) return;
-    idx = j + i * stride;
-    b[idx] = 1.0 / (1.0 + exp(-a[idx]));
-}
-
-__global__ void cudak_(sigmoid_grad)(const MATRIX_ELEM *output,
-                                     const MATRIX_ELEM *err,
-                                     MATRIX_ELEM *nerr,
-                                     int nrow, int ncol, int stride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    long idx;
-    if (i >= nrow || j >= ncol) return;
-    idx = j + i * stride;
-    nerr[idx] = output[idx] * (1.0 - output[idx]) * err[idx];
-}
-
-__global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                      const MATRIX_ELEM *max, const MATRIX_ELEM *deno,
-                                      int nrow, int ncol, int stride, int mstride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    long idx;
-    if (i >= nrow || j >= ncol) return;
-    idx = j + i * stride;
-    b[idx] = exp(a[idx] - max[0 + i * mstride]) / deno[0 + i * mstride];
-}
-
-__global__ void cudak_(block_reduce_rowsum)(const MATRIX_ELEM *input,
-                                            MATRIX_ELEM *output,
-                                            const int istride, const int ostride,
-                                            const int n) {
-    extern __shared__ MATRIX_ELEM cudak_(arr)[];
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    cudak_(arr)[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : 0;
-    __syncthreads();
-    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
-    {
-        if (threadIdx.x < offset)
-            cudak_(arr)[threadIdx.x] += cudak_(arr)[threadIdx.x + offset];
-        __syncthreads();
-    }
-    if (threadIdx.x == 0)
-        output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
-}
-
-__global__ void cudak_(block_reduce_colsum)(const MATRIX_ELEM *input,
-                                            MATRIX_ELEM *output,
-                                            const int istride, const int ostride,
-                                            const int n) {
-    extern __shared__ MATRIX_ELEM cudak_(arr)[];
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    cudak_(arr)[threadIdx.y] = i < n ? input[blockIdx.x + istride * i] : 0;
-    __syncthreads();
-    for (int offset = blockDim.y >> 1; offset; offset >>= 1)
-    {
-        if (threadIdx.y < offset)
-            cudak_(arr)[threadIdx.y] += cudak_(arr)[threadIdx.y + offset];
-        __syncthreads();
-    }
-    if (threadIdx.y == 0)
-        output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
-}
-
-__global__ void cudak_(block_reduce_colsame)(const MATRIX_ELEM *input,
-                                             const MATRIX_ELEM *ref_input,
-                                             MATRIX_ELEM *output,
-                                             const int istride, const int ostride,
-                                             const int n) {
-    extern __shared__ MATRIX_ELEM cudak_(arr)[];
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    cudak_(arr)[threadIdx.y] = (i < n && input[blockIdx.x + istride * i] == \
-                                ref_input[blockIdx.x + istride * i]) ? 1.0 : 0;
-    __syncthreads();
-    for (int offset = blockDim.y >> 1; offset; offset >>= 1)
-    {
-        if (threadIdx.y < offset)
-            cudak_(arr)[threadIdx.y] += cudak_(arr)[threadIdx.y + offset];
-        __syncthreads();
-    }
-    if (threadIdx.y == 0)
-        output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
-}
-
-__global__ void cudak_(block_reduce_softmax_rowsum)(const MATRIX_ELEM *input,
-                                                    MATRIX_ELEM *output,
-                                                    const MATRIX_ELEM *max,
-                                                    const int istride, const int ostride,
-                                                    const int mstride, const int n) {
-    extern __shared__ MATRIX_ELEM cudak_(arr)[];
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    cudak_(arr)[threadIdx.x] = j < n ? exp(input[j + istride * blockIdx.y] - \
-                                           max[0 + mstride * blockIdx.y]) : 0;
-    __syncthreads();
-    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
-    {
-        if (threadIdx.x < offset)
-            cudak_(arr)[threadIdx.x] += cudak_(arr)[threadIdx.x + offset];
-        __syncthreads();
-    }
-    if (threadIdx.x == 0)
-        output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
-}
-
-__global__ void cudak_(block_reduce_rowmax)(const MATRIX_ELEM *input,
-                                            MATRIX_ELEM *output,
-                                            const int istride, const int ostride,
-                                            const int n) {
-    extern __shared__ MATRIX_ELEM cudak_(arr)[];
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    cudak_(arr)[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : -FLT_MAX;
-    __syncthreads();
-    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
-    {
-        if (threadIdx.x < offset)
-        {
-            MATRIX_ELEM l = cudak_(arr)[threadIdx.x],
-                        r = cudak_(arr)[threadIdx.x + offset];
-            if (r > l)
-                cudak_(arr)[threadIdx.x] = r;
-        }
-        __syncthreads();
-    }
-    if (threadIdx.x == 0)
-        output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
-}
-
-__global__ void cudak_(block_reduce_rowmax_idx)(const MATRIX_ELEM *input,
-                                                const MATRIX_ELEM *idx_input,
-                                                MATRIX_ELEM *output,
-                                                MATRIX_ELEM *idx_output,
-                                                const int istride, const int ostride,
-                                                const int n) {
-    extern __shared__ MATRIX_ELEM cudak_(arr)[];
-    MATRIX_ELEM *arr_val = cudak_(arr);
-    MATRIX_ELEM *arr_idx = arr_val + blockDim.x;
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    arr_val[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : -FLT_MAX;
-    arr_idx[threadIdx.x] = j < n ? idx_input[j + istride * blockIdx.y] : 0;
-    __syncthreads();
-    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
-    {
-        if (threadIdx.x < offset)
-        {
-            MATRIX_ELEM l = arr_val[threadIdx.x],
-                        r = arr_val[threadIdx.x + offset];
-            if (r > l)
-            {
-                arr_val[threadIdx.x] = r;
-                arr_idx[threadIdx.x] = arr_idx[threadIdx.x + offset];
-            }
-        }
-        __syncthreads();
-    }
-    if (threadIdx.x == 0)
-    {
-        output[blockIdx.x + ostride * blockIdx.y] = arr_val[0];
-        idx_output[blockIdx.x + ostride * blockIdx.y] = arr_idx[0];
-    }
-}
-
-__global__ void cudak_(add_row)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                int nrow, int ncol, int stride, double beta) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    b[j + i * stride] += beta * a[j];
-}
-
-__global__ void cudak_(fill)(MATRIX_ELEM *a,
-                             int nrow, int ncol, int stride, double val) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    a[j + i * stride] = val;
-}
-
-__global__ void cudak_(clip)(MATRIX_ELEM *a,
-                             int nrow, int ncol, int stride, double val_1, double val_2) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    if (a[j + i * stride] > val_2)
-        a[j + i * stride] = val_2;
-    else if (a[j + i * stride] < val_1)
-        a[j + i * stride] = val_1;
-}
-
-__global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                   int nrow, int ncol,
-                                   int enrow, int encol,
-                                   int stride, int estride,
-                                   int context) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    int ridx;
-    if (i >= enrow || j >= encol) return;
-    ridx = i + j / ncol - context;
-    if (ridx < 0) ridx = 0;
-    else if (ridx >= nrow) ridx = nrow - 1;
-    b[j + i * estride] = a[j % ncol + ridx * stride];
-}
-
-__global__ void cudak_(rearrange_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                      int nrow, int ncol,
-                                      int stride, int step, int orig_dim) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    b[j + i * stride] = a[j / step + (j % step) * orig_dim + i * stride];
-}
-
-__global__ void cudak_(scale_rows_by_col)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                          int nrow, int ncol,
-                                          int astride, int bstride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    b[j + i * bstride] *= a[i * astride];
-}
-
-__global__ void cudak_(scale_rows_by_row)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                          int nrow, int ncol,
-                                          int stride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    b[j + i * stride] *= a[j];
-}
-
-__global__ void cudak_(decompress)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
-                                   int nrow, int ncol,
-                                   int stride_a, int stride_b) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    b[lrintf(a[j + i * stride_a]) + i * stride_b] = 1.0;
-}
-
-__global__ void cudak_(gen_col_idx)(MATRIX_ELEM *b,
-                                    int nrow, int ncol, int stride) {
-    int j = blockIdx.x * blockDim.x + threadIdx.x;
-    int i = blockIdx.y * blockDim.y + threadIdx.y;
-    if (i >= nrow || j >= ncol) return;
-    b[j + i * stride] = j;
-}
-
-extern "C" {
-#include "../cukernel.h"
-    void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(log_elem)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b,
-                               Matrix *c) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(mul_elem)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             MATRIX_ELEM_PTR(c),
-             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(sigmoid)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
-             b->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_sigmoid_grad)(const Matrix *output,
-                                   const Matrix *err, Matrix *nerr) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x),
-                       CEIL_DIV(nerr->nrow, threadsPerBlock.y));
-        cudak_(sigmoid_grad)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(output), MATRIX_ELEM_PTR(err),
-             MATRIX_ELEM_PTR(nerr),
-             nerr->nrow, nerr->ncol,
-             nerr->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) {
-        dim3 block(CUDA_THREADS_NN, 1);
-        int ncol = a->ncol;
-        int blocks_per_row = CEIL_DIV(ncol, block.x);
-        dim3 grid(blocks_per_row, a->nrow);
-        MATRIX_ELEM *res;
-        size_t stride;
-        cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
-        cudak_(block_reduce_rowsum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
-            (MATRIX_ELEM_PTR(a), res,
-             a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
-             ncol);
-        ncol = blocks_per_row;
-        assert((unsigned long)ncol <= block.x);
-        grid.x = 1;
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_rowsum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
-            (res, MATRIX_ELEM_PTR(b),
-             stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
-             ncol);
-        cudaStreamSynchronize(0);
-        cudaFree(res);
-    }
-
-    void cudak_(cuda_colsame)(const Matrix *a, const Matrix *ref, Matrix *b) {
-        dim3 block(1, CUDA_THREADS_NN);
-        int nrow = a->nrow;
-        int blocks_per_col = CEIL_DIV(nrow, block.y);
-        dim3 grid(a->ncol, blocks_per_col);
-        MATRIX_ELEM *res;
-        size_t stride;
-        cudaMallocPitch(&res, &stride, a->ncol * sizeof(MATRIX_ELEM), blocks_per_col);
-        cudak_(block_reduce_colsame)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(ref), res,
-             a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
-             nrow);
-        nrow = blocks_per_col;
-        assert((unsigned long)nrow <= block.y);
-        grid.y = 1;
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \
-            (res, MATRIX_ELEM_PTR(b),
-             stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
-             nrow);
-        cudaStreamSynchronize(0);
-        cudaFree(res);
-    }
-
-    void cudak_(cuda_colsum)(const Matrix *a, Matrix *b) {
-        dim3 block(1, CUDA_THREADS_NN);
-        int nrow = a->nrow;
-        int blocks_per_col = CEIL_DIV(nrow, block.y);
-        dim3 grid(a->ncol, blocks_per_col);
-        MATRIX_ELEM *res;
-        size_t stride;
-        cudaMallocPitch(&res, &stride, a->ncol * sizeof(MATRIX_ELEM), blocks_per_col);
-        cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \
-            (MATRIX_ELEM_PTR(a), res,
-             a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
-             nrow);
-        nrow = blocks_per_col;
-        assert((unsigned long)nrow <= block.y);
-        grid.y = 1;
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \
-            (res, MATRIX_ELEM_PTR(b),
-             stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
-             nrow);
-        cudaStreamSynchronize(0);
-        cudaFree(res);
-    }
-
-    void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max,
-                                    const Matrix *deno, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(softmax_final)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             MATRIX_ELEM_PTR(max), MATRIX_ELEM_PTR(deno),
-             b->nrow, b->ncol,
-             b->stride / sizeof(MATRIX_ELEM),
-             max->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_softmax_denominator)(const Matrix *a, const Matrix *max, Matrix *b) {
-        dim3 block(CUDA_THREADS_NN, 1);
-        int ncol = a->ncol;
-        int blocks_per_row = CEIL_DIV(ncol, block.x);
-        dim3 grid(blocks_per_row, a->nrow);
-        MATRIX_ELEM *res;
-        size_t stride;
-        assert(max->ncol == 1);
-        cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
-        cudak_(block_reduce_softmax_rowsum) \
-            <<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
-            (MATRIX_ELEM_PTR(a), res, MATRIX_ELEM_PTR(max),
-             a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
-             max->stride / sizeof(MATRIX_ELEM),
-             ncol);
-        ncol = blocks_per_row;
-        assert((unsigned long)ncol <= block.x);
-        grid.x = 1;
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_rowsum) \
-            <<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
-            (res, MATRIX_ELEM_PTR(b),
-             stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
-             ncol);
-        cudaStreamSynchronize(0);
-        cudaFree(res);
-    }
-
-    void cudak_(cuda_rowmax)(const Matrix *a, Matrix *b) {
-        dim3 block(CUDA_THREADS_NN, 1);
-        int ncol = a->ncol;
-        int blocks_per_row = CEIL_DIV(ncol, block.x);
-        dim3 grid(blocks_per_row, a->nrow);
-        MATRIX_ELEM *res;
-        size_t stride;
-        cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
-        cudak_(block_reduce_rowmax)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
-            (MATRIX_ELEM_PTR(a), res,
-             a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
-             ncol);
-        ncol = blocks_per_row;
-        assert((unsigned long)ncol <= block.x);
-        grid.x = 1;
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_rowmax)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \
-            (res, MATRIX_ELEM_PTR(b),
-             stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
-             ncol);
-        cudaStreamSynchronize(0);
-        cudaFree(res);
-    }
-
-    void cudak_(cuda_rowmax_idx)(const Matrix *a, Matrix *b, Matrix *b_idx) {
-        dim3 block(CUDA_THREADS_NN, 1);
-        int ncol = a->ncol;
-        int blocks_per_row = CEIL_DIV(ncol, block.x);
-        dim3 grid(blocks_per_row, a->nrow);
-        MATRIX_ELEM *a_idx, *res, *res_idx;
-        size_t stride;
-        cudaMallocPitch(&a_idx, &stride, a->stride, a->nrow);
-        cudak_(gen_col_idx)<<<grid, block>>>(a_idx, a->nrow, ncol, stride / sizeof(MATRIX_ELEM));
-        cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
-        cudaMallocPitch(&res_idx, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow);
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_rowmax_idx)<<<grid, block,
-            2 * block.x * sizeof(MATRIX_ELEM)>>> \
-            (MATRIX_ELEM_PTR(a), a_idx, res, res_idx,
-             a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM),
-             ncol);
-        ncol = blocks_per_row;
-        assert((unsigned long)ncol <= block.x);
-        grid.x = 1;
-        cudaStreamSynchronize(0);
-        cudak_(block_reduce_rowmax_idx)<<<grid, block,
-            2 * block.x * sizeof(MATRIX_ELEM)>>> \
-            (res, res_idx, MATRIX_ELEM_PTR(b), MATRIX_ELEM_PTR(b_idx),
-             stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM),
-             ncol);
-        cudaStreamSynchronize(0);
-        cudaFree(a_idx);
-        cudaFree(res);
-        cudaFree(res_idx);
-    }
-
-    /* in-place calc */
-    void cudak_(cuda_add_row)(const Matrix *a, Matrix *b, double beta) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(add_row)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
-             b->stride / sizeof(MATRIX_ELEM), beta);
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_fill)(Matrix *a, double val) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
-                       CEIL_DIV(a->nrow, threadsPerBlock.y));
-        cudak_(fill)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), a->nrow, a->ncol,
-             a->stride / sizeof(MATRIX_ELEM), val);
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_clip)(Matrix *a, double val_1, double val_2) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
-                       CEIL_DIV(a->nrow, threadsPerBlock.y));
-        cudak_(clip)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), a->nrow, a->ncol,
-             a->stride / sizeof(MATRIX_ELEM), val_1, val_2);
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(expand_frm)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             a->nrow, a->ncol,
-             b->nrow, b->ncol,
-             a->stride / sizeof(MATRIX_ELEM),
-             b->stride / sizeof(MATRIX_ELEM),
-             context);
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_rearrange_frm)(const Matrix *a, Matrix *b, int step) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(rearrange_frm)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM),
-             step, b->ncol / step);
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_scale_rows_by_col)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(scale_rows_by_col)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             b->nrow, b->ncol,
-             a->stride / sizeof(MATRIX_ELEM),
-             b->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_scale_rows_by_row)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
-        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                       CEIL_DIV(b->nrow, threadsPerBlock.y));
-        cudak_(scale_rows_by_row)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-
-    void cudak_(cuda_decompress)(const Matrix *a, Matrix *b) {
-        dim3 threadsPerBlock(1, CUDA_THREADS_NN);
-        dim3 numBlocks(1, CEIL_DIV(a->nrow, threadsPerBlock.y));
-        cudak_(decompress)<<<numBlocks, threadsPerBlock>>> \
-            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
-             a->nrow, a->ncol,
-             a->stride / sizeof(MATRIX_ELEM),
-             b->stride / sizeof(MATRIX_ELEM));
-        cudaStreamSynchronize(0);
-    }
-}
-#endif
diff --git a/speech b/speech
-Subproject 08e33afa533af1f026ac271446a0c873fe0bb5c
+Subproject acd1bc3cf812f69a6260179b584f2a3f0e6d6b8