| field     | value                                                         | date                      |
|-----------|---------------------------------------------------------------|---------------------------|
| author    | Determinant <ted.sybil@gmail.com>                             | 2015-06-22 19:01:29 +0800 |
| committer | Determinant <ted.sybil@gmail.com>                             | 2015-06-22 19:01:29 +0800 |
| commit    | 2497fd9e7a0fae5ee4887890d7a312e0e08a93b8                      |                           |
| tree      | 382f97575bd2df9ee6abb1662b11b279fc22d72b /nerv/matrix/generic |                           |
| parent    | 196e9b48a3541caccdffc5743001cced70667091                      |                           |
major change: use luarocks to manage project
Diffstat (limited to 'nerv/matrix/generic')
| mode       | file                            | lines added |
|------------|---------------------------------|-------------|
| -rw-r--r-- | nerv/matrix/generic/cukernel.cu | 571         |
| -rw-r--r-- | nerv/matrix/generic/cumatrix.c  | 493         |
| -rw-r--r-- | nerv/matrix/generic/elem_type.h | 22          |
| -rw-r--r-- | nerv/matrix/generic/matrix.c    | 155         |
| -rw-r--r-- | nerv/matrix/generic/matrix.h    | 19          |
| -rw-r--r-- | nerv/matrix/generic/mmatrix.c   | 122         |

6 files changed, 1382 insertions(+), 0 deletions(-)
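Every kernel in the new cukernel.cu follows the same launch convention: element-wise operations run on a 2-D grid of CUDA_THREADS_N x CUDA_THREADS_N blocks sized with CEIL_DIV over the output's columns and rows, and each thread bounds-checks (i, j) before indexing with the row stride expressed in elements (stride / sizeof(MATRIX_ELEM)). The row/column sums, maxima and the softmax denominator instead use a two-pass shared-memory tree reduction: a first launch collapses each tile of a row (or column) into one partial value per block, and a second launch with a single block per row reduces those partials, which is why the host wrappers assert that the number of partials fits in one block. The following standalone sketch is not part of the commit; it illustrates that reduction pattern for plain float row sums, with a hypothetical kernel name (rowsum_block) and toy sizes.

// Illustrative only: two-pass shared-memory row-sum reduction,
// mirroring the block_reduce_rowsum / cuda_rowsum pair in the patch.
#include <cstdio>
#include <cuda_runtime.h>

#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

__global__ void rowsum_block(const float *in, float *out,
                             int istride, int ostride, int n) {
    extern __shared__ float sh[];
    int j = blockIdx.x * blockDim.x + threadIdx.x;          // column index
    sh[threadIdx.x] = j < n ? in[j + istride * blockIdx.y] : 0.0f;
    __syncthreads();
    for (int offset = blockDim.x >> 1; offset; offset >>= 1) {
        if (threadIdx.x < offset)                            // tree reduction in shared memory
            sh[threadIdx.x] += sh[threadIdx.x + offset];
        __syncthreads();
    }
    if (threadIdx.x == 0)                                    // one partial sum per block
        out[blockIdx.x + ostride * blockIdx.y] = sh[0];
}

int main() {
    const int nrow = 4, ncol = 1000, threads = 256;
    int blocks = CEIL_DIV(ncol, threads);                    // partials per row
    float *a, *partial, *b;
    cudaMallocManaged(&a, nrow * ncol * sizeof(float));
    cudaMallocManaged(&partial, nrow * blocks * sizeof(float));
    cudaMallocManaged(&b, nrow * sizeof(float));
    for (int i = 0; i < nrow * ncol; i++) a[i] = 1.0f;       // each row sums to ncol
    dim3 block(threads, 1), grid(blocks, nrow);
    // pass 1: one partial per (block, row)
    rowsum_block<<<grid, block, threads * sizeof(float)>>>(a, partial, ncol, blocks, ncol);
    // pass 2: a single block per row reduces the partials (blocks <= threads must hold)
    rowsum_block<<<dim3(1, nrow), block, threads * sizeof(float)>>>(partial, b, blocks, 1, blocks);
    cudaDeviceSynchronize();
    for (int i = 0; i < nrow; i++) printf("row %d sum = %g\n", i, b[i]);
    cudaFree(a); cudaFree(partial); cudaFree(b);
    return 0;
}

In the patch the same skeleton is specialized for max, argmax and the softmax denominator by changing only the per-element load and the combine step.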
diff --git a/nerv/matrix/generic/cukernel.cu b/nerv/matrix/generic/cukernel.cu
new file mode 100644
index 0000000..d6c8adc
--- /dev/null
+++ b/nerv/matrix/generic/cukernel.cu
@@ -0,0 +1,571 @@
+#ifdef NERV_GENERIC_CUKERNEL
+#include <assert.h>
+#include <stdio.h>
+#include "matrix.h"
+#include "cuda.h"
+#include "float.h"
+#define CUDA_THREADS_N 16
+#define CUDA_THREADS_NN ((CUDA_THREADS_N) * (CUDA_THREADS_N))
+#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
+__global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                 int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    MATRIX_ELEM tmp;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    tmp = a[idx];
+    if(tmp < FLT_MIN) tmp = FLT_MIN;
+    b[idx] = log(tmp);
+}
+
+__global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
+                                 MATRIX_ELEM *c,
+                                 int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    c[idx] = a[idx] * b[idx];
+}
+
+__global__ void cudak_(sigmoid)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    b[idx] = 1.0 / (1.0 + exp(-a[idx]));
+}
+
+__global__ void cudak_(sigmoid_grad)(const MATRIX_ELEM *output,
+                                     const MATRIX_ELEM *err,
+                                     MATRIX_ELEM *nerr,
+                                     int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    nerr[idx] = output[idx] * (1.0 - output[idx]) * err[idx];
+}
+
+__global__ void cudak_(softmax_final)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                      const MATRIX_ELEM *max, const MATRIX_ELEM *deno,
+                                      int nrow, int ncol, int stride, int mstride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    b[idx] = exp(a[idx] - max[0 + i * mstride]) / deno[0 + i * mstride];
+}
+
+__global__ void cudak_(block_reduce_rowsum)(const MATRIX_ELEM *input,
+                                            MATRIX_ELEM *output,
+                                            const int istride, const int ostride,
+                                            const int n) {
+    extern __shared__ MATRIX_ELEM cudak_(arr)[];
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    cudak_(arr)[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : 0;
+    __syncthreads();
+    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
+    {
+        if (threadIdx.x < offset)
+            cudak_(arr)[threadIdx.x] += cudak_(arr)[threadIdx.x + offset];
+        __syncthreads();
+    }
+    if (threadIdx.x == 0)
+        output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0];
+}
+
+__global__ void cudak_(block_reduce_colsum)(const MATRIX_ELEM *input,
+                                            MATRIX_ELEM *output,
+                                            const int istride, const int ostride,
+                                            const int n) {
+    extern __shared__ MATRIX_ELEM cudak_(arr)[];
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    cudak_(arr)[threadIdx.y] = i < n ?
input[blockIdx.x + istride * i] : 0; + __syncthreads(); + for (int offset = blockDim.y >> 1; offset; offset >>= 1) + { + if (threadIdx.y < offset) + cudak_(arr)[threadIdx.y] += cudak_(arr)[threadIdx.y + offset]; + __syncthreads(); + } + if (threadIdx.y == 0) + output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0]; +} + +__global__ void cudak_(block_reduce_colsame)(const MATRIX_ELEM *input, + const MATRIX_ELEM *ref_input, + MATRIX_ELEM *output, + const int istride, const int ostride, + const int n) { + extern __shared__ MATRIX_ELEM cudak_(arr)[]; + int i = blockIdx.y * blockDim.y + threadIdx.y; + cudak_(arr)[threadIdx.y] = (i < n && input[blockIdx.x + istride * i] == \ + ref_input[blockIdx.x + istride * i]) ? 1.0 : 0; + __syncthreads(); + for (int offset = blockDim.y >> 1; offset; offset >>= 1) + { + if (threadIdx.y < offset) + cudak_(arr)[threadIdx.y] += cudak_(arr)[threadIdx.y + offset]; + __syncthreads(); + } + if (threadIdx.y == 0) + output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0]; +} + +__global__ void cudak_(block_reduce_softmax_rowsum)(const MATRIX_ELEM *input, + MATRIX_ELEM *output, + const MATRIX_ELEM *max, + const int istride, const int ostride, + const int mstride, const int n) { + extern __shared__ MATRIX_ELEM cudak_(arr)[]; + int j = blockIdx.x * blockDim.x + threadIdx.x; + cudak_(arr)[threadIdx.x] = j < n ? exp(input[j + istride * blockIdx.y] - \ + max[0 + mstride * blockIdx.y]) : 0; + __syncthreads(); + for (int offset = blockDim.x >> 1; offset; offset >>= 1) + { + if (threadIdx.x < offset) + cudak_(arr)[threadIdx.x] += cudak_(arr)[threadIdx.x + offset]; + __syncthreads(); + } + if (threadIdx.x == 0) + output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0]; +} + +__global__ void cudak_(block_reduce_rowmax)(const MATRIX_ELEM *input, + MATRIX_ELEM *output, + const int istride, const int ostride, + const int n) { + extern __shared__ MATRIX_ELEM cudak_(arr)[]; + int j = blockIdx.x * blockDim.x + threadIdx.x; + cudak_(arr)[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : -FLT_MAX; + __syncthreads(); + for (int offset = blockDim.x >> 1; offset; offset >>= 1) + { + if (threadIdx.x < offset) + { + MATRIX_ELEM l = cudak_(arr)[threadIdx.x], + r = cudak_(arr)[threadIdx.x + offset]; + if (r > l) + cudak_(arr)[threadIdx.x] = r; + } + __syncthreads(); + } + if (threadIdx.x == 0) + output[blockIdx.x + ostride * blockIdx.y] = cudak_(arr)[0]; +} + +__global__ void cudak_(block_reduce_rowmax_idx)(const MATRIX_ELEM *input, + const MATRIX_ELEM *idx_input, + MATRIX_ELEM *output, + MATRIX_ELEM *idx_output, + const int istride, const int ostride, + const int n) { + extern __shared__ MATRIX_ELEM cudak_(arr)[]; + MATRIX_ELEM *arr_val = cudak_(arr); + MATRIX_ELEM *arr_idx = arr_val + blockDim.x; + int j = blockIdx.x * blockDim.x + threadIdx.x; + arr_val[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : -FLT_MAX; + arr_idx[threadIdx.x] = j < n ? 
idx_input[j + istride * blockIdx.y] : 0; + __syncthreads(); + for (int offset = blockDim.x >> 1; offset; offset >>= 1) + { + if (threadIdx.x < offset) + { + MATRIX_ELEM l = arr_val[threadIdx.x], + r = arr_val[threadIdx.x + offset]; + if (r > l) + { + arr_val[threadIdx.x] = r; + arr_idx[threadIdx.x] = arr_idx[threadIdx.x + offset]; + } + } + __syncthreads(); + } + if (threadIdx.x == 0) + { + output[blockIdx.x + ostride * blockIdx.y] = arr_val[0]; + idx_output[blockIdx.x + ostride * blockIdx.y] = arr_idx[0]; + } +} + +__global__ void cudak_(add_row)(const MATRIX_ELEM *a, MATRIX_ELEM *b, + int nrow, int ncol, int stride, double beta) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + b[j + i * stride] += beta * a[j]; +} + +__global__ void cudak_(fill)(MATRIX_ELEM *a, + int nrow, int ncol, int stride, double val) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + a[j + i * stride] = val; +} + +__global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b, + int nrow, int ncol, + int enrow, int encol, + int stride, int estride, + int context) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + int ridx; + if (i >= enrow || j >= encol) return; + ridx = i + j / ncol - context; + if (ridx < 0) ridx = 0; + else if (ridx >= nrow) ridx = nrow - 1; + b[j + i * estride] = a[j % ncol + ridx * stride]; +} + +__global__ void cudak_(rearrange_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b, + int nrow, int ncol, + int stride, int step, int orig_dim) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + b[j + i * stride] = a[j / step + (j % step) * orig_dim + i * stride]; +} + +__global__ void cudak_(scale_rows_by_col)(const MATRIX_ELEM *a, MATRIX_ELEM *b, + int nrow, int ncol, + int astride, int bstride) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + b[j + i * bstride] *= a[i * astride]; +} + +__global__ void cudak_(scale_rows_by_row)(const MATRIX_ELEM *a, MATRIX_ELEM *b, + int nrow, int ncol, + int stride) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + b[j + i * stride] *= a[j]; +} + +__global__ void cudak_(decompress)(const MATRIX_ELEM *a, MATRIX_ELEM *b, + int nrow, int ncol, + int stride_a, int stride_b) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + b[lrintf(a[j + i * stride_a]) + i * stride_b] = 1.0; +} + +__global__ void cudak_(gen_col_idx)(MATRIX_ELEM *b, + int nrow, int ncol, int stride) { + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + if (i >= nrow || j >= ncol) return; + b[j + i * stride] = j; +} + +extern "C" { +#include "../cukernel.h" + void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(log_elem)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void 
cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, + Matrix *c) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(mul_elem)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + MATRIX_ELEM_PTR(c), + b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(sigmoid)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol, + b->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_sigmoid_grad)(const Matrix *output, + const Matrix *err, Matrix *nerr) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(nerr->ncol, threadsPerBlock.x), + CEIL_DIV(nerr->nrow, threadsPerBlock.y)); + cudak_(sigmoid_grad)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(output), MATRIX_ELEM_PTR(err), + MATRIX_ELEM_PTR(nerr), + nerr->nrow, nerr->ncol, + nerr->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b) { + dim3 block(CUDA_THREADS_NN, 1); + int ncol = a->ncol; + int blocks_per_row = CEIL_DIV(ncol, block.x); + dim3 grid(blocks_per_row, a->nrow); + MATRIX_ELEM *res; + size_t stride; + cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow); + cudak_(block_reduce_rowsum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \ + (MATRIX_ELEM_PTR(a), res, + a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM), + ncol); + ncol = blocks_per_row; + assert((unsigned long)ncol <= block.x); + grid.x = 1; + cudaStreamSynchronize(0); + cudak_(block_reduce_rowsum)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \ + (res, MATRIX_ELEM_PTR(b), + stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), + ncol); + cudaStreamSynchronize(0); + cudaFree(res); + } + + void cudak_(cuda_colsame)(const Matrix *a, const Matrix *ref, Matrix *b) { + dim3 block(1, CUDA_THREADS_NN); + int nrow = a->nrow; + int blocks_per_col = CEIL_DIV(nrow, block.y); + dim3 grid(a->ncol, blocks_per_col); + MATRIX_ELEM *res; + size_t stride; + cudaMallocPitch(&res, &stride, a->ncol * sizeof(MATRIX_ELEM), blocks_per_col); + cudak_(block_reduce_colsame)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(ref), res, + a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM), + nrow); + nrow = blocks_per_col; + assert((unsigned long)nrow <= block.y); + grid.y = 1; + cudaStreamSynchronize(0); + cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \ + (res, MATRIX_ELEM_PTR(b), + stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), + nrow); + cudaStreamSynchronize(0); + cudaFree(res); + } + + void cudak_(cuda_colsum)(const Matrix *a, Matrix *b) { + dim3 block(1, CUDA_THREADS_NN); + int nrow = a->nrow; + int blocks_per_col = CEIL_DIV(nrow, block.y); + dim3 grid(a->ncol, blocks_per_col); + MATRIX_ELEM *res; + size_t stride; + cudaMallocPitch(&res, &stride, a->ncol * sizeof(MATRIX_ELEM), blocks_per_col); + cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \ + (MATRIX_ELEM_PTR(a), res, + a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM), + nrow); + nrow = blocks_per_col; + 
assert((unsigned long)nrow <= block.y); + grid.y = 1; + cudaStreamSynchronize(0); + cudak_(block_reduce_colsum)<<<grid, block, block.y * sizeof(MATRIX_ELEM)>>> \ + (res, MATRIX_ELEM_PTR(b), + stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), + nrow); + cudaStreamSynchronize(0); + cudaFree(res); + } + + void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max, + const Matrix *deno, Matrix *b) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(softmax_final)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + MATRIX_ELEM_PTR(max), MATRIX_ELEM_PTR(deno), + b->nrow, b->ncol, + b->stride / sizeof(MATRIX_ELEM), + max->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_softmax_denominator)(const Matrix *a, const Matrix *max, Matrix *b) { + dim3 block(CUDA_THREADS_NN, 1); + int ncol = a->ncol; + int blocks_per_row = CEIL_DIV(ncol, block.x); + dim3 grid(blocks_per_row, a->nrow); + MATRIX_ELEM *res; + size_t stride; + assert(max->ncol == 1); + cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow); + cudak_(block_reduce_softmax_rowsum) \ + <<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \ + (MATRIX_ELEM_PTR(a), res, MATRIX_ELEM_PTR(max), + a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM), + max->stride / sizeof(MATRIX_ELEM), + ncol); + ncol = blocks_per_row; + assert((unsigned long)ncol <= block.x); + grid.x = 1; + cudaStreamSynchronize(0); + cudak_(block_reduce_rowsum) \ + <<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \ + (res, MATRIX_ELEM_PTR(b), + stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), + ncol); + cudaStreamSynchronize(0); + cudaFree(res); + } + + void cudak_(cuda_rowmax)(const Matrix *a, Matrix *b) { + dim3 block(CUDA_THREADS_NN, 1); + int ncol = a->ncol; + int blocks_per_row = CEIL_DIV(ncol, block.x); + dim3 grid(blocks_per_row, a->nrow); + MATRIX_ELEM *res; + size_t stride; + cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow); + cudak_(block_reduce_rowmax)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \ + (MATRIX_ELEM_PTR(a), res, + a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM), + ncol); + ncol = blocks_per_row; + assert((unsigned long)ncol <= block.x); + grid.x = 1; + cudaStreamSynchronize(0); + cudak_(block_reduce_rowmax)<<<grid, block, block.x * sizeof(MATRIX_ELEM)>>> \ + (res, MATRIX_ELEM_PTR(b), + stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), + ncol); + cudaStreamSynchronize(0); + cudaFree(res); + } + + void cudak_(cuda_rowmax_idx)(const Matrix *a, Matrix *b, Matrix *b_idx) { + dim3 block(CUDA_THREADS_NN, 1); + int ncol = a->ncol; + int blocks_per_row = CEIL_DIV(ncol, block.x); + dim3 grid(blocks_per_row, a->nrow); + MATRIX_ELEM *a_idx, *res, *res_idx; + size_t stride; + cudaMallocPitch(&a_idx, &stride, a->stride, a->nrow); + cudak_(gen_col_idx)<<<grid, block>>>(a_idx, a->nrow, ncol, stride / sizeof(MATRIX_ELEM)); + cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow); + cudaMallocPitch(&res_idx, &stride, blocks_per_row * sizeof(MATRIX_ELEM), a->nrow); + cudaStreamSynchronize(0); + cudak_(block_reduce_rowmax_idx)<<<grid, block, + 2 * block.x * sizeof(MATRIX_ELEM)>>> \ + (MATRIX_ELEM_PTR(a), a_idx, res, res_idx, + a->stride / sizeof(MATRIX_ELEM), stride / sizeof(MATRIX_ELEM), + ncol); + ncol = blocks_per_row; + assert((unsigned long)ncol <= block.x); 
+ grid.x = 1; + cudaStreamSynchronize(0); + cudak_(block_reduce_rowmax_idx)<<<grid, block, + 2 * block.x * sizeof(MATRIX_ELEM)>>> \ + (res, res_idx, MATRIX_ELEM_PTR(b), MATRIX_ELEM_PTR(b_idx), + stride / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), + ncol); + cudaStreamSynchronize(0); + cudaFree(a_idx); + cudaFree(res); + cudaFree(res_idx); + } + + /* in-place calc */ + void cudak_(cuda_add_row)(const Matrix *a, Matrix *b, double beta) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(add_row)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), b->nrow, b->ncol, + b->stride / sizeof(MATRIX_ELEM), beta); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_fill)(Matrix *a, double val) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x), + CEIL_DIV(a->nrow, threadsPerBlock.y)); + cudak_(fill)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), a->nrow, a->ncol, + a->stride / sizeof(MATRIX_ELEM), val); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(expand_frm)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + a->nrow, a->ncol, + b->nrow, b->ncol, + a->stride / sizeof(MATRIX_ELEM), + b->stride / sizeof(MATRIX_ELEM), + context); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_rearrange_frm)(const Matrix *a, Matrix *b, int step) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(rearrange_frm)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM), + step, b->ncol / step); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_scale_rows_by_col)(const Matrix *a, Matrix *b) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(scale_rows_by_col)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + b->nrow, b->ncol, + a->stride / sizeof(MATRIX_ELEM), + b->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_scale_rows_by_row)(const Matrix *a, Matrix *b) { + dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N); + dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), + CEIL_DIV(b->nrow, threadsPerBlock.y)); + cudak_(scale_rows_by_row)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } + + void cudak_(cuda_decompress)(const Matrix *a, Matrix *b) { + dim3 threadsPerBlock(1, CUDA_THREADS_NN); + dim3 numBlocks(1, CEIL_DIV(a->nrow, threadsPerBlock.y)); + cudak_(decompress)<<<numBlocks, threadsPerBlock>>> \ + (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b), + a->nrow, a->ncol, + a->stride / sizeof(MATRIX_ELEM), + b->stride / sizeof(MATRIX_ELEM)); + cudaStreamSynchronize(0); + } +} +#endif diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c new file mode 100644 index 0000000..b5d1a35 --- /dev/null +++ b/nerv/matrix/generic/cumatrix.c @@ -0,0 +1,493 @@ +#ifdef 
NERV_GENERIC_CUMATRIX +#include "matrix.h" +#include "elem_type.h" + +#define MATRIX_DATA_FREE(L, ptr) cuda_matrix_(free)(L, ptr) +#define MATRIX_DATA_ALLOC(L, dptr, stride, width, height) \ + cuda_matrix_(alloc)(L, dptr, stride, width, height) +#define MATRIX_DATA_WRITE(L, data, idx, val) cuda_matrix_(write)(L, data, idx, val) +#define MATRIX_DATA_READ(L, data, idx) cuda_matrix_(read)(L, data, idx) +#define MATRIX_INIT(L) cuda_matrix_(init)(L) +#define MATRIX_BASE_TNAME nerv_matrix_cuda_tname +#define NERV_GENERIC_MATRIX +#define NERV_GENERIC_CUKERNEL +#include "../../common.h" +#include "../cukernel.h" +#include "../cuda_helper.h" + +Matrix *nerv_matrix_(new_)(lua_State *L, long nrow, long ncol); +void nerv_matrix_(data_free)(lua_State *L, Matrix *self); + +static void nerv_matrix_(add_)(lua_State *L, const Matrix *a, const Matrix *b, + const Matrix *c, + MATRIX_ELEM alpha, MATRIX_ELEM beta) { + PROFILE_START + CUBLAS_SAFE_SYNC_CALL( + NERV_CUBLAS_(geam)(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, + a->ncol, a->nrow, + &alpha, + MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM), + &beta, + MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM), + MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM))); + PROFILE_STOP +} + +static int nerv_matrix_(add)(lua_State *L) { + Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); + MATRIX_ELEM alpha = luaL_checknumber(L, 4); + MATRIX_ELEM beta = luaL_checknumber(L, 5); + CHECK_SAME_DIMENSION(a, b); + CHECK_SAME_DIMENSION(a, c); + nerv_matrix_(add_)(L, a, b, c, alpha, beta); + return 0; +} + +static int nerv_matrix_(get_cublas_op)(char ch) { + return (ch == 'T' || ch == 't') ? CUBLAS_OP_T : CUBLAS_OP_N; +} + +static int nerv_matrix_(mul)(lua_State *L) { +#define SWAP(a, b) \ + do { int t = (a); (a) = (b); (b) = t; } while (0) + + Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname)); + MATRIX_ELEM alpha = luaL_checknumber(L, 4); + MATRIX_ELEM beta = luaL_checknumber(L, 5); + int nargs = lua_gettop(L); + int ta = nargs > 5 ? nerv_matrix_(get_cublas_op)(*luaL_checkstring(L, 6)) \ + : CUBLAS_OP_N; + int tb = nargs > 6 ? 
nerv_matrix_(get_cublas_op)(*luaL_checkstring(L, 7)) \ + : CUBLAS_OP_N; + int am = a->nrow, an = a->ncol; + int bm = b->nrow, bn = b->ncol; + if (ta == CUBLAS_OP_T) SWAP(am, an); + if (tb == CUBLAS_OP_T) SWAP(bm, bn); + if (an != bm) + nerv_error(L, "Wrong dimension of multipliers"); +/* MATRIX_ELEM alpha = 1.0f, beta = 0.0f; */ + /* Because matrix in Nerv is row-major, here b comes first */ + PROFILE_START + CUBLAS_SAFE_SYNC_CALL( + NERV_CUBLAS_(gemm)(cublas_handle, tb, ta, + bn, am, bm, + &alpha, + MATRIX_ELEM_PTR(b), b->stride / sizeof(MATRIX_ELEM), + MATRIX_ELEM_PTR(a), a->stride / sizeof(MATRIX_ELEM), + &beta, + MATRIX_ELEM_PTR(c), c->stride / sizeof(MATRIX_ELEM))); + PROFILE_STOP + return 0; +} + +static int nerv_matrix_(create)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(new_)(L, a->nrow, a->ncol); + luaT_pushudata(L, b, nerv_matrix_(tname)); + return 1; +} + +static int nerv_matrix_(sigmoid)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + CHECK_SAME_DIMENSION(a, b); + PROFILE_START + cudak_(cuda_sigmoid)(b, a); + PROFILE_STOP + return 0; +} + +static int nerv_matrix_(sigmoid_grad)(lua_State *L) { + Matrix *nerr = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *err = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *output = luaT_checkudata(L, 3, nerv_matrix_(tname)); + CHECK_SAME_DIMENSION(nerr, err); + CHECK_SAME_DIMENSION(nerr, output); + PROFILE_START + cudak_(cuda_sigmoid_grad)(output, err, nerr); + PROFILE_STOP + return 0; +} + +static int nerv_matrix_(softmax)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *max, *max_idx; + Matrix *dno; + CHECK_SAME_DIMENSION(a, b); + max = nerv_matrix_(new_)(L, a->nrow, 1); + max_idx = nerv_matrix_(new_)(L, a->nrow, 1); + dno = nerv_matrix_(new_)(L, a->nrow, 1); + PROFILE_START + cudak_(cuda_rowmax_idx)(a, max, max_idx); + cudak_(cuda_softmax_denominator)(a, max, dno); + cudak_(cuda_softmax_final)(a, max, dno, b); + PROFILE_STOP + nerv_matrix_(data_free)(L, max); + nerv_matrix_(data_free)(L, dno); + luaT_pushudata(L, max_idx, nerv_matrix_(tname)); + return 1; +} + +static int nerv_matrix_(rowsum)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1); + PROFILE_START + cudak_(cuda_rowsum)(a, b); + PROFILE_STOP + luaT_pushudata(L, b, nerv_matrix_(tname)); + return 1; +} + +static int nerv_matrix_(colsum)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(new_)(L, 1, a->ncol); + PROFILE_START + cudak_(cuda_colsum)(a, b); + PROFILE_STOP + luaT_pushudata(L, b, nerv_matrix_(tname)); + return 1; +} + +static int nerv_matrix_(colsame)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *ref = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(new_)(L, 1, a->ncol); + CHECK_SAME_DIMENSION(a, ref); + PROFILE_START + cudak_(cuda_colsame)(a, ref, b); + PROFILE_STOP + luaT_pushudata(L, b, nerv_matrix_(tname)); + return 1; +} + +static int nerv_matrix_(rowmax)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1); + PROFILE_START + cudak_(cuda_rowmax)(a, b); + PROFILE_STOP + luaT_pushudata(L, b, nerv_matrix_(tname)); + return 1; +} + +static int nerv_matrix_(rowmax_idx)(lua_State 
*L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = nerv_matrix_(new_)(L, a->nrow, 1); + Matrix *idx = nerv_matrix_(new_)(L, a->nrow, 1); + PROFILE_START + cudak_(cuda_rowmax_idx)(a, b, idx); + PROFILE_STOP + luaT_pushudata(L, b, nerv_matrix_(tname)); + luaT_pushudata(L, idx, nerv_matrix_(tname)); + return 2; +} + +static int nerv_matrix_(add_row)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname)); + double beta = luaL_checknumber(L, 3); + if (a->ncol != b->ncol) + nerv_error(L, "the number of columns is not the same"); + if (a->nrow != 1) + nerv_error(L, "a row vector is expected"); + PROFILE_START + cudak_(cuda_add_row)(a, b, beta); + PROFILE_STOP + return 0; +} + +static int nerv_matrix_(fill)(lua_State *L) { + Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname)); + double val = luaL_checknumber(L, 2); + PROFILE_START + cudak_(cuda_fill)(self, val); + PROFILE_STOP + return 0; +} + +static int nerv_matrix_(copy_fromd)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname)); + int nargs = lua_gettop(L); + int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0; + int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow; + int a_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0; + if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow && + a_begin + b_end - b_begin <= a->nrow)) + nerv_error(L, "invalid copy interval"); + if (a->ncol != b->ncol) + nerv_error(L, "matrices should be of the same dimension"); + PROFILE_START + CUDA_SAFE_SYNC_CALL( + cudaMemcpy2D(MATRIX_ROW_PTR(a, a_begin), a->stride, + MATRIX_ROW_PTR(b, b_begin), b->stride, + sizeof(MATRIX_ELEM) * b->ncol, b_end - b_begin, + cudaMemcpyDeviceToDevice)); + PROFILE_STOP + return 0; +} + +extern const char *MATRIX_CUMATRIX_HOST_TNAME; +static int nerv_matrix_(copy_fromh)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); + int nargs = lua_gettop(L); + int b_begin = nargs > 2 ? luaL_checkinteger(L, 3) : 0; + int b_end = nargs > 3 ? luaL_checkinteger(L, 4) : b->nrow; + int a_begin = nargs > 4 ? luaL_checkinteger(L, 5) : 0; + if (!(0 <= b_begin && b_begin < b_end && b_end <= b->nrow && + a_begin + b_end - b_begin <= a->nrow)) + nerv_error(L, "invalid copy interval"); + if (a->ncol != b->ncol) + nerv_error(L, "matrices should be of the same dimension"); + PROFILE_START + CUDA_SAFE_SYNC_CALL( + cudaMemcpy2D(MATRIX_ROW_PTR(a, a_begin), a->stride, + MATRIX_ROW_PTR(b, b_begin), b->stride, + sizeof(MATRIX_ELEM) * b->ncol, b_end - b_begin, + cudaMemcpyHostToDevice)); + PROFILE_STOP + return 0; +} + +static int nerv_matrix_(copy_toh)(lua_State *L) { + Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname)); + Matrix *b = luaT_checkudata(L, 2, MATRIX_CUMATRIX_HOST_TNAME); + int nargs = lua_gettop(L); |
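A note on the one non-obvious call in the cumatrix.c hunk above: nerv_matrix_(mul) hands b to cuBLAS before a because NERV stores matrices row-major while cuBLAS assumes column-major storage; a row-major M is bit-identical to a column-major M^T, so C = op(A)*op(B) is obtained by asking cuBLAS for C^T = op(B)^T * op(A)^T with the operand order and dimensions swapped. The sketch below is not from the patch; it uses plain cublasSgemm on float with toy sizes (hA, hB, hC are illustrative names) to show the same operand order and leading dimensions.

/* Illustrative only: row-major C = A * B through column-major cuBLAS,
 * using the same operand order as nerv_matrix_(mul) above. */
#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main(void) {
    enum { M = 2, K = 3, N = 2 };
    float hA[M * K] = {1, 2, 3,
                       4, 5, 6};        /* row-major M x K */
    float hB[K * N] = {1, 0,
                       0, 1,
                       1, 1};           /* row-major K x N */
    float hC[M * N] = {0};
    float *A, *B, *C;
    float alpha = 1.0f, beta = 0.0f;
    cublasHandle_t handle;

    cudaMalloc((void **)&A, sizeof(hA));
    cudaMalloc((void **)&B, sizeof(hB));
    cudaMalloc((void **)&C, sizeof(hC));
    cudaMemcpy(A, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(B, hB, sizeof(hB), cudaMemcpyHostToDevice);
    cublasCreate(&handle);
    /* Column-major view: C^T (N x M) = B^T (N x K) * A^T (K x M), so B is
     * passed first; each leading dimension is the row-major row length,
     * the role played by stride / sizeof(MATRIX_ELEM) in the binding. */
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K,
                &alpha, B, N, A, K,
                &beta, C, N);
    cudaMemcpy(hC, C, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("%g %g\n%g %g\n", hC[0], hC[1], hC[2], hC[3]);   /* expect: 4 5 / 10 11 */
    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}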