-rw-r--r--  nerv/examples/mmi_chime3.lua         |  1
-rw-r--r--  nerv/examples/mpe_chime3.lua         | 10
-rw-r--r--  nerv/examples/swb_baseline.lua       |  4
-rw-r--r--  nerv/lib/matrix/cukernel.cu          |  1
-rw-r--r--  nerv/lib/matrix/cumatrix.c           |  1
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c   |  2
-rw-r--r--  nerv/lib/matrix/generic/elem_type.h  | 13
-rw-r--r--  nerv/lib/matrix/generic/matrix.c     |  9
-rw-r--r--  nerv/lib/matrix/matrix.h             |  1
-rw-r--r--  nerv/lib/matrix/mmatrix.c            |  5
-rw-r--r--  nerv/matrix/cumatrix.c               |  1
-rw-r--r--  nerv/matrix/generic/cumatrix.c       |  2
-rw-r--r--  nerv/matrix/generic/elem_type.h      | 22
-rw-r--r--  nerv/matrix/generic/mmatrix.c        |  2
-rw-r--r--  nerv/matrix/mmatrix.c                |  2
15 files changed, 37 insertions(+), 39 deletions(-)
diff --git a/nerv/examples/mmi_chime3.lua b/nerv/examples/mmi_chime3.lua
index 6ac7f28..3daaafa 100644
--- a/nerv/examples/mmi_chime3.lua
+++ b/nerv/examples/mmi_chime3.lua
@@ -160,6 +160,7 @@ function make_readers(feature_rspecifier, layer_repo)
feature_rspecifier = feature_rspecifier,
frm_ext = gconf.frm_ext,
global_transf = layer_repo:get_layer("global_transf"),
+ need_key = true,
mlfs = {}
})
}
diff --git a/nerv/examples/mpe_chime3.lua b/nerv/examples/mpe_chime3.lua
index ec095b0..f9a2855 100644
--- a/nerv/examples/mpe_chime3.lua
+++ b/nerv/examples/mpe_chime3.lua
@@ -7,6 +7,8 @@ gconf = {lrate = 0.00001, wcost = 0, momentum = 0.0,
tr_scp = "ark,s,cs:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_smbr/train.scp ark:- |",
initialized_param = {"/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_init.nerv",
"/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_global_transf.nerv"},
+ decode_param = {"/slfs6/users/ymz09/nerv-project/test_mpe/1.nerv",
+ "/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_global_transf.nerv"},
debug = false}
function make_layer_repo(param_repo)
@@ -125,13 +127,12 @@ function make_layer_repo(param_repo)
["mpe_crit[1]"] = "<output>[1]"
}
}},
- softmax_output = {{}, {
+ decode_output = {{}, {
dim_in = {440}, dim_out = {2011},
sub_layers = layer_repo,
connections = {
["<input>[1]"] = "main[1]",
- ["main[1]"] = "softmax[1]",
- ["softmax[1]"] = "<output>[1]"
+ ["main[1]"] = "<output>[1]"
}
}}
}
@@ -145,7 +146,7 @@ function get_network(layer_repo)
end
function get_decode_network(layer_repo)
- return layer_repo:get_layer("softmax_output")
+ return layer_repo:get_layer("decode_output")
end
function get_global_transf(layer_repo)
@@ -160,6 +161,7 @@ function make_readers(feature_rspecifier, layer_repo)
feature_rspecifier = feature_rspecifier,
frm_ext = gconf.frm_ext,
global_transf = layer_repo:get_layer("global_transf"),
+ need_key = true,
mlfs = {}
})
}
diff --git a/nerv/examples/swb_baseline.lua b/nerv/examples/swb_baseline.lua
index 8f72200..51052ba 100644
--- a/nerv/examples/swb_baseline.lua
+++ b/nerv/examples/swb_baseline.lua
@@ -2,9 +2,9 @@ require 'htk_io'
gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
cumat_type = nerv.CuMatrixFloat,
mmat_type = nerv.MMatrixFloat,
- direct_update = true,
+ rearrange = true, -- just to make the context order consistent with old results, deprecated
frm_ext = 5,
- frm_trim = 5,
+ frm_trim = 5, -- trim the first and last 5 frames, TNet just does this, deprecated
tr_scp = "/slfs1/users/mfy43/swb_ivec/train_bp.scp",
cv_scp = "/slfs1/users/mfy43/swb_ivec/train_cv.scp",
htk_conf = "/slfs1/users/mfy43/swb_ivec/plp_0_d_a.conf",
diff --git a/nerv/lib/matrix/cukernel.cu b/nerv/lib/matrix/cukernel.cu
index 210e6bf..c20e538 100644
--- a/nerv/lib/matrix/cukernel.cu
+++ b/nerv/lib/matrix/cukernel.cu
@@ -44,6 +44,7 @@ __device__ float atomicAdd_nvidia(float* address, float val) {
#undef MATRIX_USE_FLOAT
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
diff --git a/nerv/lib/matrix/cumatrix.c b/nerv/lib/matrix/cumatrix.c
index ff1168d..a8ed075 100644
--- a/nerv/lib/matrix/cumatrix.c
+++ b/nerv/lib/matrix/cumatrix.c
@@ -57,6 +57,7 @@ void nerv_cumatrix_init() {
#undef MATRIX_USE_FLOAT
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
#undef MATRIX_CUMATRIX_HOST_TNAME
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 65e0788..68889ad 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -349,7 +349,7 @@ void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
long nrow = a->nrow;
if (!(0 <= b_begin && b_begin + nrow <= idx->ncol))
NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
- float *idx_ptr = idx->data.f;
+ float *idx_ptr = MATRIX_ELEM_PTR_F(idx);
int i;
if (idx->nrow != 1)
NERV_EXIT_STATUS(status, MAT_IDX_VECTOR_EXP, 0);
diff --git a/nerv/lib/matrix/generic/elem_type.h b/nerv/lib/matrix/generic/elem_type.h
index bffe940..07f6355 100644
--- a/nerv/lib/matrix/generic/elem_type.h
+++ b/nerv/lib/matrix/generic/elem_type.h
@@ -1,22 +1,29 @@
+#define MATRIX_ELEM_PTR_F(self) ((float *)((char *)((self)->data.f) + (self)->offset))
+#define MATRIX_ELEM_PTR_D(self) ((double *)((char *)((self)->data.d) + (self)->offset))
+#define MATRIX_ELEM_PTR_I(self) ((long *)((char *)((self)->data.i) + (self)->offset))
+
#ifdef MATRIX_USE_FLOAT
#define MATRIX_ELEM float
#define MATRIX_ELEM_FMT "%f"
#define MATRIX_ELEM_WRITE_FMT "%.8f"
-#define MATRIX_ELEM_PTR(self) ((self)->data.f)
+#define MATRIX_ELEM_PTR(self) MATRIX_ELEM_PTR_F(self)
+#define MATRIX_ELEM_PTR_BASE(self) ((self)->data.f)
#elif defined(MATRIX_USE_DOUBLE)
#define MATRIX_ELEM double
#define MATRIX_ELEM_FMT "%lf"
#define MATRIX_ELEM_WRITE_FMT "%.8lf"
-#define MATRIX_ELEM_PTR(self) ((self)->data.d)
+#define MATRIX_ELEM_PTR(self) MATRIX_ELEM_PTR_D(self)
+#define MATRIX_ELEM_PTR_BASE(self) ((self)->data.d)
#elif defined(MATRIX_USE_INT)
#define MATRIX_ELEM long
#define MATRIX_ELEM_FMT "%ld"
#define MATRIX_ELEM_WRITE_FMT "%ld"
-#define MATRIX_ELEM_PTR(self) ((self)->data.i)
+#define MATRIX_ELEM_PTR(self) MATRIX_ELEM_PTR_I(self)
+#define MATRIX_ELEM_PTR_BASE(self) ((self)->data.i)
#endif
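
[Editor's note, not part of the commit: a minimal, self-contained C sketch of how the new macros in elem_type.h resolve element pointers. The Matrix struct and the MATRIX_ELEM_PTR_BASE_F name below are simplified stand-ins (curand and refcount fields omitted); the point is that the typed MATRIX_ELEM_PTR_* macros add the view's byte offset, while the *_BASE form keeps returning the raw allocation.]

#include <stdio.h>

/* Simplified stand-in for the Matrix struct in nerv/lib/matrix/matrix.h. */
typedef struct Matrix {
    long stride;                              /* bytes per row              */
    long ncol, nrow;
    union { float *f; double *d; long *i; } data;
    unsigned long offset;                     /* byte offset of this view   */
} Matrix;

/* Same shape as the patched elem_type.h definitions. */
#define MATRIX_ELEM_PTR_F(self) \
    ((float *)((char *)((self)->data.f) + (self)->offset))
#define MATRIX_ELEM_PTR_BASE_F(self) ((self)->data.f)  /* raw allocation   */

int main(void) {
    float storage[2 * 4] = {0};               /* backing store, 2x4 floats  */
    Matrix m   = { .stride = 4 * sizeof(float), .ncol = 4, .nrow = 2,
                   .data = { .f = storage }, .offset = 0 };
    Matrix row = m;                           /* view of the second row     */
    row.nrow   = 1;
    row.offset = 1 * m.stride;                /* as the patched getrow does */

    MATRIX_ELEM_PTR_F(&row)[2] = 3.0f;        /* writes storage[4 + 2]      */
    printf("%.1f %p %p\n", storage[6],
           (void *)MATRIX_ELEM_PTR_BASE_F(&row),   /* allocation base       */
           (void *)MATRIX_ELEM_PTR_F(&row));       /* base + offset         */
    return 0;
}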
diff --git a/nerv/lib/matrix/generic/matrix.c b/nerv/lib/matrix/generic/matrix.c
index fd5d28f..004d9aa 100644
--- a/nerv/lib/matrix/generic/matrix.c
+++ b/nerv/lib/matrix/generic/matrix.c
@@ -4,12 +4,11 @@
/* FIXME: malloc failure detection */
void nerv_matrix_(data_free)(Matrix *self, Status *status) {
- if(*self->data_ref == 0) return; /* FIXME: repeat free memory */
assert(*self->data_ref > 0);
if (--(*self->data_ref) == 0)
{
/* free matrix data */
- MATRIX_DATA_FREE(MATRIX_ELEM_PTR(self), status);
+ MATRIX_DATA_FREE(MATRIX_ELEM_PTR_BASE(self), status);
curandDestroyGenerator(*(self->curand_gen));
free(self->curand_gen);
free(self->data_ref);
@@ -31,7 +30,7 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
self->ncol = ncol;
self->nmax = self->nrow * self->ncol;
self->dim = 2;
- MATRIX_DATA_ALLOC(&MATRIX_ELEM_PTR(self), &self->stride,
+ MATRIX_DATA_ALLOC(&MATRIX_ELEM_PTR_BASE(self), &self->stride,
sizeof(MATRIX_ELEM) * self->ncol, self->nrow,
status);
if (status->err_code != NERV_NORMAL)
@@ -46,6 +45,7 @@ Matrix *nerv_matrix_(create)(long nrow, long ncol, Status *status) {
curandCreateGenerator(self->curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(*(self->curand_gen), time(NULL));
+ self->offset = 0;
nerv_matrix_(data_retain)(self);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
return self;
@@ -62,9 +62,10 @@ Matrix *nerv_matrix_(getrow)(Matrix *self, int row) {
prow->dim = 1;
prow->stride = self->stride;
prow->nmax = prow->ncol;
- MATRIX_ELEM_PTR(prow) = MATRIX_ROW_PTR(self, row);
+ prow->data = self->data;
prow->data_ref = self->data_ref;
prow->curand_gen = self->curand_gen;
+ prow->offset = row * self->stride;
nerv_matrix_(data_retain)(prow);
return prow;
}
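
[Editor's note, not part of the commit: a hedged C sketch of the view semantics getrow now relies on. A row view shares the parent's storage and reference count and records only a byte offset, which is why data_free must hand the allocation base (MATRIX_ELEM_PTR_BASE) back to the allocator rather than the offset pointer. The names below are simplified stand-ins, not the real nerv API.]

#include <assert.h>
#include <stdlib.h>

typedef struct Matrix {
    long stride, ncol, nrow;
    union { float *f; } data;      /* shared allocation                     */
    unsigned long offset;          /* byte offset of this view              */
    long *data_ref;                /* shared reference count                */
} Matrix;

#define ELEM_PTR_BASE(self) ((self)->data.f)
#define ELEM_PTR(self) ((float *)((char *)((self)->data.f) + (self)->offset))

/* Mirrors the patched getrow: no copy, just an aliased view with an offset. */
static Matrix getrow(Matrix *self, int row) {
    Matrix prow = *self;
    prow.nrow   = 1;
    prow.offset = (unsigned long)row * self->stride;
    ++*self->data_ref;             /* data_retain                           */
    return prow;
}

/* Mirrors the patched data_free: always release the allocation base. */
static void data_free(Matrix *self) {
    assert(*self->data_ref > 0);
    if (--*self->data_ref == 0) {
        free(ELEM_PTR_BASE(self)); /* not ELEM_PTR: offset may be non-zero  */
        free(self->data_ref);
    }
}

int main(void) {
    Matrix m = { .stride = 4 * sizeof(float), .ncol = 4, .nrow = 2 };
    m.data.f    = calloc(2 * 4, sizeof(float));
    m.data_ref  = malloc(sizeof(long));
    *m.data_ref = 1;

    Matrix r = getrow(&m, 1);
    ELEM_PTR(&r)[0] = 1.0f;        /* writes the parent's second row        */
    data_free(&r);                 /* refcount 2 -> 1, nothing freed        */
    data_free(&m);                 /* refcount 1 -> 0, frees the base       */
    return 0;
}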
diff --git a/nerv/lib/matrix/matrix.h b/nerv/lib/matrix/matrix.h
index 5a85c08..a28fd97 100644
--- a/nerv/lib/matrix/matrix.h
+++ b/nerv/lib/matrix/matrix.h
@@ -13,6 +13,7 @@ typedef struct Matrix {
double *d;
long *i;
} data; /* pointer to actual storage */
+ unsigned long offset; /* the actual beginning of the matrix */
long *data_ref;
curandGenerator_t *curand_gen;
} Matrix;
diff --git a/nerv/lib/matrix/mmatrix.c b/nerv/lib/matrix/mmatrix.c
index b8157eb..b5670f2 100644
--- a/nerv/lib/matrix/mmatrix.c
+++ b/nerv/lib/matrix/mmatrix.c
@@ -6,6 +6,7 @@
#define host_matrix_(NAME) host_matrix_float_##NAME
#define nerv_matrix_(NAME) nerv_matrix_host_float_##NAME
#include "generic/matrix.h"
+#include "generic/elem_type.h"
#include "generic/mmatrix.c"
Matrix *nerv_matrix_(perm_gen)(int ncol, Status *status) {
@@ -13,7 +14,7 @@ Matrix *nerv_matrix_(perm_gen)(int ncol, Status *status) {
Matrix *self = nerv_matrix_(create)(1, ncol, status);
if (status->err_code != NERV_NORMAL)
return NULL;
- float *prow = self->data.f;
+ float *prow = MATRIX_ELEM_PTR_F(self);
for (i = 0; i < ncol; i++)
prow[i] = i;
for (i = ncol - 1; i >= 0; i--)
@@ -31,6 +32,7 @@ Matrix *nerv_matrix_(perm_gen)(int ncol, Status *status) {
#undef MATRIX_USE_FLOAT
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
@@ -44,6 +46,7 @@ Matrix *nerv_matrix_(perm_gen)(int ncol, Status *status) {
#undef MATRIX_USE_DOUBLE
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
diff --git a/nerv/matrix/cumatrix.c b/nerv/matrix/cumatrix.c
index fef03fc..bf92f92 100644
--- a/nerv/matrix/cumatrix.c
+++ b/nerv/matrix/cumatrix.c
@@ -49,6 +49,7 @@ const char *nerv_matrix_(tname) = "nerv.CuMatrixFloat";
#undef MATRIX_USE_FLOAT
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
#undef MATRIX_CUMATRIX_HOST_TNAME
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index fb36033..df858e6 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -1,5 +1,5 @@
#ifdef NERV_GENERIC_CUMATRIX
-#include "elem_type.h"
+#include "../../lib/matrix/generic/elem_type.h"
#define MATRIX_DATA_WRITE(L, data, idx, val) cuda_matrix_(write)(L, data, idx, val)
#define MATRIX_DATA_READ(L, data, idx) cuda_matrix_(read)(L, data, idx)
#define MATRIX_INIT(L) cuda_matrix_(init)(L)
diff --git a/nerv/matrix/generic/elem_type.h b/nerv/matrix/generic/elem_type.h
deleted file mode 100644
index bffe940..0000000
--- a/nerv/matrix/generic/elem_type.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifdef MATRIX_USE_FLOAT
-
-#define MATRIX_ELEM float
-#define MATRIX_ELEM_FMT "%f"
-#define MATRIX_ELEM_WRITE_FMT "%.8f"
-#define MATRIX_ELEM_PTR(self) ((self)->data.f)
-
-#elif defined(MATRIX_USE_DOUBLE)
-
-#define MATRIX_ELEM double
-#define MATRIX_ELEM_FMT "%lf"
-#define MATRIX_ELEM_WRITE_FMT "%.8lf"
-#define MATRIX_ELEM_PTR(self) ((self)->data.d)
-
-#elif defined(MATRIX_USE_INT)
-
-#define MATRIX_ELEM long
-#define MATRIX_ELEM_FMT "%ld"
-#define MATRIX_ELEM_WRITE_FMT "%ld"
-#define MATRIX_ELEM_PTR(self) ((self)->data.i)
-
-#endif
diff --git a/nerv/matrix/generic/mmatrix.c b/nerv/matrix/generic/mmatrix.c
index 01dd9e5..a4e8489 100644
--- a/nerv/matrix/generic/mmatrix.c
+++ b/nerv/matrix/generic/mmatrix.c
@@ -1,6 +1,6 @@
#ifdef NERV_GENERIC_MMATRIX
#include "../../lib/matrix/generic/matrix.h"
-#include "elem_type.h"
+#include "../../lib/matrix/generic/elem_type.h"
#define MATRIX_DATA_WRITE(L, data, idx, val) (data[idx] = val)
#define MATRIX_DATA_READ(L, data, idx) (data[idx])
#define MATRIX_INIT(L) host_matrix_(init)(L)
diff --git a/nerv/matrix/mmatrix.c b/nerv/matrix/mmatrix.c
index 961059c..20c31d6 100644
--- a/nerv/matrix/mmatrix.c
+++ b/nerv/matrix/mmatrix.c
@@ -45,6 +45,7 @@ static const luaL_Reg nerv_matrix_(extra_methods_int)[] = {
#undef MATRIX_USE_FLOAT
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT
#undef MMATRIX_INIT
@@ -60,6 +61,7 @@ const char *nerv_matrix_(tname) = "nerv.MMatrixDouble";
#undef MATRIX_USE_DOUBLE
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
+#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT