author     Determinant <ted.sybil@gmail.com>  2015-06-03 23:00:30 +0800
committer  Determinant <ted.sybil@gmail.com>  2015-06-03 23:00:30 +0800
commit     a753eca0121ac3ec81ed76bd719d3f1cb9522680
tree       9777fdddf5d0404964353a0b3d2821e514f6eeb3
parent     38962683e518dcbebc0cfa6c0c9c9616b25d5bd1
...
-rw-r--r--  examples/tnet_preprocessing_example.lua    2
-rw-r--r--  examples/tnet_preprocessing_example2.lua   2
-rw-r--r--  examples/tnet_sgd_buffer.lua              70
-rw-r--r--  init.lua                                  16
-rw-r--r--  tnet_io/cwrapper.cpp                       6
-rw-r--r--  tnet_io/cwrapper.h                         1
-rw-r--r--  tnet_io/init.c                             7
-rw-r--r--  tools/tnet_to_nerv.c                      57
-rw-r--r--  tools/tnet_to_nerv.cpp                    68
9 files changed, 220 insertions(+), 9 deletions(-)
diff --git a/examples/tnet_preprocessing_example.lua b/examples/tnet_preprocessing_example.lua
index 8a65b44..5f20910 100644
--- a/examples/tnet_preprocessing_example.lua
+++ b/examples/tnet_preprocessing_example.lua
@@ -1,6 +1,6 @@
require 'libspeech'
frm_ext = 5
-gconf = {mat_type = nerv.CuMatrixFloat,
+gconf = {cumat_type = nerv.CuMatrixFloat,
batch_size = 158}
param_repo = nerv.ParamRepo({"global_transf.nerv"})
sublayer_repo = nerv.LayerRepo(
diff --git a/examples/tnet_preprocessing_example2.lua b/examples/tnet_preprocessing_example2.lua
index ae8d86b..8e1bc85 100644
--- a/examples/tnet_preprocessing_example2.lua
+++ b/examples/tnet_preprocessing_example2.lua
@@ -1,5 +1,5 @@
require 'speech.init'
-gconf = {mat_type = nerv.CuMatrixFloat,
+gconf = {cumat_type = nerv.CuMatrixFloat,
batch_size = 158}
param_repo = nerv.ParamRepo({"global_transf.nerv"})
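
Both example scripts change only the configuration key: as the init.lua diff below shows, the reader now looks up matrix constructors through gconf instead of hard-coding nerv.CuMatrixFloat. A minimal sketch of the pattern (nrow and ncol are hypothetical dimensions):

    -- matrix classes are taken from the shared configuration table
    local dev  = gconf.cumat_type(nrow, ncol)   -- CUDA matrix, e.g. nerv.CuMatrixFloat
    local host = gconf.mmat_type(nrow, ncol)    -- host matrix, e.g. nerv.MMatrixFloat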
diff --git a/examples/tnet_sgd_buffer.lua b/examples/tnet_sgd_buffer.lua
new file mode 100644
index 0000000..152d2f5
--- /dev/null
+++ b/examples/tnet_sgd_buffer.lua
@@ -0,0 +1,70 @@
+require 'speech.init'
+gconf = {cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ batch_size = 256}
+param_repo = nerv.ParamRepo({"global_transf.nerv"})
+
+sublayer_repo = nerv.LayerRepo(
+ {
+ ["nerv.BiasLayer"] =
+ {
+ blayer1 = {{bias = "bias1"}, {dim_in = {429}, dim_out = {429}}},
+ blayer2 = {{bias = "bias2"}, {dim_in = {429}, dim_out = {429}}}
+ },
+ ["nerv.WindowLayer"] =
+ {
+ wlayer1 = {{window = "window1"}, {dim_in = {429}, dim_out = {429}}},
+ wlayer2 = {{window = "window2"}, {dim_in = {429}, dim_out = {429}}}
+ }
+ }, param_repo, gconf)
+
+layer_repo = nerv.LayerRepo(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ main = {{}, {
+ dim_in = {429}, dim_out = {429},
+ sub_layers = sublayer_repo,
+ connections = {
+ ["<input>[1]"] = "blayer1[1]",
+ ["blayer1[1]"] = "wlayer1[1]",
+ ["wlayer1[1]"] = "blayer2[1]",
+ ["blayer2[1]"] = "wlayer2[1]",
+ ["wlayer2[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+tnet_reader = nerv.TNetReader({},
+ {
+ id = "main_scp",
+-- scp_file = "/slfs1/users/mfy43/swb_ivec/train_bp.scp",
+ scp_file = "t.scp",
+ conf_file = "/slfs1/users/mfy43/swb_ivec/plp_0_d_a.conf",
+ frm_ext = 5,
+ mlfs = {
+ ref = {
+ file = "/slfs1/users/mfy43/swb_ivec/ref.mlf",
+ format = "map",
+ format_arg = "/slfs1/users/mfy43/swb_ivec/dict",
+ dir = "*/",
+ ext = "lab"
+ }
+ },
+ global_transf = layer_repo:get_layer("main")
+ })
+
+buffer = nerv.SGDBuffer(gconf,
+ {
+ buffer_size = 1024,
+ readers = {
+ { reader = tnet_reader,
+ data = {main_scp = 429, ref = 1}}
+ }
+ })
+
+for data in buffer.get_data, buffer do
+ print(data.main_scp)
+-- print(data.ref)
+end
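
The final loop drives the buffer with Lua's generic for: buffer.get_data is the iterator function and buffer its state, so iteration stops once get_data returns nil. An equivalent explicit form (a sketch):

    while true do
        local data = buffer:get_data()
        if data == nil then break end   -- reader exhausted
        print(data.main_scp)
    end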
diff --git a/init.lua b/init.lua
index 1f20096..39a1e9e 100644
--- a/init.lua
+++ b/init.lua
@@ -4,6 +4,8 @@ local TNetReader = nerv.class("nerv.TNetReader", "nerv.DataReader")
function TNetReader:__init(global_conf, reader_conf)
self.feat_id = reader_conf.id
self.frm_ext = reader_conf.frm_ext
+ self.gconf = global_conf
+ self.global_transf = reader_conf.global_transf
self.feat_repo = nerv.TNetFeatureRepo(reader_conf.scp_file,
reader_conf.conf_file,
reader_conf.frm_ext)
@@ -15,24 +17,26 @@ function TNetReader:__init(global_conf, reader_conf)
mlf_spec.dir,
mlf_spec.ext)
end
- self.global_transf = reader_conf.global_transf
end
function TNetReader:get_data()
+ if self.feat_repo:is_end() then
+ return nil
+ end
local res = {}
local frm_ext = self.frm_ext
local step = frm_ext * 2 + 1
local feat_utter = self.feat_repo:cur_utter()
- local expanded = nerv.CuMatrixFloat(feat_utter:nrow(), feat_utter:ncol() * step)
- expanded:expand_frm(nerv.CuMatrixFloat.new_from_host(feat_utter), frm_ext)
+ local expanded = self.gconf.cumat_type(feat_utter:nrow(), feat_utter:ncol() * step)
+ expanded:expand_frm(self.gconf.cumat_type.new_from_host(feat_utter), frm_ext)
local rearranged = expanded:create()
rearranged:rearrange_frm(expanded, step)
local input = {rearranged}
local output = {rearranged:create()}
- self.global_transf:init()
+ self.global_transf:init(input[1]:nrow())
self.global_transf:propagate(input, output)
- expanded = nerv.CuMatrixFloat(output[1]:nrow() - frm_ext * 2, output[1]:ncol())
- expanded:copy_fromd(output[1], frm_ext, feat_utter:nrow() - frm_ext)
+ expanded = self.gconf.mmat_type(output[1]:nrow() - frm_ext * 2, output[1]:ncol())
+ output[1]:copy_toh(expanded, frm_ext, feat_utter:nrow() - frm_ext)
res[self.feat_id] = expanded
for id, repo in pairs(self.lab_repo) do
local lab_utter = repo:get_utter(self.feat_repo, expanded:nrow())
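
A note on the dimension arithmetic in get_data: with frm_ext = 5 the context window spans step = 2 * 5 + 1 = 11 frames, so the 429-dim inputs of the example layers correspond to a 39-dim base feature (39 is an assumption inferred from 429 / 11; the diff does not state the base width):

    local frm_ext  = 5
    local step     = frm_ext * 2 + 1   -- 11 frames per context window
    local base_dim = 39                -- assumed PLP feature width
    print(base_dim * step)             -- 429, the dim_in of the example layers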
diff --git a/tnet_io/cwrapper.cpp b/tnet_io/cwrapper.cpp
index e82f3f8..4149557 100644
--- a/tnet_io/cwrapper.cpp
+++ b/tnet_io/cwrapper.cpp
@@ -2,13 +2,13 @@
#include "KaldiLib/Labels.h"
#include "KaldiLib/Common.h"
#include "KaldiLib/UserInterface.h"
-#include "../../common.h"
#include <string>
#define SNAME "TNET"
extern "C" {
#include "cwrapper.h"
#include "string.h"
+#include "../../common.h"
extern Matrix *nerv_matrix_host_float_new_(lua_State *L, long nrow, long ncol);
@@ -77,6 +77,10 @@ extern "C" {
repo->feature_repo.MoveNext();
}
+ int tnet_feature_repo_is_end(TNetFeatureRepo *repo) {
+ return repo->feature_repo.EndOfList();
+ }
+
size_t tnet_feature_repo_current_samplerate(TNetFeatureRepo *repo) {
return repo->feature_repo.CurrentHeader().mSamplePeriod;
}
diff --git a/tnet_io/cwrapper.h b/tnet_io/cwrapper.h
index a34f090..54fb69b 100644
--- a/tnet_io/cwrapper.h
+++ b/tnet_io/cwrapper.h
@@ -14,6 +14,7 @@ extern "C" {
size_t tnet_feature_repo_current_samplerate(TNetFeatureRepo *repo);
const char *tnet_feature_repo_current_tag(TNetFeatureRepo *repo);
void tnet_feature_repo_next(TNetFeatureRepo *repo);
+ int tnet_feature_repo_is_end(TNetFeatureRepo *repo);
void tnet_feature_repo_destroy(TNetFeatureRepo *repo);
typedef struct TNetLabelRepo TNetLabelRepo;
diff --git a/tnet_io/init.c b/tnet_io/init.c
index 3fa7cb8..16f6f37 100644
--- a/tnet_io/init.c
+++ b/tnet_io/init.c
@@ -40,10 +40,17 @@ static int feat_repo_next(lua_State *L) {
return 0;
}
+static int feat_repo_is_end(lua_State *L) {
+ TNetFeatureRepo *repo = luaT_checkudata(L, 1, nerv_tnet_feat_repo_tname);
+ lua_pushboolean(L, tnet_feature_repo_is_end(repo));
+ return 1;
+}
+
static const luaL_Reg feat_repo_methods[] = {
{"cur_utter", feat_repo_current_utterance},
{"cur_tag", feat_repo_current_tag},
{"next", feat_repo_next},
+ {"is_end", feat_repo_is_end},
{NULL, NULL}
};
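
With is_end registered alongside the existing methods, a feature repo can also be walked manually from Lua; a minimal sketch using only the methods in the table above (feat_repo is assumed to be a nerv.TNetFeatureRepo instance):

    while not feat_repo:is_end() do
        local utter = feat_repo:cur_utter()
        print(feat_repo:cur_tag(), utter:nrow())
        feat_repo:next()
    end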
diff --git a/tools/tnet_to_nerv.c b/tools/tnet_to_nerv.c
new file mode 100644
index 0000000..f781236
--- /dev/null
+++ b/tools/tnet_to_nerv.c
@@ -0,0 +1,57 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+char token[1024];
+double mat[4096][4096];
+int main() {
+ FILE *fout = fopen("converted.nerv", "w");
+ int cnt = 0;
+ while (scanf("%s", token) != EOF)
+ {
+ int nrow, ncol;
+ int i, j;
+ if (strcmp(token, "<biasedlinearity>") == 0)
+ {
+ scanf("%d %d", &ncol, &nrow);
+ scanf("%s %d %d", token, &ncol, &nrow);
+ printf("%d %d\n", nrow, ncol);
+ for (j = 0; j < ncol; j++)
+ for (i = 0; i < nrow; i++)
+ scanf("%lf", mat[i] + j);
+ off_t base = ftello(fout);
+ fprintf(fout, "%16d", 0);
+ fprintf(fout, "{type=\"nerv.LinearTransParam\",id=\"affine%d_ltp\"}\n",
+ cnt);
+ fprintf(fout, "%d %d\n", nrow, ncol);
+ for (i = 0; i < nrow; i++)
+ {
+ for (j = 0; j < ncol; j++)
+ fprintf(fout, "%.8f ", mat[i][j]);
+ fprintf(fout, "\n");
+ }
+ size_t length = ftello(fout) - base;
+ fseeko(fout, base, SEEK_SET);
+ fprintf(fout, "[%13lu]\n", length);
+ fseeko(fout, 0, SEEK_END);
+ if (scanf("%s %d", token, &ncol) == 2 && *token == 'v')
+ {
+ base = ftello(fout);
+ for (j = 0; j < ncol; j++)
+ scanf("%lf", mat[0] + j);
+ fprintf(fout, "%16d", 0);
+ fprintf(fout, "{type=\"nerv.BiasParam\",id=\"affine%d_bp\"}\n",
+ cnt);
+            fprintf(fout, "1 %d\n", ncol); /* bias is a 1 x ncol row vector */
+ for (j = 0; j < ncol; j++)
+ fprintf(fout, "%.8f ", mat[0][j]);
+ fprintf(fout, "\n");
+ length = ftello(fout) - base;
+ fseeko(fout, base, SEEK_SET);
+ fprintf(fout, "[%13lu]\n", length);
+ cnt++;
+ fseeko(fout, 0, SEEK_END);
+ }
+ }
+ }
+ return 0;
+}
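
The converter reserves a 16-byte size field per chunk ("%16d" with a zero), writes the metadata line and the matrix, then seeks back and rewrites the field as "[%13lu]\n", which is again exactly 16 bytes counting the brackets and newline; the recorded length covers the whole chunk, header included. A sketch of skipping through chunks in the layout produced above:

    local f = assert(io.open("converted.nerv", "rb"))
    while true do
        local base = f:seek()
        local head = f:read(16)                   -- "[         1234]\n"
        if head == nil then break end
        local size = tonumber(head:match("%d+"))  -- whole-chunk size in bytes
        print(f:read("*l"), size)                 -- metadata line, e.g. {type=...}
        f:seek("set", base + size)                -- jump to the next chunk
    end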
diff --git a/tools/tnet_to_nerv.cpp b/tools/tnet_to_nerv.cpp
new file mode 100644
index 0000000..cedf27a
--- /dev/null
+++ b/tools/tnet_to_nerv.cpp
@@ -0,0 +1,68 @@
+#include <cstdio>
+#include <fstream>
+#include <string>
+#include <cstring>
+char token[1024];
+char output[1024];
+double mat[4096][4096];
+int main() {
+ std::ofstream fout;
+ fout.open("converted.nerv");
+ int cnt = 0;
+ while (scanf("%s", token) != EOF)
+ {
+ int nrow, ncol;
+ int i, j;
+ if (strcmp(token, "<biasedlinearity>") == 0)
+ {
+ scanf("%d %d", &ncol, &nrow);
+ scanf("%s %d %d", token, &ncol, &nrow);
+ printf("%d %d\n", nrow, ncol);
+ for (j = 0; j < ncol; j++)
+ for (i = 0; i < nrow; i++)
+ scanf("%lf", mat[i] + j);
+ long base = fout.tellp();
+ sprintf(output, "%16d", 0);
+ fout << output;
+ sprintf(output, "{type=\"nerv.LinearTransParam\",id=\"affine%d_ltp\"}\n",
+ cnt);
+ fout << output;
+ sprintf(output, "%d %d\n", nrow, ncol);
+ fout << output;
+ for (i = 0; i < nrow; i++)
+ {
+ for (j = 0; j < ncol; j++)
+ fout << mat[i][j] << " ";
+ fout << std::endl;
+ }
+ long length = fout.tellp() - base;
+ fout.seekp(base);
+ sprintf(output, "[%13lu]\n", length);
+ fout << output;
+ fout.seekp(0, std::ios_base::end);
+ if (scanf("%s %d", token, &ncol) == 2 && *token == 'v')
+ {
+ base = fout.tellp();
+ for (j = 0; j < ncol; j++)
+ scanf("%lf", mat[0] + j);
+ sprintf(output, "%16d", 0);
+ fout << output;
+ sprintf(output, "{type=\"nerv.BiasParam\",id=\"affine%d_bp\"}\n",
+ cnt);
+ fout << output;
+ sprintf(output, "1 %d\n", ncol);
+ fout << output;
+ for (j = 0; j < ncol; j++)
+ fout << mat[0][j] << " ";
+ fout << std::endl;
+ length = fout.tellp() - base;
+ fout.seekp(base);
+ sprintf(output, "[%13lu]\n", length);
+ fout << output;
+ fout.seekp(0, std::ios_base::end);
+ cnt++;
+ }
+ }
+ }
+ return 0;
+}