Diffstat (limited to 'lua')
-rw-r--r--  lua/config.lua          67
-rw-r--r--  lua/main.lua            45
-rw-r--r--  lua/network.lua        113
-rw-r--r--  lua/reader.lua         113
-rw-r--r--  lua/select_linear.lua   62
-rw-r--r--  lua/timer.lua           33
-rw-r--r--  lua/tnn.lua            136
7 files changed, 0 insertions(+), 569 deletions(-)
diff --git a/lua/config.lua b/lua/config.lua
deleted file mode 100644
index 1ec1198..0000000
--- a/lua/config.lua
+++ /dev/null
@@ -1,67 +0,0 @@
-function get_global_conf()
- local global_conf = {
- lrate = 0.15,
- wcost = 1e-5,
- momentum = 0,
- clip = 5,
- cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.MMatrixFloat,
- vocab_size = 10000,
- nn_act_default = 0,
- hidden_size = 300,
- layer_num = 1,
- chunk_size = 15,
- batch_size = 20,
- max_iter = 3,
- param_random = function() return (math.random() / 5 - 0.1) end,
- dropout = 0.5,
- timer = nerv.Timer(),
- pr = nerv.ParamRepo(),
- }
- return global_conf
-end
-
-function get_layers(global_conf)
- local pr = global_conf.pr
- local layers = {
- ['nerv.LSTMLayer'] = {},
- ['nerv.DropoutLayer'] = {},
- ['nerv.SelectLinearLayer'] = {
- ['select'] = {dim_in = {1}, dim_out = {global_conf.hidden_size}, vocab = global_conf.vocab_size, pr = pr},
- },
- ['nerv.CombinerLayer'] = {},
- ['nerv.AffineLayer'] = {
- output = {dim_in = {global_conf.hidden_size}, dim_out = {global_conf.vocab_size}, pr = pr}
- },
- ['nerv.SoftmaxCELayer'] = {
- softmax = {dim_in = {global_conf.vocab_size, global_conf.vocab_size}, dim_out = {1}, compressed = true},
- },
- }
- for i = 1, global_conf.layer_num do
- layers['nerv.LSTMLayer']['lstm' .. i] = {dim_in = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, dim_out = {global_conf.hidden_size, global_conf.hidden_size}, pr = pr}
- layers['nerv.DropoutLayer']['dropout' .. i] = {dim_in = {global_conf.hidden_size}, dim_out = {global_conf.hidden_size}}
- layers['nerv.CombinerLayer']['dup' .. i] = {dim_in = {global_conf.hidden_size}, dim_out = {global_conf.hidden_size, global_conf.hidden_size}, lambda = {1}}
- end
- return layers
-end
-
-function get_connections(global_conf)
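- -- each entry is {source_port, sink_port, time_shift}: shift 0 is a same-step link,
- -- shift 1 feeds the next time step (the recurrent lstm/dup links added below)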
- local connections = {
- {'<input>[1]', 'select[1]', 0},
- {'select[1]', 'lstm1[1]', 0},
- {'dropout' .. global_conf.layer_num .. '[1]', 'output[1]', 0},
- {'output[1]', 'softmax[1]', 0},
- {'<input>[2]', 'softmax[2]', 0},
- {'softmax[1]', '<output>[1]', 0},
- }
- for i = 1, global_conf.layer_num do
- table.insert(connections, {'lstm' .. i .. '[1]', 'dup' .. i .. '[1]', 0})
- table.insert(connections, {'lstm' .. i .. '[2]', 'lstm' .. i .. '[3]', 1})
- table.insert(connections, {'dup' .. i .. '[1]', 'lstm' .. i .. '[2]', 1})
- table.insert(connections, {'dup' .. i .. '[2]', 'dropout' .. i .. '[1]', 0})
- if i > 1 then
- table.insert(connections, {'dropout' .. (i - 1) .. '[1]', 'lstm' .. i .. '[1]', 0})
- end
- end
- return connections
-end
diff --git a/lua/main.lua b/lua/main.lua
deleted file mode 100644
index ce0270a..0000000
--- a/lua/main.lua
+++ /dev/null
@@ -1,45 +0,0 @@
-nerv.include('reader.lua')
-nerv.include('timer.lua')
-nerv.include('config.lua')
-nerv.include(arg[1])
-
-local global_conf = get_global_conf()
-local timer = global_conf.timer
-
-timer:tic('IO')
-
-local data_path = 'nerv/nerv/examples/lmptb/PTBdata/'
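--- NOTE: both readers point at the validation set; together with the 100-batch cap
--- in reader.lua this looks like a quick-test setup rather than a full PTB training run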
-local train_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.valid.txt.adds')
-local val_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.valid.txt.adds')
-
-local train_data = train_reader:get_all_batch(global_conf)
-local val_data = val_reader:get_all_batch(global_conf)
-
-local layers = get_layers(global_conf)
-local connections = get_connections(global_conf)
-
-local NN = nerv.NN(global_conf, train_data, val_data, layers, connections)
-
-timer:toc('IO')
-timer:check('IO')
-io.flush()
-
-timer:tic('global')
-local best_cv = 1e10
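--- simple annealing schedule: keep the learning rate while validation perplexity
--- improves, halve it otherwise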
-for i = 1, global_conf.max_iter do
- timer:tic('Epoch' .. i)
- local train_ppl, val_ppl = NN:epoch()
- if val_ppl < best_cv then
- best_cv = val_ppl
- else
- global_conf.lrate = global_conf.lrate / 2.0
- end
- nerv.printf('Epoch %d: %f %f %f\n', i, global_conf.lrate, train_ppl, val_ppl)
- timer:toc('Epoch' .. i)
- timer:check('Epoch' .. i)
- io.flush()
-end
-timer:toc('global')
-timer:check('global')
-timer:check('network')
-timer:check('gc')
diff --git a/lua/network.lua b/lua/network.lua
deleted file mode 100644
index 0c11321..0000000
--- a/lua/network.lua
+++ /dev/null
@@ -1,113 +0,0 @@
-nerv.include('select_linear.lua')
-
-local nn = nerv.class('nerv.NN')
-
-function nn:__init(global_conf, train_data, val_data, layers, connections)
- self.gconf = global_conf
- self.network = self:get_network(layers, connections)
- self.train_data = self:get_data(train_data)
- self.val_data = self:get_data(val_data)
-end
-
-function nn:get_network(layers, connections)
- self.gconf.dropout_rate = 0
- local layer_repo = nerv.LayerRepo(layers, self.gconf.pr, self.gconf)
- local graph = nerv.GraphLayer('graph', self.gconf,
- {dim_in = {1, self.gconf.vocab_size}, dim_out = {1},
- layer_repo = layer_repo, connections = connections})
- local network = nerv.Network('network', self.gconf,
- {network = graph, clip = self.gconf.clip})
- network:init(self.gconf.batch_size, self.gconf.chunk_size)
- return network
-end
-
-function nn:get_data(data)
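- -- output and error buffers are allocated once per time step and shared by every
- -- minibatch; only err_input (the padding mask) is built per batch below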
- local err_output = {}
- local softmax_output = {}
- local output = {}
- for i = 1, self.gconf.chunk_size do
- err_output[i] = self.gconf.cumat_type(self.gconf.batch_size, 1)
- softmax_output[i] = self.gconf.cumat_type(self.gconf.batch_size, self.gconf.vocab_size)
- output[i] = self.gconf.cumat_type(self.gconf.batch_size, 1)
- end
- local ret = {}
- for i = 1, #data do
- ret[i] = {}
- ret[i].input = {}
- ret[i].output = {}
- ret[i].err_input = {}
- ret[i].err_output = {}
- for t = 1, self.gconf.chunk_size do
- ret[i].input[t] = {}
- ret[i].output[t] = {}
- ret[i].err_input[t] = {}
- ret[i].err_output[t] = {}
- ret[i].input[t][1] = data[i].input[t]
- ret[i].input[t][2] = data[i].output[t]
- ret[i].output[t][1] = output[t]
- local err_input = self.gconf.mmat_type(self.gconf.batch_size, 1)
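- -- mask: 1 for a real frame of stream j, 0 for padding past the sequence end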
- for j = 1, self.gconf.batch_size do
- if t <= data[i].seq_len[j] then
- err_input[j - 1][0] = 1
- else
- err_input[j - 1][0] = 0
- end
- end
- ret[i].err_input[t][1] = self.gconf.cumat_type.new_from_host(err_input)
- ret[i].err_output[t][1] = err_output[t]
- ret[i].err_output[t][2] = softmax_output[t]
- end
- ret[i].seq_length = data[i].seq_len
- ret[i].new_seq = {}
- for j = 1, self.gconf.batch_size do
- if data[i].seq_start[j] then
- table.insert(ret[i].new_seq, j)
- end
- end
- end
- return ret
-end
-
-function nn:process(data, do_train)
- local timer = self.gconf.timer
- local total_err = 0
- local total_frame = 0
- for id = 1, #data do
- if do_train then
- self.gconf.dropout_rate = self.gconf.dropout
- data[id].do_train = true
- else
- self.gconf.dropout_rate = 0
- data[id].do_train = false
- end
- timer:tic('network')
- self.network:mini_batch_init(data[id])
- self.network:propagate()
- timer:toc('network')
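- -- accumulate the frame scores in log10; the value 10^(-total_err / total_frame)
- -- returned below is the perplexity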
- for t = 1, self.gconf.chunk_size do
- local tmp = data[id].output[t][1]:new_to_host()
- for i = 1, self.gconf.batch_size do
- if t <= data[id].seq_length[i] then
- total_err = total_err + math.log10(math.exp(tmp[i - 1][0]))
- total_frame = total_frame + 1
- end
- end
- end
- if do_train then
- timer:tic('network')
- self.network:back_propagate()
- self.network:update()
- timer:toc('network')
- end
- timer:tic('gc')
- collectgarbage('collect')
- timer:toc('gc')
- end
- return math.pow(10, - total_err / total_frame)
-end
-
-function nn:epoch()
- local train_error = self:process(self.train_data, true)
- local val_error = self:process(self.val_data, false)
- return train_error, val_error
-end
diff --git a/lua/reader.lua b/lua/reader.lua
deleted file mode 100644
index 0c7bcb6..0000000
--- a/lua/reader.lua
+++ /dev/null
@@ -1,113 +0,0 @@
-local Reader = nerv.class('nerv.Reader')
-
-function Reader:__init(vocab_file, input_file)
- self:get_vocab(vocab_file)
- self:get_seq(input_file)
-end
-
-function Reader:get_vocab(vocab_file)
- local f = io.open(vocab_file, 'r')
- local id = 0
- self.vocab = {}
- while true do
- local word = f:read()
- if word == nil then
- break
- end
- self.vocab[word] = id
- id = id + 1
- end
- self.size = id
-end
-
-function Reader:split(s, t)
- local ret = {}
- for x in (s .. t):gmatch('(.-)' .. t) do
- table.insert(ret, x)
- end
- return ret
-end
-
-function Reader:get_seq(input_file)
- local f = io.open(input_file, 'r')
- self.seq = {}
- while true do
- local seq = f:read()
- if seq == nil then
- break
- end
- seq = self:split(seq, ' ')
- local tmp = {}
- for i = 1, #seq do
- if seq[i] ~= '' then
- table.insert(tmp, self.vocab[seq[i]])
- end
- end
- table.insert(self.seq, tmp)
- end
-end
-
-function Reader:get_in_out(id, pos)
- return self.seq[id][pos], self.seq[id][pos + 1], pos + 1 == #self.seq[id]
-end
-
-function Reader:get_all_batch(global_conf)
- local data = {}
- local pos = {}
- local offset = 1
- for i = 1, global_conf.batch_size do
- pos[i] = nil
- end
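- -- each pass of the loop below emits one chunk_size-step window across batch_size
- -- parallel streams; capped at 100 batches for quick testing (the commented
- -- 'while true' would sweep the whole corpus)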
- --while true do
- for i = 1, 100 do
- local input = {}
- local output = {}
- for i = 1, global_conf.chunk_size do
- input[i] = global_conf.mmat_type(global_conf.batch_size, 1)
- input[i]:fill(global_conf.nn_act_default)
- output[i] = global_conf.mmat_type(global_conf.batch_size, 1)
- output[i]:fill(global_conf.nn_act_default)
- end
- local seq_start = {}
- local seq_end = {}
- local seq_len = {}
- for i = 1, global_conf.batch_size do
- seq_start[i] = false
- seq_end[i] = false
- seq_len[i] = 0
- end
- local has_new = false
- for i = 1, global_conf.batch_size do
- if pos[i] == nil then
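- -- note: '<' means the final sequence in self.seq is never scheduled;
- -- '<=' would consume it as well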
- if offset < #self.seq then
- seq_start[i] = true
- pos[i] = {offset, 1}
- offset = offset + 1
- end
- end
- if pos[i] ~= nil then
- has_new = true
- for j = 1, global_conf.chunk_size do
- local final
- input[j][i-1][0], output[j][i-1][0], final = self:get_in_out(pos[i][1], pos[i][2])
- seq_len[i] = j
- if final then
- seq_end[i] = true
- pos[i] = nil
- break
- end
- pos[i][2] = pos[i][2] + 1
- end
- end
- end
- if not has_new then
- break
- end
- for i = 1, global_conf.chunk_size do
- input[i] = global_conf.cumat_type.new_from_host(input[i])
- output[i] = global_conf.cumat_type.new_from_host(output[i])
- end
- table.insert(data, {input = input, output = output, seq_start = seq_start, seq_end = seq_end, seq_len = seq_len})
- end
- return data
-end
diff --git a/lua/select_linear.lua b/lua/select_linear.lua
deleted file mode 100644
index a7e20cc..0000000
--- a/lua/select_linear.lua
+++ /dev/null
@@ -1,62 +0,0 @@
-local SL = nerv.class('nerv.SelectLinearLayer', 'nerv.Layer')
-
---id: string
---global_conf: table
---layer_conf: table
---Get Parameters
-function SL:__init(id, global_conf, layer_conf)
- self.id = id
- self.dim_in = layer_conf.dim_in
- self.dim_out = layer_conf.dim_out
- self.gconf = global_conf
-
- self.vocab = layer_conf.vocab
- self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.vocab, self.dim_out[1]}) --layer_conf.ltp
-
- self:check_dim_len(1, 1)
-end
-
---Check parameter
-function SL:init(batch_size)
- if (self.dim_in[1] ~= 1) then --one word id
- nerv.error("mismatching dimensions of ltp and input")
- end
- if (self.dim_out[1] ~= self.ltp.trans:ncol()) then
- nerv.error("mismatching dimensions of bp and output")
- end
-
- self.batch_size = batch_size
- self.ltp:train_init()
-end
-
-function SL:update(bp_err, input, output)
- --use this to produce reproducible results; don't forget to set the dropout to zero!
- --for i = 1, input[1]:nrow(), 1 do
- -- local word_vec = self.ltp.trans[input[1][i - 1][0]]
- -- word_vec:add(word_vec, bp_err[1][i - 1], 1, - self.gconf.lrate / self.gconf.batch_size)
- --end
-
- --I tried the update_select_rows kernel, which uses atomicAdd, but it generates unreproducible results
- self.ltp.trans:update_select_rows_by_colidx(bp_err[1], input[1], - self.gconf.lrate / self.gconf.batch_size, 0)
- self.ltp.trans:add(self.ltp.trans, self.ltp.trans, 1.0, - self.gconf.lrate * self.gconf.wcost)
-end
-
-function SL:propagate(input, output)
- --for i = 0, input[1]:ncol() - 1, 1 do
- -- if (input[1][0][i] > 0) then
- -- output[1][i]:copy_fromd(self.ltp.trans[input[1][0][i]])
- -- else
- -- output[1][i]:fill(0)
- -- end
- --end
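- -- batched embedding lookup: for each stream, copy the row of ltp.trans indexed
- -- by that stream's word id (equivalent to the commented loop above)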
- output[1]:copy_rows_fromd_by_colidx(self.ltp.trans, input[1])
-end
-
-function SL:back_propagate(bp_err, next_bp_err, input, output)
- --input is compressed, do nothing
-end
-
-function SL:get_params()
- local paramRepo = nerv.ParamRepo({self.ltp})
- return paramRepo
-end
diff --git a/lua/timer.lua b/lua/timer.lua
deleted file mode 100644
index 2c54ca8..0000000
--- a/lua/timer.lua
+++ /dev/null
@@ -1,33 +0,0 @@
-local Timer = nerv.class("nerv.Timer")
-
-function Timer:__init()
- self.last = {}
- self.rec = {}
-end
-
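--- tic/toc accumulate per-item CPU time via os.clock (not wall-clock time)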
-function Timer:tic(item)
- self.last[item] = os.clock()
-end
-
-function Timer:toc(item)
- if (self.last[item] == nil) then
- nerv.error("item not there")
- end
- if (self.rec[item] == nil) then
- self.rec[item] = 0
- end
- self.rec[item] = self.rec[item] + os.clock() - self.last[item]
-end
-
-function Timer:check(item)
- if self.rec[item] == nil then
- nerv.error('item not there')
- end
- nerv.printf('"%s" lasts for %f secs.\n', item, self.rec[item])
-end
-
-function Timer:flush()
- for key, value in pairs(self.rec) do
- self.rec[key] = nil
- end
-end
diff --git a/lua/tnn.lua b/lua/tnn.lua
deleted file mode 100644
index bf9f118..0000000
--- a/lua/tnn.lua
+++ /dev/null
@@ -1,136 +0,0 @@
-nerv.include('select_linear.lua')
-
-local reader = nerv.class('nerv.TNNReader')
-
-function reader:__init(global_conf, data)
- self.gconf = global_conf
- self.offset = 0
- self.data = data
-end
-
-function reader:get_batch(feeds)
- self.offset = self.offset + 1
- if self.offset > #self.data then
- return false
- end
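- -- port 1 takes the word ids; port 2 takes the targets expanded from compressed
- -- form to a vocab_size-wide one-hot row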
- for i = 1, self.gconf.chunk_size do
- feeds.inputs_m[i][1]:copy_from(self.data[self.offset].input[i])
- feeds.inputs_m[i][2]:copy_from(self.data[self.offset].output[i]:decompress(self.gconf.vocab_size))
- end
- feeds.flags_now = self.data[self.offset].flags
- feeds.flagsPack_now = self.data[self.offset].flagsPack
- return true
-end
-
-function reader:has_data(t, i)
- return t <= self.data[self.offset].seq_len[i]
-end
-
-function reader:get_err_input()
- return self.data[self.offset].err_input
-end
-
-local nn = nerv.class('nerv.NN')
-
-function nn:__init(global_conf, train_data, val_data, layers, connections)
- self.gconf = global_conf
- self.tnn = self:get_tnn(layers, connections)
- self.train_data = self:get_data(train_data)
- self.val_data = self:get_data(val_data)
-end
-
-function nn:get_tnn(layers, connections)
- self.gconf.dropout_rate = 0
- local layer_repo = nerv.LayerRepo(layers, self.gconf.pr, self.gconf)
- local tnn = nerv.TNN('TNN', self.gconf, {dim_in = {1, self.gconf.vocab_size},
- dim_out = {1}, sub_layers = layer_repo, connections = connections,
- clip = self.gconf.clip})
- tnn:init(self.gconf.batch_size, self.gconf.chunk_size)
- return tnn
-end
-
-function nn:get_data(data)
- local ret = {}
- for i = 1, #data do
- ret[i] = {}
- ret[i].input = data[i].input
- ret[i].output = data[i].output
- ret[i].flags = {}
- ret[i].err_input = {}
- for t = 1, self.gconf.chunk_size do
- ret[i].flags[t] = {}
- local err_input = self.gconf.mmat_type(self.gconf.batch_size, 1)
- for j = 1, self.gconf.batch_size do
- if t <= data[i].seq_len[j] then
- ret[i].flags[t][j] = nerv.TNN.FC.SEQ_NORM
- err_input[j - 1][0] = 1
- else
- ret[i].flags[t][j] = 0
- err_input[j - 1][0] = 0
- end
- end
- ret[i].err_input[t] = self.gconf.cumat_type.new_from_host(err_input)
- end
- for j = 1, self.gconf.batch_size do
- if data[i].seq_start[j] then
- ret[i].flags[1][j] = bit.bor(ret[i].flags[1][j], nerv.TNN.FC.SEQ_START)
- end
- if data[i].seq_end[j] then
- local t = data[i].seq_len[j]
- ret[i].flags[t][j] = bit.bor(ret[i].flags[t][j], nerv.TNN.FC.SEQ_END)
- end
- end
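- -- flagsPack[t] ORs the flags of every stream so the TNN can test a whole
- -- time step with one bitmask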
- ret[i].flagsPack = {}
- for t = 1, self.gconf.chunk_size do
- ret[i].flagsPack[t] = 0
- for j = 1, self.gconf.batch_size do
- ret[i].flagsPack[t] = bit.bor(ret[i].flagsPack[t], ret[i].flags[t][j])
- end
- end
- ret[i].seq_len = data[i].seq_len
- end
- return ret
-end
-
-function nn:process(data, do_train)
- local total_err = 0
- local total_frame = 0
- local reader = nerv.TNNReader(self.gconf, data)
- while true do
- local r, _ = self.tnn:getfeed_from_reader(reader)
- if not r then
- break
- end
- if do_train then
- self.gconf.dropout_rate = self.gconf.dropout
- else
- self.gconf.dropout_rate = 0
- end
- self.tnn:net_propagate()
- for t = 1, self.gconf.chunk_size do
- local tmp = self.tnn.outputs_m[t][1]:new_to_host()
- for i = 1, self.gconf.batch_size do
- if reader:has_data(t, i) then
- total_err = total_err + math.log10(math.exp(tmp[i - 1][0]))
- total_frame = total_frame + 1
- end
- end
- end
- if do_train then
- local err_input = reader:get_err_input()
- for i = 1, self.gconf.chunk_size do
- self.tnn.err_inputs_m[i][1]:copy_from(err_input[i])
- end
- self.tnn:net_backpropagate(false)
- self.tnn:net_backpropagate(true)
- end
- collectgarbage('collect')
- end
- return math.pow(10, - total_err / total_frame)
-end
-
-function nn:epoch()
- local train_error = self:process(self.train_data, true)
- local val_error = self:process(self.val_data, false)
- return train_error, val_error
-end