--- Parameter and layer classes related to linear transforms.
local MatrixParam = nerv.class('nerv.MatrixParam', 'nerv.Param')
local LinearTransParam = nerv.class('nerv.LinearTransParam', 'nerv.MatrixParam')
local BiasParam = nerv.class('nerv.BiasParam', 'nerv.MatrixParam')
local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
--- A parameter that consists of a single matrix
-- @type nerv.MatrixParam
--- Read from a file handle.
-- @param handle the file handle
function MatrixParam:read(handle)
    self.trans = self.gconf.mmat_type.load(handle)
    if not self.gconf.use_cpu then
        self.trans = self.gconf.cumat_type.new_from_host(self.trans)
    end
end
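--- Write to a file handle.
-- @param handle the file handle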
function MatrixParam:write(handle)
    local trans = self.trans
    if not self.gconf.use_cpu then
        trans = self.trans:new_to_host()
    end
    trans:save(handle)
end
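--- Initialize for training: allocate and zero the momentum accumulator.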
function MatrixParam:train_init()
    self.correction = self.trans:create()
    self.correction:fill(0)
end
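--- Update the parameter given an explicit gradient matrix (internal).
-- A sketch of the math below, assuming BLAS-like semantics
-- c:add(a, b, ca, cb) => c = ca * a + cb * b (inferred from the calls):
--   correction <- momentum * correction + gradient
--   trans      <- alpha * trans - (lrate / n) * beta * correction
-- where n = batch_size / (1 - momentum) averages over the minibatch and
-- compensates for the gain accumulated by the momentum term.
-- @param gradient the gradient matrix
-- @param alpha scaling of the old parameter value
-- @param beta scaling of the update term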
function MatrixParam:_update_by_gradient(gradient, alpha, beta)
    local gconf = self.gconf
    -- momentum gain
    local mmt_gain = 1.0 / (1.0 - gconf.momentum)
    local n = gconf.batch_size * mmt_gain
    -- perform update
    if gconf.momentum > 0 then
        self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
        self.trans:add(self.trans, self.correction, alpha, -gconf.lrate / n * beta)
    else
        self.trans:add(self.trans, gradient, alpha, -gconf.lrate / n * beta)
    end
end
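--- Update the parameter directly from the back-propagated error and the
-- layer input (internal), fusing the gradient computation with the update:
-- assuming gemm-like semantics c:mul(a, b, ca, cb, 'T', 'N') =>
-- c = ca * a^T * b + cb * c, the gradient input^T * err is applied without
-- being materialized as a separate matrix.
-- @param err the back-propagated error matrix
-- @param input the input matrix of the layer
-- @param alpha scaling of the old parameter value
-- @param beta scaling of the update term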
function MatrixParam:_update_by_err_input(err, input, alpha, beta)
    local gconf = self.gconf
    -- momentum gain
    local mmt_gain = 1.0 / (1.0 - gconf.momentum)
    local n = gconf.batch_size * mmt_gain
    -- perform update
    if gconf.momentum > 0 then
        self.correction:mul(input, err, 1.0, gconf.momentum, 'T', 'N')
        self.trans:add(self.trans, self.correction, alpha, -gconf.lrate / n * beta)
    else
        self.trans:mul(input, err, -gconf.lrate / n * beta, alpha, 'T', 'N')
    end
end
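--- Update the parameter by a gradient matrix with neutral scaling
-- (alpha = beta = 1.0, i.e. no weight decay).
-- @param gradient the gradient matrix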
function MatrixParam:update_by_gradient(gradient)
    self:_update_by_gradient(gradient, 1.0, 1.0)
end
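--- Update the parameter by the error and input of the layer with neutral
-- scaling (no weight decay).
-- @param err the back-propagated error matrix
-- @param input the input matrix of the layer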
function MatrixParam:update_by_err_input(err, input)
    self:_update_by_err_input(err, input, 1.0, 1.0)
end
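--- Update the linear transform from the error and input, applying L2
-- regularization (weight decay): both the old parameter value and the
-- update term are scaled by (1 - lrate * wcost).
-- @param err the back-propagated error matrix
-- @param input the input matrix of the layer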
function LinearTransParam:update_by_err_input(err, input)
    local gconf = self.gconf
    local l2 = 1 - gconf.lrate * gconf.wcost
    self:_update_by_err_input(err, input, l2, l2)
end
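-- A minimal usage sketch of the update API above (hypothetical values; only
-- the gconf fields referenced in this file are shown):
--
--   local gconf = {lrate = 0.1, momentum = 0.9, wcost = 1e-6,
--                  batch_size = 256, use_cpu = true,
--                  mmat_type = nerv.MMatrixFloat}
--   local ltp = nerv.LinearTransParam('ltp', gconf)
--   ltp.trans = gconf.mmat_type(429, 2048) -- e.g. 429 inputs, 2048 outputs
--   ltp:train_init()                       -- allocate the momentum buffer
--   ltp:update_by_err_input(err, input)    -- one SGD step with weight decay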
--- A fully-connected linear transform layer.
-- @type nerv.AffineLayer
--- The constructor.
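-- @param id the identifier of the layer
-- @param global_conf the global configuration table
-- @param layer_conf the layer configuration table, which provides dim_in,
-- dim_out and (optionally) pre-constructed parameters such as ltp1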
function AffineLayer:__init(id, global_conf, layer_conf)
    self.id = id
    self.dim_in = layer_conf.dim_in
    self.dim_out = layer_conf.dim_out
    if layer_conf.ltp ~= nil and layer_conf.ltp1 == nil then
        layer_conf.ltp1 = layer_conf.ltp -- backward compatibility: "ltp" is an alias for "ltp1"
    end
    for i = 1, #self.dim_in do
        self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf,
                                           nerv.LinearTransParam,
                                           {self.dim_in[i],