local Dropout = nerv.class("nerv.DropoutLayerT", "nerv.LayerT")

function Dropout:__init(id, global_conf, layer_conf)
    self.id = id
    self.gconf = global_conf
    self.dim_in = layer_conf.dim_in
    self.dim_out = layer_conf.dim_out
    self:check_dim_len(1, 1) -- exactly one input and one output
end

function Dropout:init(batch_size, chunk_size)
    if self.dim_in[1] ~= self.dim_out[1] then
        nerv.error("mismatching dimensions of input and output")
    end
    if chunk_size == nil then
        chunk_size = 1
    end
    -- one dropout mask per time step in the chunk
    self.mask_t = {}
    for t = 1, chunk_size do
        self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
    end
end

function Dropout:batch_resize(batch_size, chunk_size)
    if chunk_size == nil then
        chunk_size = 1
    end
    -- reallocate masks whose row count no longer matches the batch size
    for t = 1, chunk_size do
        if self.mask_t[t] == nil or self.mask_t[t]:nrow() ~= batch_size then
            self.mask_t[t] = self.gconf.cumat_type(batch_size, self.dim_in[1])
        end
    end
end

function Dropout:propagate(input, output, t)
    if t == nil then
        t = 1
    end
    if self.gconf.dropout_rate == nil then
        nerv.info("DropoutLayerT:propagate: global_conf.dropout_rate is nil, setting it to zero")
        self.gconf.dropout_rate = 0
    end
    if self.gconf.dropout_rate == 0 then
        output[1]:copy_fromd(input[1])
    else
        self.mask_t[t]:rand_uniform()
        -- since a portion of the activations is dropped, scale the surviving
        -- activations by 1 / (1 - dropout_rate) to compensate (inverted dropout)
        self.mask_t[t]:thres_mask(self.mask_t[t], self.gconf.dropout_rate,
                                  0, 1 / (1.0 - self.gconf.dropout_rate))
        output[1]:mul_elem(input[1], self.mask_t[t])
    end
end

function Dropout:update(bp_err, input, output, t)
    -- no parameters, nothing to update
end

function Dropout:back_propagate(bp_err, next_bp_err, input, output, t)
    if t == nil then
        t = 1
    end
    -- propagate the error through the same mask that was applied in the forward pass
    if self.gconf.dropout_rate == 0 then
        next_bp_err[1]:copy_fromd(bp_err[1])
    else
        next_bp_err[1]:mul_elem(bp_err[1], self.mask_t[t])
    end
end

function Dropout:get_params()
    return nerv.ParamRepo({})
end
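
-- Usage sketch (illustrative only, not part of the layer implementation).
-- It assumes global_conf.cumat_type is set to a CUDA matrix constructor such
-- as nerv.CuMatrixFloat (not defined in this file) and uses only the methods
-- exercised above; exact dimensions and ids are hypothetical.
--
--   local gconf = {cumat_type = nerv.CuMatrixFloat, dropout_rate = 0.5}
--   local layer = nerv.DropoutLayerT("dropout1", gconf,
--                                    {dim_in = {429}, dim_out = {429}})
--   layer:init(256, 1)                        -- batch_size = 256, chunk_size = 1
--   local input  = {gconf.cumat_type(256, 429)}
--   local output = {gconf.cumat_type(256, 429)}
--   layer:propagate(input, output, 1)         -- output = input .* scaled dropout mask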