When implementing a Bayesian convolutional neural network, a common problem is that the KL-divergence term becomes NaN. This usually happens because a denominator in the KL computation reaches zero: the standard deviation of the variational posterior underflows, so the formula ends up dividing by zero (and taking the log of zero). The fix is to add a small constant to that denominator so it can never be exactly zero. Concretely, a Bayesian convolutional layer can be implemented in PyTorch as follows:
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter

class BayesianConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super(BayesianConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size  # assumed to be an int (square kernel)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        # Variational posterior over the weights: N(mean_weight, sigma_weight^2)
        self.sigma_weight = Parameter(torch.Tensor(out_channels, in_channels // groups, kernel_size, kernel_size))
        self.mean_weight = Parameter(torch.Tensor(out_channels, in_channels // groups, kernel_size, kernel_size))
        # Noise buffer for the reparameterization trick (kept on the right device)
        self.register_buffer('eps_weight', torch.zeros(out_channels, in_channels // groups, kernel_size, kernel_size))
        if bias:
            self.sigma_bias = Parameter(torch.Tensor(out_channels))
            self.mean_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.zeros(out_channels))
        else:
            # Register as None so the `self.mean_bias is not None` checks below are safe
            self.register_parameter('sigma_bias', None)
            self.register_parameter('mean_bias', None)
            self.register_buffer('eps_bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        k = self.kernel_size * self.kernel_size * self.in_channels // self.groups
        # Initialize the means and standard deviations following the paper's recommendation
        self.mean_weight.data.normal_(0, math.sqrt(2. / k))
        self.sigma_weight.data.fill_(0.001)
        if self.mean_bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.mean_weight)
            self.mean_bias.data.normal_(0, math.sqrt(1. / fan_in))
            self.sigma_bias.data.fill_(0.001)
    def forward(self, input):
        # Promote 2-D input (N, C) to 4-D (N, C, 1, 1) so conv2d accepts it
        if input.dim() == 2:
            input = input.unsqueeze(2).unsqueeze(3)
        # Reparameterization trick: weight = mean + sigma * eps with eps ~ N(0, 1),
        # so gradients flow through mean and sigma while the noise stays external
        weight_eps = torch.randn_like(self.eps_weight)
        weight = self.mean_weight + self.sigma_weight * weight_eps
        bias = None
        if self.mean_bias is not None:
            bias_eps = torch.randn_like(self.eps_bias)
            bias = self.mean_bias + self.sigma_bias * bias_eps
        return F.conv2d(input, weight, bias, self.stride,
                        self.padding, self.dilation, self.groups)
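The epsilon fix described at the top belongs in the KL term itself, which the listing above does not show. Below is a minimal sketch of that term, assuming a zero-mean Gaussian prior N(0, prior_sigma^2) over the weights; the closed-form KL between two Gaussians is then log(prior_sigma / sigma) + (sigma^2 + mean^2) / (2 * prior_sigma^2) - 1/2. The method name kl_loss and the prior_sigma and eps arguments are illustrative additions, not part of the original code:

    # A method one could add to BayesianConv2d (names are illustrative):
    def kl_loss(self, prior_sigma=1.0, eps=1e-8):
        # KL( N(mean, sigma^2) || N(0, prior_sigma^2) ), summed over all weights:
        #   log(prior_sigma / sigma) + (sigma^2 + mean^2) / (2 * prior_sigma^2) - 0.5
        # `eps` keeps the denominator inside the log away from zero -- exactly
        # the NaN fix described above: sigma underflowing to 0 would otherwise
        # give a division by zero and an infinite log.
        sigma = self.sigma_weight.abs()  # sigma is unconstrained, so take |sigma|
        kl = torch.sum(torch.log(prior_sigma / (sigma + eps))
                       + (sigma.pow(2) + self.mean_weight.pow(2)) / (2 * prior_sigma ** 2)
                       - 0.5)
        if self.mean_bias is not None:
            sigma_b = self.sigma_bias.abs()
            kl = kl + torch.sum(torch.log(prior_sigma / (sigma_b + eps))
                                + (sigma_b.pow(2) + self.mean_bias.pow(2)) / (2 * prior_sigma ** 2)
                                - 0.5)
        return kl

During training, this term is added to the data loss (for example cross-entropy), typically scaled by one over the number of mini-batches so the prior is not over-weighted. Because sigma + eps is strictly positive, the division and the logarithm always stay finite, and the KL can no longer produce NaN. A layer built as BayesianConv2d(3, 16, 3, padding=1) then behaves like a regular convolution whose weights are re-sampled on every forward pass.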