Use techniques such as regularization, early stopping, and dropout to reduce overfitting. The code example below shows how to apply dropout in a BERT+BiGRU+Softmax network; weight decay in the optimizer provides L2-style regularization, and a minimal early-stopping helper is sketched just before the training loop.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from transformers import BertModel, get_linear_schedule_with_warmup

class BERT_BiGRU_Softmax(nn.Module):
    def __init__(self, bert_config, num_labels, hidden_size):
        super(BERT_BiGRU_Softmax, self).__init__()
        # Randomly initialized here; in practice you would typically load
        # pretrained weights with BertModel.from_pretrained(...)
        self.bert = BertModel(bert_config)
        # Dropout is applied to both the BERT output and the BiGRU output
        self.dropout = nn.Dropout(0.1)
        self.bigru = nn.GRU(bert_config.hidden_size, hidden_size,
                            num_layers=2, batch_first=True, bidirectional=True)
        # Bidirectional GRU doubles the feature dimension
        self.fc = nn.Linear(hidden_size * 2, num_labels)
        self.num_labels = num_labels

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        bert_output = self.bert(input_ids=input_ids,
                                token_type_ids=token_type_ids,
                                attention_mask=attention_mask)[0]
        bert_output = self.dropout(bert_output)
        bigru_output, _ = self.bigru(bert_output)
        bigru_output = self.dropout(bigru_output)
        logits = self.fc(bigru_output)
        # Return log-probabilities to pair with nn.NLLLoss below; this is
        # numerically equivalent to nn.CrossEntropyLoss on the raw logits
        # (plain softmax output would make CrossEntropyLoss incorrect)
        return F.log_softmax(logits, dim=-1)
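# Optional sanity check (illustrative values, not from the original snippet):
# verify that the model emits one label distribution per token, assuming a
# default, randomly initialized BertConfig.
from transformers import BertConfig
_cfg = BertConfig()
_m = BERT_BiGRU_Softmax(_cfg, num_labels=5, hidden_size=128)
_dummy = torch.randint(0, _cfg.vocab_size, (2, 16))  # batch of 2, seq len 16
print(_m(_dummy).shape)  # torch.Size([2, 16, 5]): per-token log-probabilities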
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = BERT_BiGRU_Softmax(bert_config, num_labels, hidden_size).to(device)
# weight_decay adds L2-style regularization on top of dropout
optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8, weight_decay=0.01)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,
                                            num_training_steps=len(train_dataloader) * epochs)
# NLLLoss expects the log_softmax output returned by the model; add
# ignore_index=... if padding positions carry a placeholder label
loss_fn = nn.NLLLoss()
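# The early stopping mentioned above needs some bookkeeping around the
# validation loss. A minimal sketch of such a helper (hypothetical, not part
# of the original snippet): stop once validation loss has failed to improve
# for `patience` consecutive epochs.
class EarlyStopping:
    def __init__(self, patience=3, min_delta=0.0):
        self.patience = patience    # epochs to wait after the last improvement
        self.min_delta = min_delta  # minimum decrease that counts as improvement
        self.best_loss = float('inf')
        self.counter = 0
        self.should_stop = False

    def step(self, val_loss):
        if val_loss < self.best_loss - self.min_delta:
            self.best_loss = val_loss
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.should_stop = True

early_stopping = EarlyStopping(patience=3)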
# Train the model
for epoch in range(epochs):
    model.train()
    for batch in train_dataloader:
        input_ids = batch['input_ids'].to(device)
        token_type_ids = batch['token_type_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        optimizer.zero_grad()
        outputs = model(input_ids=input_ids, token_type_ids=token_type_ids,
                        attention_mask=attention_mask)
        # Flatten (batch, seq_len, num_labels) for token-level loss
        loss = loss_fn(outputs.view(-1, num_labels), labels.view(-1))
        loss.backward()
        # Gradient clipping stabilizes training
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
    # Evaluate on the validation set; once the average validation loss is
    # computed, pass it to early_stopping.step(...) to decide whether to halt
    model.eval()
    with torch.no_grad():
        total_loss = 0