Updated the condition for saving the model; switched to the ReduceLROnPlateau learning-rate scheduling strategy

carry 2023-08-03 22:57:10 +08:00
parent 09e51a3584
commit 6713993f4b

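A note on the scheduler this commit adopts: torch.optim.lr_scheduler.ReduceLROnPlateau watches whatever metric is passed to scheduler.step() (here the validation loss) and multiplies the learning rate by factor once that metric has gone patience epochs without improving. A minimal, self-contained sketch of that wiring, with a toy model and a fake loss curve standing in for the project's real script:

import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

# toy stand-ins so the snippet runs on its own; the real script uses MobileNetV2
model = nn.Linear(10, 2)
optimizer = optim.Adam(model.parameters(), lr=0.0001)
# cut the learning rate to 10% after 2 epochs with no improvement in the monitored metric
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=2)

for epoch in range(20):
    # ... training and validation would happen here ...
    val_loss = max(0.5, 1.0 - 0.1 * epoch)   # placeholder loss that plateaus, so the LR drop is visible
    scheduler.step(val_loss)                  # scheduler decides whether to lower the learning rate
    print(epoch, optimizer.param_groups[0]['lr'])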

@@ -27,21 +27,20 @@ if __name__=="__main__":
     # data loading
     train_dir = './train_data/1/train'
-    test_dir = 'train_data/1/val'
+    test_dir = 'train_data/1/test'
     # number of training epochs
-    num_epochs = 10
-    #
+    num_epochs = 20
+    # load the datasets
     train_dataset = datasets.ImageFolder(train_dir, transform=transform)
     train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
-    #
     test_dataset = datasets.ImageFolder(test_dir, transform=transform)
     test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False)
     # build the MobileNetV2 model
     model = models.mobilenet_v2(pretrained=True)
     num_classes = len(train_dataset.classes)
     model.classifier[1] = nn.Linear(in_features=1280, out_features=num_classes)
@@ -50,7 +49,10 @@ if __name__=="__main__":
     # define the loss function and optimizer
     criterion = nn.CrossEntropyLoss()
-    optimizer = optim.Adam(model.parameters(), lr=0.00008)
+    optimizer = optim.Adam(model.parameters(), lr=0.0001)
+    # add a ReduceLROnPlateau scheduler
+    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=2, verbose=False)
     print(f"train data:{len(train_loader)}")
     print(f"test data:{len(test_loader)}")
@@ -59,10 +61,14 @@ if __name__=="__main__":
     # train the model
     print("start training")
-    temp = time.time()
+    min_loss = 10000.0
+    max_accuracy = 0
+    #temp = time.time()
     for epoch in range(num_epochs):
         train_start_time = time.time()
         print(f"turn {epoch + 1}:")
+        current_lr = optimizer.param_groups[0]['lr']
+        print(f"Current learning rate: {current_lr}")
         model.train()
         running_loss = 0.0
         for inputs, labels in tqdm(train_loader, desc="training", unit="item", ncols=100):
@@ -77,29 +83,50 @@ if __name__=="__main__":
             running_loss += loss.item()
         train_end_time = time.time()
-        print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {running_loss / len(train_loader)} train cost:{train_end_time -train_start_time}")
+        print(f"Epoch {epoch + 1}/{num_epochs} Train loss: {running_loss / len(train_loader)} Train cost:{train_end_time -train_start_time}")
         # evaluate the model on the test set
         test_start_time = time.time()
         model.eval()
         correct = 0
         total = 0
+        val_loss = 0.0
         with torch.no_grad():
             for inputs, labels in test_loader:
                 inputs, labels = inputs.to(device), labels.to(device)
                 outputs = model(inputs)
+                loss = criterion(outputs, labels)
+                val_loss += loss.item()
                 _, predicted = torch.max(outputs.data, 1)
                 total += labels.size(0)
                 correct += (predicted == labels).sum().item()
+        val_loss /= len(test_loader)
+        # update the learning rate
+        scheduler.step(val_loss)
         test_end_time = time.time()
         accuracy = 100 * correct / total
-        print(f"Test Accuracy: {accuracy:.2f}% test cost:{ test_end_time - test_start_time }")
+        print(f"Test Accuracy: {accuracy:.2f}% Test loss:{ val_loss } test cost:{ test_end_time - test_start_time }")
         # save the model
-        torch.save(model.state_dict(), f"./model/1/epochs{epoch + 1} {accuracy:.2f}.pt")
-        print("model saved success")
+        if val_loss < min_loss or max_accuracy < accuracy:
+            min_loss = val_loss
+            max_accuracy = accuracy
+            torch.save(model.state_dict(), f"./model/1/epochs{epoch + 1} {accuracy:.2f}.pt")
+            print("model saved")
     print("all finish")
+    '''
+    torch.save(model.state_dict(), f"./model/1/epochs{epoch + 1} {accuracy:.2f}.pt")
+    print("final model save")
+    '''
     print(f"time:{time.time()-start_time}")