def train(model, num_epochs):
    """Train `model` for `num_epochs` epochs on the global `trainloader`.

    Uses cross-entropy loss with the Adam optimizer (lr=1e-4,
    weight_decay=1e-4) and prints the average per-batch loss every
    second epoch.

    Args:
        model: a torch.nn.Module whose forward accepts an image batch
            and returns class logits.
        num_epochs: number of passes over `trainloader`.
    """
    from torch.optim import Adam

    # Pick the execution device and move the model's parameters/buffers there.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("The model will be running on", device, "device")
    model = model.to(device)

    loss_fn = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=0.0001, weight_decay=0.0001)

    for epoch in range(num_epochs):
        running_loss = 0.0
        num_batches = 0
        # NOTE(review): `trainloader` is a module-level global defined
        # elsewhere in this file — presumably a torch DataLoader.
        for images, labels in trainloader:
            images = images.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = loss_fn(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            num_batches += 1

        # Report the average per-batch loss every second epoch.
        # (The original divided by a hard-coded 100, which is only
        # correct when the loader happens to yield exactly 100 batches.)
        if (epoch + 1) % 2 == 0 and num_batches > 0:
            print("[%3d / %3d] loss : %.3f"
                  % (epoch + 1, num_epochs, running_loss / num_batches))
# Result:
# The loop above prints its progress repeatedly for every batch set in each epoch.