Creo que en la función, en la parte de `for inputs, targets in dataloader:`, hay un error;
por eso lo modifiqué 😃
def train_and_evaluate(model, optimizer, loss_fn, dataloaders, device, num_epochs = 10, lr = 0.001):
    """Train `model` for `num_epochs`, running a validation pass each epoch.

    Args:
        model: network to train; called as ``model(inputs)``.
        optimizer: optimizer over ``model``'s parameters; every param group's
            learning rate is overwritten with ``lr`` before training.
        loss_fn: criterion called as ``loss_fn(outputs, targets)``.
        dataloaders: dict with keys ``'train'`` and ``'val'``, each an iterable
            yielding ``(inputs, targets)`` batches.
        device: device the batches are moved to.
        num_epochs: number of training epochs.
        lr: learning rate applied to every optimizer param group.

    Returns:
        Tuple ``(model, loss, acc)`` where loss/acc are the running metrics of
        the last phase executed ('val') in the final epoch.
    """
    # Override the optimizer's learning rate with the requested one.
    for g in optimizer.param_groups:
        g['lr'] = lr
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch+1, num_epochs))
        print('-'*10)
        for phase in ['train','val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = RunningMetric()  # running loss
            running_acc = RunningMetric()   # running accuracy
            for inputs, targets in dataloaders[phase]:
                inputs, targets = inputs.to(device), targets.to(device)
                optimizer.zero_grad()
                # Track gradients only during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    # BUG FIX: the original called `net(inputs)` — `net` is not
                    # defined in this function; the parameter is `model`.
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = loss_fn(outputs, targets)
                    if phase == 'train':
                        loss.backward()   # gradients computed automatically
                        optimizer.step()  # update the parameters
                batch_size = inputs.size()[0]
                running_loss.update(loss.item()*batch_size,
                                    batch_size)
                running_acc.update(torch.sum(preds == targets).float(),
                                   batch_size)
            print("Loss: {:.4f} Acc: {:.4f} ".format(running_loss(),
                                                     running_acc()))
    return model, running_loss(), running_acc()
¿Quieres ver más aportes, preguntas y respuestas de la comunidad? Crea una cuenta o inicia sesión.