I want to train a 1D CNN on time series data, but I get the following error message: "1D target tensor expected, multi-target not supported".
Here is the code, with simulated data matching the structure of my real data, followed by the error message:
import torch
from torch.utils.data import DataLoader
import torch.utils.data as data
import torch.nn as nn
import numpy as np
import random
from tqdm.notebook import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
train_dataset = []
n_item = 20
for i in range(0, n_item):
    train_data = np.random.uniform(-10, 10, 500)
    train_dataset.append(train_data)
train_dataset = np.asarray(train_dataset)
train_dataset.shape
ecg_train = torch.from_numpy(train_dataset).float()
labels_train = np.random.randint(2, size=n_item)
labels_train = torch.from_numpy(labels_train).long()
val_dataset = []
n_item = 10
for i in range(0, n_item):
    val_data = np.random.uniform(-10, 10, 500)
    val_dataset.append(val_data)
val_dataset = np.asarray(val_dataset)
val_dataset.shape
ecg_validation = torch.from_numpy(val_dataset).float()
labels_validation = np.random.randint(2, size=n_item)
labels_validation = torch.from_numpy(labels_validation).long()
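# Quick sanity check of the simulated shapes: 20 training series of length 500
# with one 0/1 label each, and 10 validation series with one 0/1 label each.
assert ecg_train.shape == (20, 500) and labels_train.shape == (20,)
assert ecg_validation.shape == (10, 500) and labels_validation.shape == (10,)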
class ECGNet(data.Dataset):
    """ECG dataset."""
    def __init__(self, ecgs, labls, transform=None):
        self.ecg = ecgs
        self.target = labls
        self.transform = transform

    def __getitem__(self, idx):
        ecgVec = self.ecg[idx]  # .reshape(10, -1)
        labelID = self.target[idx].reshape(1)
        return ecgVec, labelID

    def __len__(self):
        return len(self.ecg)
train_data = ECGNet(ecg_train,
                    labels_train,
                    )
print("size of Training dataset: {}".format(len(train_data)))
validation_data = ECGNet(ecg_validation,
                         labels_validation,
                         )
print("size of Validation dataset: {}".format(len(validation_data)))
batch_size = 1
train_dataloader = DataLoader(dataset=train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)
val_dataloader = DataLoader(dataset=validation_data,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=0)
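# Sanity check of one batch from the DataLoader: each ECG arrives as (1, 500),
# and the per-sample (1,) labels are collated into a (1, 1) label batch.
sample_ecg, sample_label = next(iter(train_dataloader))
assert sample_ecg.shape == (1, 500) and sample_label.shape == (1, 1)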
def train_epoch(model, train_dataloader, optimizer, loss_fn):
    losses = []
    correct_predictions = 0
    # Iterate mini batches over training dataset
    for images, labels in tqdm(train_dataloader):
        images = images.to(device)
        # labels = labels.squeeze_()
        labels = labels.to(device)
        # labels = labels.to(device=device, dtype=torch.int64)
        # Run predictions
        output = model(images)
        # Set gradients to zero
        optimizer.zero_grad()
        # Compute loss
        loss = loss_fn(output, labels)
        # Backpropagate (compute gradients)
        loss.backward()
        # Make an optimization step (update parameters)
        optimizer.step()
        # Log metrics
        losses.append(loss.item())
        predicted_labels = output.argmax(dim=1)
        correct_predictions += (predicted_labels == labels).sum().item()
    accuracy = 100.0 * correct_predictions / len(train_dataloader.dataset)
    # Return mean loss over the iterations and the accuracy
    mean_loss = np.array(losses).mean()
    return mean_loss, accuracy
def evaluate(model, dataloader, loss_fn):
    losses = []
    correct_predictions = 0
    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device)
            # labels = labels.squeeze_()
            labels = labels.to(device=device, dtype=torch.int64)
            # Run predictions
            output = model(images)
            # Compute loss
            loss = loss_fn(output, labels)
            # Save metrics
            predicted_labels = output.argmax(dim=1)
            correct_predictions += (predicted_labels == labels).sum().item()
            losses.append(loss.item())
    mean_loss = np.array(losses).mean()
    accuracy = 100.0 * correct_predictions / len(dataloader.dataset)
    # Return mean loss and accuracy
    return mean_loss, accuracy
def train(model, train_dataloader, val_dataloader, optimizer, n_epochs, loss_fn):
    # We will monitor loss functions as the training progresses
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []
    for epoch in range(n_epochs):
        model.train()
        train_loss, train_accuracy = train_epoch(model, train_dataloader, optimizer, loss_fn)
        model.eval()
        val_loss, val_accuracy = evaluate(model, val_dataloader, loss_fn)
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_accuracies.append(train_accuracy)
        val_accuracies.append(val_accuracy)
        print('Epoch {}/{}: train_loss: {:.4f}, train_accuracy: {:.4f}, val_loss: {:.4f}, val_accuracy: {:.4f}'.format(
            epoch + 1, n_epochs,
            train_losses[-1],
            train_accuracies[-1],
            val_losses[-1],
            val_accuracies[-1]))
    return train_losses, val_losses, train_accuracies, val_accuracies
class Simple1DCNN(torch.nn.Module):
    def __init__(self):
        super(Simple1DCNN, self).__init__()
        self.layer1 = torch.nn.Conv1d(in_channels=50,
                                      out_channels=20,
                                      kernel_size=5,
                                      stride=2)
        self.act1 = torch.nn.ReLU()
        self.layer2 = torch.nn.Conv1d(in_channels=20,
                                      out_channels=10,
                                      kernel_size=1)
        self.fc1 = nn.Linear(10 * 3, 2)

    def forward(self, x):
        print(x.shape)
        # Conv1d expects (batch, channels, length), so the (1, 500) input is
        # reshaped here into 50 channels of length 10.
        x = x.view(1, 50, -1)
        print(x.shape)
        x = self.layer1(x)
        print(x.shape)
        x = self.act1(x)
        print(x.shape)
        x = self.layer2(x)
        print(x.shape)
        x = x.view(1, -1)
        print(x.shape)
        x = self.fc1(x)
        print(x.shape)
        print(x)
        return x
model_a = Simple1DCNN()
model_a = model_a.to(device)
criterion = nn.CrossEntropyLoss()
loss_fn = torch.nn.CrossEntropyLoss()
n_epochs_a = 50
learning_rate_a = 0.01
alpha_a = 1e-5
momentum_a = 0.9
optimizer = torch.optim.SGD(model_a.parameters(),
                            momentum=momentum_a,
                            nesterov=True,
                            weight_decay=alpha_a,
                            lr=learning_rate_a)
train_losses_a, val_losses_a, train_acc_a, val_acc_a = train(model_a,
                                                             train_dataloader,
                                                             val_dataloader,
                                                             optimizer,
                                                             n_epochs_a,
                                                             loss_fn)
Error message:
cpu
size of Training dataset: 20
size of Validation dataset: 10
0%| | 0/20 [00:00<?, ?it/s]
torch.Size([1, 500])
torch.Size([1, 50, 10])
torch.Size([1, 20, 3])
torch.Size([1, 20, 3])
torch.Size([1, 10, 3])
torch.Size([1, 30])
torch.Size([1, 2])
tensor([[ 0.5785, -1.0169]], grad_fn=<AddmmBackward>)
Traceback (most recent call last):
File "SO_question.py", line 219, in <module>
train_losses_a, val_losses_a, train_acc_a, val_acc_a = train(model_a,
File "SO_question.py", line 137, in train
train_loss, train_accuracy = train_epoch(model, train_dataloader, optimizer, loss_fn)
File "SO_question.py", line 93, in train_epoch
loss = loss_fn(output, labels)
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 961, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/functional.py", line 2468, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/functional.py", line 2264, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: 1D target tensor expected, multi-target not supported
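For reference, as far as I understand, nn.CrossEntropyLoss expects its input to have shape (N, C) and its target to be a 1D tensor of class indices with shape (N,). A minimal standalone snippet (separate from the script above, names chosen just for illustration) that runs without this error:
loss_check = nn.CrossEntropyLoss()
logits = torch.randn(1, 2)    # (N, C) = (1, 2), like my model's output
target = torch.tensor([1])    # (N,) = (1,), a 1D tensor of class indices
print(loss_check(logits, target))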
What am I doing wrong?