When I try to create a DataLoader object with transforms and then iterate over it, I get the error below. I am not sure why the tensor dimensions fail to match:
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-3-e582b2a128f3> in <module>()
      8 classes = ('none', 'mild', 'moderate', 'severe', 'proliferative')
      9 
---> 10 for data, target in train_data_loader:
     11     for i in range(0, 4):
     12         im = data[i]

/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __next__(self)
    343 
    344     def __next__(self):
--> 345         data = self._next_data()
    346         self._num_yielded += 1
    347         if self._dataset_kind == _DatasetKind.Iterable and \

/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py in _next_data(self)
    854             else:
    855                 del self._task_info[idx]
--> 856                 return self._process_data(data)
    857 
    858     def _try_put_index(self):

/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py in _process_data(self, data)
    879         self._try_put_index()
    880         if isinstance(data, ExceptionWrapper):
--> 881             data.reraise()
    882         return data
    883 

/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/_utils.py in reraise(self)
    392             # (https://bugs.python.org/issue2651), so we work around it.
    393             msg = KeyErrorMessage(msg)
--> 394         raise self.exc_type(msg)

RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/_utils/worker.py", line 178, in _worker_loop
    data = fetcher.fetch(index)
  File "/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 47, in fetch
    return self.collate_fn(data)
  File "/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 79, in default_collate
    return [default_collate(samples) for samples in transposed]
  File "/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 79, in <listcomp>
    return [default_collate(samples) for samples in transposed]
  File "/share/software/user/open/py-pytorch/1.4.0_py36/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 55, in default_collate
    return torch.stack(batch, 0, out=out)
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 634 and 440 in dimension 2 at /pytorch/aten/src/TH/generic/THTensor.cpp:612
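As far as I can tell from the traceback, default_collate ends up calling torch.stack on the per-sample tensors, and torch.stack requires every tensor to have the same shape. A minimal sketch that reproduces the same error on its own (the sizes 634 and 440 are taken from the traceback):

import torch

# Two "images" whose widths differ, like the 634 vs. 440 in the traceback
a = torch.zeros(3, 300, 634)
b = torch.zeros(3, 300, 440)

# This is essentially what default_collate does for a batch of image tensors;
# it raises the same "Sizes of tensors must match except in dimension 0" error
batch = torch.stack([a, b], 0)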
Here's how you can reproduce the problem. The error comes from iterating over a DataLoader built on a dataset that applies the transforms to the images:
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision import transforms

def check_cuda():
    return torch.cuda.is_available()

def scaleRadius(img, scale):
    # Resize so the estimated retina radius becomes `scale` pixels
    x = img[int(img.shape[0] / 2), :, :].sum(1)
    r = (x > x.mean() / 10).sum() / 2
    s = scale * 1.0 / r
    return cv2.resize(img, (0, 0), fx=s, fy=s)
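# (As far as I can tell, scaleRadius normalizes only the retina radius;
# the resized images can still end up with different heights and widths.)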
class DRDataset(Dataset):
    def __init__(self, csv_path):
        # Transforms
        self.to_tensor = transforms.ToTensor()
        # Read the csv file
        self.data_info = pd.read_csv(csv_path, header=None)
        # First column contains the image paths
        self.image_arr = np.asarray(self.data_info.iloc[:, 0])
        # Second column is the labels
        self.label_arr = np.asarray(self.data_info.iloc[:, 1])
        # Calculate len
        self.data_len = len(self.data_info.index)

    def __getitem__(self, index):
        single_image_name = '/path/to/dataset' + self.image_arr[index] + '.jpeg'
        a = cv2.imread(single_image_name)
        scale = 300
        a = scaleRadius(a, scale)
        # Subtract the local average colour to boost contrast
        a = cv2.addWeighted(a, 4, cv2.GaussianBlur(a, (0, 0), scale / 30), -4, 128)
        # Keep a circle of radius 0.9 * scale and grey out everything outside it
        b = np.zeros(a.shape)
        cv2.circle(b, (int(a.shape[1] / 2), int(a.shape[0] / 2)),
                   int(scale * 0.9), (1, 1, 1), -1, 8, 0)
        a = a * b + 128 * (1 - b)
        # Transform image to tensor
        img_as_tensor = self.to_tensor(a)
        # Get label (class) of the image based on the cropped pandas column
        single_image_label = self.label_arr[index]
        return (img_as_tensor, single_image_label)

    def __len__(self):
        return self.data_len
full_dataset = DRDataset('/path/to/dataset')

train_size = int(0.7 * len(full_dataset))
val_size = int(0.15 * len(full_dataset))
test_size = len(full_dataset) - train_size - val_size
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
    full_dataset, [train_size, val_size, test_size])

train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8,
                                                shuffle=True, num_workers=4,
                                                pin_memory=True)
valid_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=8,
                                                shuffle=True, num_workers=4,
                                                pin_memory=True)
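Side note: rebuilding the loader with num_workers=0 should raise the error directly in the main process instead of re-raising it from a worker, which gives a shorter traceback (a minimal sketch of that check):

# Hypothetical debugging variant: with num_workers=0 the exception is raised
# in the main process rather than wrapped by a DataLoader worker
debug_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8,
                                           shuffle=True, num_workers=0)
next(iter(debug_loader))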
classes = ('none', 'mild', 'moderate', 'severe', 'proliferative')

for data, target in train_data_loader:
    for i in range(0, 4):
        im = data[i]
        im = torch.squeeze(im)
        plt.imshow(np.transpose(im.numpy(), (1, 2, 0)), cmap='gray')
        plt.show()
        print(str(int(target[i])) + ": " + classes[int(target[i])])
    break
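Since the error says the sizes differ in dimension 2 (634 vs. 440), I assume the preprocessed images come out with different widths. A quick check of a few samples should confirm it:

# Sanity check: for default_collate to stack a batch, every sample must have
# the same shape; I expect these widths to differ (e.g. 634 vs. 440)
for idx in range(3):
    img, label = full_dataset[idx]
    print(idx, tuple(img.shape), label)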
What am I doing wrong?
--BJ