I am feeding my train loader with images (.nii.gz) of shape (240, 240) and expect torch.Size([1, 3, 256, 256]) (batch size 1, 3 RGB channels, 256, 256), but the output is torch.Size([1, 240, 256, 256]). Where is the problem?
My code is:
import glob

import nibabel as nib
import monai.transforms as mt
from monai.data import Dataset, DataLoader
from monai.utils import first

root_dir = '/content/GenerativeModels/T2/*'
root_dir_img0 = '/content/GenerativeModels/T2/img0.nii.gz'

# collect all volumes matching the glob pattern and wrap each path in a data dict
images = sorted(glob.glob(root_dir))
data_dicts = [{"image": img} for img in images]

img0 = nib.load(root_dir_img0)
print('img0.shape =', img0.shape)         # img0.shape = (240, 240)
print('image number =', len(data_dicts))  # image number = 4
Define the train / val / test splits:
pp = len(data_dicts) // 4   # np.int is removed in recent NumPy; integer division is enough

#### For T1
pack1 = data_dicts[0:pp]
pack2 = data_dicts[pp:2*pp]
pack3 = data_dicts[2*pp:3*pp]
pack4 = data_dicts[3*pp:]

train_data = pack1 + pack2 + pack3
val_data = pack4
test_data = pack4

print(len(train_data))
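For reference, with my 4 images the split works out to pp = 1, i.e. 3 training volumes and the same single volume reused for val and test; a quick sanity check:

# sanity check of the split sizes: with 4 images, pp = 1
print(len(train_data), len(val_data), len(test_data))  # expected: 3 1 1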
My transforms:
keys = ["image"]
train_transforms = val_transforms = mt.Compose(
    [
        mt.LoadImaged(keys),
        mt.Resized(keys=["image"], spatial_size=(256, 256)),
        mt.Rotate90d(keys),
        mt.ScaleIntensityd("image", channel_wise=True),
        mt.EnsureTyped(keys),
        mt.CopyItemsd(keys=["image"], times=1, names=["low_res_image"]),
        mt.Resized(keys=["low_res_image"], spatial_size=(128, 128)),
    ]
)
train_ds = Dataset(data=train_data, transform=train_transforms) #data=train_datalist
train_loader = DataLoader(train_ds, batch_size=1, shuffle=False, num_workers=1, persistent_workers=True) #train_ds
val_ds = Dataset(data=val_data, transform=val_transforms) #data=val_datalist
val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=1)
check_data = first(train_loader)
print(check_data["image"].shape, check_data["low_res_image"].shape , check_data["image"].dtype, check_data["low_res_image"].dtype)
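Before reading the shapes off the DataLoader, a quick way to rule out batching/collation issues is to run the transform chain on a single dict directly (just a debugging step on my side):

# apply the transform chain to one sample dict, bypassing the DataLoader,
# to see whether the unexpected 240 already appears at this point
single = train_transforms(train_data[0])
print(single["image"].shape, single["low_res_image"].shape)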
I get: torch.Size([1, 240, 256, 256]) torch.Size([1, 240, 128, 128]) torch.float32 torch.float32, while I am expecting torch.Size([1, 3, 256, 256]) torch.Size([1, 3, 128, 128]) torch.float32 torch.float32. Thanks for any help!
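My current guess (not verified) is that LoadImaged returns the 2D slice without a channel dimension, so Resized treats the first 240 axis as channels and only resizes what it thinks are the spatial dims. If that is right, adding an explicit channel dimension right after loading and repeating it three times to get the RGB-like shape should give the size I expect; a sketch of what I mean (the EnsureChannelFirstd / RepeatChanneld usage here is my assumption, not tested):

# Guess only: add a channel axis right after loading, then repeat it 3x to mimic RGB.
train_transforms = val_transforms = mt.Compose(
    [
        mt.LoadImaged(keys),
        mt.EnsureChannelFirstd(keys),          # (240, 240)    -> (1, 240, 240)
        mt.RepeatChanneld(keys, repeats=3),    # (1, 240, 240) -> (3, 240, 240)
        mt.Resized(keys=["image"], spatial_size=(256, 256)),
        mt.Rotate90d(keys),
        mt.ScaleIntensityd("image", channel_wise=True),
        mt.EnsureTyped(keys),
        mt.CopyItemsd(keys=["image"], times=1, names=["low_res_image"]),
        mt.Resized(keys=["low_res_image"], spatial_size=(128, 128)),
    ]
)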