I have been trying to augment my roughly 360 images while training a model. This is what the code looks like:
img_data_gen_args = dict(rotation_range=90,
                         width_shift_range=0.3,
                         height_shift_range=0.3,
                         shear_range=0.5,
                         zoom_range=0.3,
                         horizontal_flip=True,
                         vertical_flip=True,
                         fill_mode='reflect')
mask_data_gen_args = dict(rotation_range=90,
                          width_shift_range=0.3,
                          height_shift_range=0.3,
                          shear_range=0.5,
                          zoom_range=0.3,
                          horizontal_flip=True,
                          vertical_flip=True,
                          fill_mode='reflect',
                          # Binarize the masks: every non-zero pixel becomes 1
                          preprocessing_function=lambda x: np.where(x > 0, 1, 0).astype(x.dtype))
image_data_generator = ImageDataGenerator(**img_data_gen_args)
image_generator = image_data_generator.flow_from_directory(train_images_path,
                                                            seed=seed,
                                                            batch_size=batch_size,
                                                            class_mode=None,  # Very important: otherwise the generator yields (image, label) tuples instead of plain image batches
                                                            target_size=(IMG_HEIGHT, IMG_WIDTH))
mask_data_generator = ImageDataGenerator(**mask_data_gen_args)
mask_generator = mask_data_generator.flow_from_directory(train_masks_path,
                                                          seed=seed,
                                                          batch_size=batch_size,
                                                          color_mode='grayscale',
                                                          class_mode=None,
                                                          target_size=(IMG_HEIGHT, IMG_WIDTH))
valid_img_generator = image_data_generator.flow_from_directory(val_images_path,
                                                                seed=seed,
                                                                batch_size=batch_size,
                                                                class_mode=None,
                                                                target_size=(IMG_HEIGHT, IMG_WIDTH))
valid_mask_generator = mask_data_generator.flow_from_directory(val_masks_path,
                                                                seed=seed,
                                                                batch_size=batch_size,
                                                                color_mode='grayscale',  # Read masks in grayscale
                                                                class_mode=None,
                                                                target_size=(IMG_HEIGHT, IMG_WIDTH))
train_generator = zip(image_generator, mask_generator)
val_generator = zip(valid_img_generator, valid_mask_generator)
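For context, training then consumes these paired generators roughly like this (model and num_epochs are defined elsewhere in my script; since the flow_from_directory iterators loop forever, I set explicit step counts):

history = model.fit(train_generator,
                    steps_per_epoch=len(image_generator),        # batches per epoch from the training iterator
                    validation_data=val_generator,
                    validation_steps=len(valid_img_generator),
                    epochs=num_epochs)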
Apart from these augmentations, I would also like to vary the RGB colour intensities so that the model becomes robust to low-light conditions as well. Could anybody give a tip on how to use the apply_transform function from the ImageDataGenerator class in addition to the augmentations already done in the code above?
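In case it clarifies what I am after: I know ImageDataGenerator also has brightness_range and channel_shift_range arguments that I could add to img_data_gen_args only (not to the mask arguments, since the masks should stay untouched), but I would like to understand the apply_transform route too. The sketch below is only my guess at how it might look; the helper name, the brightness/shift values, and the assumption that pixels are still in the 0-255 range are mine, and I am not sure how apply_transform handles the scaling internally:

def low_light_generator(img_gen, msk_gen):
    # Wrap the paired generators and additionally darken / colour-shift each image.
    # apply_transform works on a single 3D image, hence the loop over the batch.
    for imgs, msks in zip(img_gen, msk_gen):
        out = np.empty_like(imgs)
        for i in range(imgs.shape[0]):
            params = {'brightness': np.random.uniform(0.3, 1.0),            # < 1.0 darkens the image
                      'channel_shift_intensity': np.random.uniform(0, 30)}  # assumes 0-255 pixel values
            out[i] = image_data_generator.apply_transform(imgs[i], params)
        yield out, msks  # masks are deliberately left unchanged

train_generator = low_light_generator(image_generator, mask_generator)  # would replace the zip(...) above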