When you say "change the learning rate based on iteration", do you mean you want to change it at the end of each batch? If so, you can do that with a custom callback. I have not tested this, but the code would be something like:
import tensorflow as tf
from tensorflow import keras

class LRA(keras.callbacks.Callback):
    def __init__(self, model, initial_learning_rate, gamma, power):
        super(LRA, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.gamma = gamma
        self.power = power
        self.model = model  # model is your compiled model

    def on_train_begin(self, logs=None):
        # start training at the initial learning rate
        tf.keras.backend.set_value(self.model.optimizer.lr, self.initial_learning_rate)

    def on_train_batch_end(self, batch, logs=None):
        # polynomial decay: lr = lr0 * ((batch + 1) * gamma + 1) ** (-power)
        lr = self.initial_learning_rate * tf.pow((batch + 1) * self.gamma + 1, -self.power)
        tf.keras.backend.set_value(self.model.optimizer.lr, lr)
        # print('for ', batch, ' lr set to ', lr)  # remove comment if you want to see the lr change
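One thing to note: on_train_batch_end receives the batch index within the current epoch, so the schedule above restarts at every epoch boundary. If you instead want the decay to continue across epochs (I am assuming that may be what you mean by "iteration"), a sketch of a variant that keeps its own global step counter would be:

class LRAGlobal(keras.callbacks.Callback):
    def __init__(self, model, initial_learning_rate, gamma, power):
        super(LRAGlobal, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.gamma = gamma
        self.power = power
        self.model = model
        self.global_step = 0  # my own counter, not part of the Keras API

    def on_train_begin(self, logs=None):
        tf.keras.backend.set_value(self.model.optimizer.lr, self.initial_learning_rate)

    def on_train_batch_end(self, batch, logs=None):
        # count batches across all epochs instead of using the per-epoch batch index
        self.global_step += 1
        lr = self.initial_learning_rate * tf.pow(self.global_step * self.gamma + 1, -self.power)
        tf.keras.backend.set_value(self.model.optimizer.lr, lr)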
Let me know if this works; again, I have not tested it yet.
Before you run model.fit, include code like this:

initial_learning_rate = .001  # set to desired value
gamma = .1                    # example placeholder, set to desired value
power = 1.0                   # example placeholder, set to desired value
callbacks = [LRA(model=model, initial_learning_rate=initial_learning_rate, gamma=gamma, power=power)]
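In case it helps, here is a minimal end-to-end sketch of how the callback plugs into training; the toy model, random data, and optimizer are placeholders of my own, not taken from your setup:

import numpy as np  # the tf / keras imports and the LRA class from above are assumed

# toy model and random data purely for illustration
model = keras.Sequential([
    keras.layers.Dense(10, activation='relu', input_shape=(8,)),
    keras.layers.Dense(1)
])
model.compile(optimizer=keras.optimizers.Adam(), loss='mse')

x = np.random.rand(256, 8).astype('float32')
y = np.random.rand(256, 1).astype('float32')

callbacks = [LRA(model=model, initial_learning_rate=.001, gamma=.1, power=1.0)]
model.fit(x, y, batch_size=32, epochs=3, callbacks=callbacks)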