I am running the code in tensorflow federated learning (10 epochs, 100 rounds), but the accuracy is not increasing and it's around 0.5. Part of my code is as follows:
def create_compiled_keras_model():
layer1 = tf.keras.layers.GlobalAveragePooling2D()(output)
layer1 = tf.keras.layers.Dense(units=256)(output)
model_output = tf.keras.layers.Dense(units=2, activation='relu')(layer1)
model = tf.keras.Model(model.input, model_output)
return model
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_keras_model(keras_model, sample_batch, loss=tf.keras.losses.CategoricalCrossentropy(),metrics=[tf.keras.metrics.CategoricalAccuracy()])
And,
for round_num in range(1, NUM_ROUNDS+1):
state, tff_metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics{}'.format(round_num,tff_metrics['train'].items()))
eval_model = create_keras_model()
eval_model.compile(optimizer=optimizers.Adam(learning_rate=client_lr),
loss=losses.BinaryCrossentropy(),
metrics=[tf.keras.metrics.Accuracy()])
state.model.assign_weights_to(eval_model)
ev_result = eval_model.evaluate(x_val, y_val, verbose=2)
train_metrics = tff_metrics['train']
for name, value in tff_metrics['train'].items():
tf.summary.scalar(name,value, step=round_num)
I saw few questions on stackoverflow on this, but couldn't find the solution anywhere.I would appreciate any help.