I have a model that was trained for pedestrian detection. When I feed it a handmade sample image, it raises:

ValueError: Input 0 of layer sequential_1 is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape [None, 60, 60, 3]

Checking one training sample shows it is flattened:

x_train[0].shape
(3600,)

Please help!
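For context, a minimal sketch of what the traceback is saying (assumptions: the saved model expects single-channel 60x60 input, and the handmade sample is RGB):

import numpy as np

# The error says axis -1 (channels) should be 1, i.e. the model wants
# grayscale (60, 60, 1) input, but it received an RGB batch [None, 60, 60, 3].
rgb_batch = np.zeros((1, 60, 60, 3), dtype=np.float32)  # hypothetical sample

# One possible reconciliation: collapse RGB to one channel before predicting.
gray_batch = rgb_batch.mean(axis=-1, keepdims=True)
print(gray_batch.shape)  # (1, 60, 60, 1)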
import os
import random
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from imutils import paths
#### Reading and preprocessing the pedestrian image data set
def load(paths, verbose=-1):
    '''expects images for each class in a separate dir,
    e.g. all images of class 0 in a directory named 0'''
    data = list()
    labels = list()
    # loop over the input images
    for (i, imgpath) in enumerate(paths):
        # load the image and extract the class label from the directory name
        # im_gray = cv2.imread(imgpath, cv2.IMREAD_GRAYSCALE)
        im_gray = cv2.imread(imgpath)
        image = cv2.resize(im_gray, (60, 60))
        # image = np.array(image).flatten()
        label = imgpath.split(os.path.sep)[-2]
        # scale the image to [0, 1] and add to list
        data.append(image / 255)
        labels.append(label)
        # show an update every `verbose` images
        if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
            print("[INFO] processed {}/{}".format(i + 1, len(paths)))
    # return a tuple of the data and labels
    return data, labels
### Creating train-test split
img_path = r'D:\Programs\Programming\WorkPlace2\AITSD\pedestrian Recognition - Based with All train samples\Train'
#get the path list using the path object
image_paths = list(paths.list_images(img_path))
#apply our function
image_list, label_list = load(image_paths, verbose=10000)
#binarize the labels
#lb = LabelBinarizer()
#label_list = lb.fit_transform(label_list)
#split data into training and test set
X_train, X_test, y_train, y_test = train_test_split(image_list,
label_list,
test_size=0.1,
random_state=42)
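A quick sanity check of the split (illustrative; assumes load() returned unflattened 60x60 BGR images):

X_train_arr = np.asarray(X_train)
print(X_train_arr.shape)  # expect (n_samples, 60, 60, 3); a result of
                          # (n_samples, 3600) would mean the flatten()
                          # line in load() was active during training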
### Federated Members (clients) as Data Shards
def create_clients(image_list, label_list, num_clients=10, initial='clients'):
    '''return: a dictionary with clients' names as keys and data shards
    (lists of image/label tuples) as values.
    args:
        image_list: a list of numpy arrays of training images
        label_list: a list of binarized labels for each image
        num_clients: number of federated members (clients)
        initial: the clients' name prefix, e.g. clients_1
    '''
    # create a list of client names
    client_names = ['{}_{}'.format(initial, i + 1) for i in range(num_clients)]
    # randomize the data
    data = list(zip(image_list, label_list))
    random.shuffle(data)
    # shard data and place at each client
    size = len(data) // num_clients
    shards = [data[i:i + size] for i in range(0, size * num_clients, size)]
    # number of clients must equal number of shards
    assert(len(shards) == len(client_names))
    return {client_names[i]: shards[i] for i in range(len(client_names))}
#create clients
clients = create_clients(X_train, y_train, num_clients=10, initial='client')
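Each client ends up with len(data) // num_clients samples; a quick illustrative check:

for name, shard in clients.items():
    print(name, len(shard))  # every shard should be the same size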
### Preprocessing and batching clients' and test data
def batch_data(data_shard, bs=32):
    '''Takes in a client's data shard and creates a tf.data.Dataset object from it
    args:
        data_shard: a list of (data, label) tuples constituting a client's shard
        bs: batch size
    return:
        a batched tf.data.Dataset object'''
    # separate shard into data and label lists
    data, label = zip(*data_shard)
    dataset = tf.data.Dataset.from_tensor_slices((list(data), list(label)))
    return dataset.shuffle(len(label)).batch(bs)
#process and batch the training data for each client
clients_batched = dict()
for (client_name, data) in clients.items():
clients_batched[client_name] = batch_data(data)
#process and batch the test set
test_batched = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(len(y_test))
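Inspecting one client's batched dataset (illustrative; 'client_1' is the first generated name):

sample_batch = next(iter(clients_batched['client_1']))
print(sample_batch[0].shape)  # (32, 60, 60, 3) unless the images were flattened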
#### Creating the model (a CNN, despite the SimpleMLP class name)
class SimpleMLP:
    @staticmethod
    def build(shape, classes):
        model = Sequential()
        model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu', input_shape=(60, 60, 3)))
        model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
        model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(rate=0.25))
        # Flatten is needed so the Dense classifier sees a 1-D feature vector
        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(rate=0.5))
        model.add(Dense(classes, activation='softmax'))
        return model
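A quick shape check of the built model (illustrative):

m = SimpleMLP.build(3600, 43)
m.summary()  # the final Dense layer should report output shape (None, 43)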
lr = 0.01
comms_round = 100 #Global epochs
loss='categorical_crossentropy'
metrics = ['accuracy']
# earlier attempt (invalid: tf.keras Adam has no `momentum` argument):
# optimizer = Adam(lr=lr,
#                  decay=lr / comms_round,
#                  momentum=0.9
#                  )
optimizer = tf.keras.optimizers.Adam(
learning_rate=lr,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False,
name='Adam',
)
### Model Aggregation (Federated Averaging)
def weight_scaling_factor(clients_trn_data, client_name):
    client_names = list(clients_trn_data.keys())
    # get the batch size
    bs = list(clients_trn_data[client_name])[0][0].shape[0]
    # first calculate the total training data points across clients
    global_count = sum([tf.data.experimental.cardinality(clients_trn_data[name]).numpy() for name in client_names]) * bs
    # get the total number of data points held by this client
    local_count = tf.data.experimental.cardinality(clients_trn_data[client_name]).numpy() * bs
    return local_count / global_count
def scale_model_weights(weight, scalar):
    '''function for scaling a model's weights'''
    weight_final = []
    steps = len(weight)
    for i in range(steps):
        weight_final.append(scalar * weight[i])
    return weight_final
def sum_scaled_weights(scaled_weight_list):
    '''Return the sum of the listed scaled weights. This is equivalent to the weighted average of the weights.'''
    avg_grad = list()
    # sum the corresponding layers across all clients' scaled weights
    for grad_list_tuple in zip(*scaled_weight_list):
        layer_mean = tf.math.reduce_sum(grad_list_tuple, axis=0)
        avg_grad.append(layer_mean)
    return avg_grad
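A toy check of the aggregation helpers (hypothetical two-client, one-layer weights):

w_a = [tf.constant([2.0, 4.0])]
w_b = [tf.constant([6.0, 8.0])]
scaled = [scale_model_weights(w_a, 0.5), scale_model_weights(w_b, 0.5)]
print(sum_scaled_weights(scaled))  # [4., 6.] -> the elementwise mean, as FedAvg expects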
def test_model(X_test, Y_test, model, comm_round):
    # the model ends in softmax, so predictions are probabilities, not logits
    cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    # logits = model.predict(X_test, batch_size=100)
    logits = model.predict(X_test)
    loss = cce(Y_test, logits)
    acc = accuracy_score(tf.argmax(logits, axis=1), tf.argmax(Y_test, axis=1))
    print('comm_round: {} | global_acc: {:.3%} | global_loss: {}'.format(comm_round, acc, loss))
    return acc, loss
### Federated Model Training
# initialize global model
smlp_global = SimpleMLP()
# global_model = smlp_global.build(784, 10)
global_model = smlp_global.build(3600, 43)  # flattened sample size (60*60*1) and number of classes
# commence global training loop
for comm_round in range(comms_round):
    # get the global model's weights - will serve as the initial weights for all local models
    global_weights = global_model.get_weights()
    # initial list to collect local model weights after scaling
    scaled_local_weight_list = list()
    # randomize client order - using keys
    client_names = list(clients_batched.keys())
    random.shuffle(client_names)
    # loop through each client and create a new local model
    for client in client_names:
        smlp_local = SimpleMLP()
        local_model = smlp_local.build(3600, 43)
        local_model.compile(loss=loss,
                            optimizer=optimizer,
                            metrics=metrics)
        # set local model weights to the weights of the global model
        local_model.set_weights(global_weights)
        # fit local model with client's data
        local_model.fit(clients_batched[client], epochs=1, verbose=0)
        # scale the model weights and add to list
        scaling_factor = weight_scaling_factor(clients_batched, client)
        scaled_weights = scale_model_weights(local_model.get_weights(), scaling_factor)
        scaled_local_weight_list.append(scaled_weights)
        # clear session to free memory after each client
        K.clear_session()
    # to get the average over all the local models, we simply take the sum of the scaled weights
    average_weights = sum_scaled_weights(scaled_local_weight_list)
    # update global model
    global_model.set_weights(average_weights)
    # test global model and print out metrics after each communication round
    for (X_test, Y_test) in test_batched:
        global_acc, global_loss = test_model(X_test, Y_test, global_model, comm_round)
ADAM_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(len(y_train)).batch(320)
# note: tf.reshape cannot be applied to a tf.data.Dataset object; the dataset
# elements are already (60, 60, 3) images, so no reshape is needed here
smlp_ADAM = SimpleMLP()
ADAM_model = smlp_ADAM.build(3600, 43)
ADAM_model.compile(loss=loss,
                   optimizer=optimizer,
                   metrics=metrics)
# fit the ADAM training data to the model
_ = ADAM_model.fit(ADAM_dataset, epochs=100, verbose=0)
# test the ADAM-trained baseline model and print out metrics
for (X_test, Y_test) in test_batched:
    ADAM_acc, ADAM_loss = test_model(X_test, Y_test, ADAM_model, 1)
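For reference, here is how I would feed a single handmade sample to the trained model (the path is a placeholder; preprocessing mirrors load()):

img = cv2.imread('handmade_sample.jpg')   # hypothetical path
img = cv2.resize(img, (60, 60)) / 255.0   # same resize/scale as load()
batch = np.expand_dims(img, axis=0)       # shape (1, 60, 60, 3)
print(global_model.predict(batch).argmax(axis=-1))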