My dataset is the MNIST sign-language training image dataset, with approximately 27,000 entries. I partitioned it into a training set of roughly 21,000 entries and a validation set of roughly 6,000 entries. However, when I fit the model, it appears to train on only 687 entries per epoch.
# Hold out 20% of the training data as a validation split (seeded for
# reproducibility). Note: X_train/y_train are reassigned in place.
X_train, X_validate, y_train, y_validate = train_test_split(
    X_train, y_train, test_size=0.2, random_state=123
)

# Reshape flat pixel rows into (28, 28, 1) single-channel images so the
# data matches the Conv2D input_shape. The original `*(28, 28, 1)`
# tuple-unpacking was needless indirection; plain arguments read better.
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_validate = X_validate.reshape(X_validate.shape[0], 28, 28, 1)

# The observed outputs (previously fused onto the print lines, which made
# them invalid Python) are kept here as comments:
print(X_train.shape)     # (21964, 28, 28, 1)
print(y_train.shape)     # (21964,)
print(X_validate.shape)  # (5491, 28, 28, 1)
print(y_validate.shape)  # (5491,)
# CNN classifier for 28x28 single-channel sign-language images, 25 classes.
# Only the FIRST layer needs input_shape; Keras infers every later layer's
# input from the previous layer, so the repeated input_shape arguments on
# the deeper Conv2D layers were ignored and have been removed. Dead
# commented-out Dropout lines were removed as well.
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                 input_shape=(28, 28, 1)))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(128, activation="relu"))
model.add(Dense(128, activation="relu"))
# 25 output classes (sign-language letters), softmax for probabilities;
# pairs with the sparse_categorical_crossentropy loss used at compile time.
model.add(Dense(25, activation="softmax"))
I then fit the model on the training and validation sets shown above, and for each epoch the progress bar reports only 687.
model.compile(optimizer=SGD(learning_rate=0.001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# NOTE: the "687" Keras prints per epoch is the number of BATCHES (steps),
# not the number of samples: ceil(21964 / 32) = 687 with the default
# batch_size of 32. All 21,964 training samples ARE used every epoch.
# batch_size is written out explicitly below (same as the default) so the
# step count in the progress bar is no longer surprising.
history2 = model.fit(X_train, y_train,
                     batch_size=32,
                     epochs=50,
                     validation_data=(X_validate, y_validate))