I have a training dataset of 8,000 images and labels. The validation set consists of 1,957 images and labels, and the test set contains 2,487 images. Each image contains a white blood cell (WBC). The WBCs are divided into 4 categories: Eosinophil, Neutrophil, Monocyte and Lymphocyte. Eosinophils and Neutrophils are polynuclear, while the remaining two are mononuclear. The cells need to be classified into two classes: Polynuclear and Mononuclear.
# import libraries
import os
import cv2
import numpy as np
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import iterators, optimizers, serializers
from chainer.training import extensions
def get_data(folder):
    X = []
    y = []
    for wbc_type in os.listdir(folder):
        if not wbc_type.startswith('.'):
            if wbc_type in ['NEUTROPHIL', 'EOSINOPHIL']:
                label = 'POLYNUCLEAR'
            else:
                label = 'MONONUCLEAR'
            for image_filename in tqdm(os.listdir(folder + wbc_type)):
                img_file = cv2.imread(folder + wbc_type + '/' + image_filename)
                if img_file is not None:
                    # Downsample the image to 120 x 160 x 3.
                    # cv2.resize takes (width, height); scipy.misc.imresize
                    # was removed from recent SciPy releases.
                    img_file = cv2.resize(img_file, (160, 120))
                    img_arr = np.asarray(img_file)
                    X.append(img_arr)
                    y.append(label)
    X = np.asarray(X)
    y = np.asarray(y)
    return X, y
X_train, y_train = get_data('C:/Users/Neerajan/Desktop/blood-cells/dataset2-master/dataset2-master/images/TRAIN/')
X_test, y_test = get_data('C:/Users/Neerajan/Desktop/blood-cells/dataset2-master/dataset2-master/images/TEST/')
encoder = LabelEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train)
y_test = encoder.transform(y_test)
X_train = np.asarray(X_train, dtype=np.float32) / 255.0
X_test = np.asarray(X_test, dtype=np.float32) / 255.0
y_train = y_train.astype(int).flatten()
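A quick sanity check on the encoding and scaling (LabelEncoder sorts the class names, so MONONUCLEAR maps to 0 and POLYNUCLEAR to 1):
print(encoder.classes_)              # ['MONONUCLEAR' 'POLYNUCLEAR'] -> 0 and 1
print(X_train.shape, X_train.dtype)  # (N, 120, 160, 3) float32
print(X_train.min(), X_train.max())  # pixel values now in [0, 1]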
from chainer.datasets import split_dataset_random
from chainer.dataset import DatasetMixin

class MyDataset(DatasetMixin):
    def __init__(self, X, labels):
        super(MyDataset, self).__init__()
        self.X_ = X
        self.labels_ = labels
        self.size_ = X.shape[0]

    def __len__(self):
        return self.size_

    def get_example(self, i):
        # HWC -> CHW, the channel order Chainer's convolutions expect
        return np.transpose(self.X_[i, ...], (2, 0, 1)), self.labels_[i]
batch_size = 32
dataset = MyDataset(X_train, y_train)
dataset_train, valid = split_dataset_random(dataset, 8000, seed=0)  # 8,000 train / 1,957 validation
train_iter = iterators.SerialIterator(dataset_train, batch_size)
valid_iter = iterators.SerialIterator(valid, batch_size, repeat=False, shuffle=False)
from chainer.dataset import concat_examples
batch_image, batch_label = concat_examples(next(train_iter))
print("batch_image.shape\n{}".format(batch_image.shape))
print("batch_label.shape\n{}".format(batch_label.shape))
Output: batch_image.shape: (32, 3, 120, 160), batch_label.shape: (32,)
class MyModel(chainer.Chain):
    def __init__(self, n_out):
        super(MyModel, self).__init__()
        with self.init_scope():
            # 3x3 kernels with stride 3 and padding 1
            self.conv1 = L.Convolution2D(None, 32, ksize=3, stride=3, pad=1)
            self.conv2 = L.Convolution2D(32, 64, ksize=3, stride=3, pad=1)
            self.conv3 = L.Convolution2D(64, 128, ksize=3, stride=3, pad=1)
            self.fc4 = L.Linear(None, 32)
            self.fc5 = L.Linear(32, n_out)

    def __call__(self, x):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.leaky_relu(self.fc4(h))
        h = F.sigmoid(self.fc5(h))
        return h
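To double-check the architecture, a dummy forward pass (a quick sketch: with the stride-3 convolutions the 120 x 160 input shrinks to roughly 40 x 54, 14 x 18, then 5 x 6 with 128 channels, and fc4 infers its input size on the first call since its in_size is None):
# Dummy forward pass to confirm the output shape
model_check = MyModel(2)
x = np.zeros((1, 3, 120, 160), dtype=np.float32)
y = model_check(x)
print(y.shape)  # expected: (1, 2)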
from chainer import training

def train(model_object, batchsize=32, gpu_id=-1, max_epoch=14):
    model = L.Classifier(model_object)
    if gpu_id >= 0:
        model.to_gpu(gpu_id)

    # 4. Optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    serializers.save_npz('my.state', optimizer)

    # 5. Updater
    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)

    # 6. Trainer
    trainer = training.Trainer(updater, (max_epoch, 'epoch'),
                               out='C:/Users/Neerajan/Desktop/ReportDump')

    # 7. Evaluator: run validation with train mode switched off
    class TestModeEvaluator(extensions.Evaluator):
        def evaluate(self):
            with chainer.using_config('train', False):
                ret = super(TestModeEvaluator, self).evaluate()
            return ret

    trainer.extend(extensions.LogReport())
    trainer.extend(TestModeEvaluator(valid_iter, model, device=gpu_id))
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy',
                                           'validation/main/loss', 'validation/main/accuracy',
                                           'elapsed_time']))
    trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                         x_key='epoch', file_name='loss.png'))
    trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                                         x_key='epoch', file_name='accuracy.png'))
    trainer.run()
    del trainer
    return model
gpu_id = -1 # Set to -1 if you don't have a GPU
model = train(MyModel(2), gpu_id=gpu_id)
It is recommended that for binary classification we use a sigmoid activation function in the last layer of the model and binary cross-entropy as the loss in the classifier.
How do I implement binary_cross_entropy as the loss function in L.Classifier?
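For reference, this is what I think the recommendation points to in Chainer's API. A sketch, not verified: F.sigmoid_cross_entropy fuses the sigmoid with the binary cross-entropy, so the model would return raw logits (no F.sigmoid in __call__) from a single output unit:
model = L.Classifier(
    MyModel(1),                       # one logit per image
    lossfun=F.sigmoid_cross_entropy,  # sigmoid + binary cross-entropy
    accfun=F.binary_accuracy,         # thresholds the logit at 0
)
# sigmoid_cross_entropy also expects int32 labels shaped like the
# logits, e.g. y_train.astype(np.int32).reshape(-1, 1)
Is this the right way to wire it up, and does the final F.sigmoid in __call__ then have to be removed?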