1
`from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers


from keras.applications.vgg19 import VGG19

vgg_conv = VGG19(weights='imagenet', include_top=False, input_shape=(400, 400, 3))
vgg_conv.summary()

for layer in vgg_conv.layers:
    layer.trainable = False

model = models.Sequential()
model.add(layers.Conv2D(filters=3, kernel_size=3,padding='same',
                        activation="relu",
                         input_shape=(400,400,1)))

model.add(vgg_conv)
 

model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(2, activation='softmax'))

model.summary()`
`from keras.callbacks import EarlyStopping
import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import LearningRateScheduler, ReduceLROnPlateau
import tensorflow_addons as tfa

model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.00001),
              `metrics=[tf.keras.metrics.Precision(name='precision')\
                          ,tf.keras.metrics.Recall(name='recall')\
                          ,tf.keras.metrics.FalsePositives(name='false_positives')\
                          ,tf.keras.metrics.FalseNegatives(name='false_negatives')\
                          ,tfa.metrics.FBetaScore(num_classes=2,average="micro",threshold=0.5)\
                          ,'acc'])`
                        #  ,'acc',f1_score])

lr = ReduceLROnPlateau(monitor="val_loss", factor=0.9, patience=6, verbose=1)
es = EarlyStopping(monitor="val_loss", patience=32, verbose=1, mode="min", restore_best_weights=True)
sv = ModelCheckpoint('/root/work/VGG19_gray10_categorical.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')
`

I compiled the model like this. I customized the model architecture a little because my images have only one channel while VGG19 requires three. I wanted to know which parts of my images the model pays attention to, so I decided to use Grad-CAM as shown below.

`def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    grad_model = tf.keras.models.Model(
        [model.input], [model.get_layer('vgg19').get_layer(last_conv_layer_name).output, model.output]
    )
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]

    grads = tape.gradient(class_channel, last_conv_layer_output)

    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

 
    last_conv_layer_output = last_conv_layer_output[0]
 
    heatmap = last_conv_layer_output  
    heatmap = tf.squeeze(heatmap)  

    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()`
make_gradcam_heatmap(train_reject[1], model, 'block5_conv4' , pred_index=None)

This code causes the following error:

ValueError: Graph disconnected: cannot obtain value for tensor KerasTensor(type_spec=TensorSpec(shape=(None, 400, 400, 3), dtype=tf.float32, name='input_2'), name='input_2', description="created by layer 'input_2'") at layer "block1_conv1". The following previous layers were accessed without issue: []

Can anyone help me solve this problem?

jammini
  • 11
  • 2

0 Answers0