I am trying to apply L2 regularization to the weights in a custom Keras layer.

import tensorflow as tf
from tensorflow import keras

class Linear(keras.layers.Layer):
    def __init__(self, units=32, input_dim=32, regularizer_amount=0.01, name="linear", **kwargs):
        super().__init__(name=name, **kwargs)
        self.units = units
        self.input_dim = input_dim
        self.regularizer_amount = regularizer_amount

    def build(self, input_shape):
        # The hook must be named `build`; Keras calls it on the layer's first use.
        self.w = self.add_weight(
            shape=(self.input_dim, self.units),
            initializer="random_normal",
            regularizer=keras.regularizers.L2(self.regularizer_amount),
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

Shouldn't this apply regularization to the model? The results I get when training the network are the same as when I remove the regularizer.
