from keras import backend as K
from keras.regularizers import Regularizer
import theano.tensor as T

def kl_divergence(p, p_hat):
    return (p * K.log(p / p_hat)) + ((1 - p) * K.log((1 - p) / (1 - p_hat)))

class SparseActivityRegularizer(Regularizer):
    sparsityBeta = None

    def __init__(self, l1=0., l2=0., p=0.01, sparsityBeta=0.1):
        self.p = p
        self.sparsityBeta = sparsityBeta

    def set_layer(self, layer):
        self.layer = layer

    def __call__(self, loss):
        # p_hat needs to be the average activation of the units in the hidden layer.
        p_hat = T.sum(T.mean(self.layer.get_output(True), axis=0))
        loss += self.sparsityBeta * kl_divergence(self.p, p_hat)
        return loss

    def get_config(self):
        return {"name": self.__class__.__name__,
                "p": self.l1}

When I call this custom regularizer in the model as shown below,

from keras.layers import Input, Dense, Dropout
from keras.models import Model
from keras.callbacks import ModelCheckpoint, TensorBoard

dr = 0.5
inputs = Input(shape=(392,))
x = Dense(1000, activation='relu',
          activity_regularizer=SparseActivityRegularizer())(inputs)
x = Dropout(dr)(x)
out = Dense(392, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=out)

model.compile(loss=euc_dist_keras,
              optimizer='adadelta', metrics=["accuracy"])
model.summary()

filepath = "weightdae.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                             save_best_only=True, mode='min')
callbacks_list = [checkpoint, TensorBoard(log_dir='/tmp/autoencoder')]
hist = model.fit(ptilde, p,
                 nb_epoch=40,
                 shuffle=True,
                 validation_data=(ptilde_val, p_val),
                 batch_size=32,
                 callbacks=callbacks_list)

I get the following error:

AttributeError: 'SparseActivityRegularizer' object has no attribute 'layer'

Can someone please help me solve this error? I have checked the implementation of the regularizer, and activity regularizers in Keras were implemented the same way. But here it somehow cannot find the attribute 'layer' and throws this error.

hans

2 Answers


This type of regularizer declaration was deprecated here. Since Keras 1.2.0, you must implement regularization as a function like here or as a callable class like here. Because Keras no longer calls set_layer(), the layer attribute is never set, which is why you get the AttributeError.
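For reference, here is a minimal sketch of the function-style approach (the function name and parameters are assumptions; the only contract is that an activity regularizer is a callable that takes the layer's output tensor and returns a scalar loss):

from keras import backend as K

def sparse_activity_regularizer(rho=0.01, beta=0.1):
    # Returns a callable that Keras applies to the layer's output tensor.
    # Assumes the activations lie in (0, 1), e.g. sigmoid hidden units.
    def regularizer(activations):
        # Average activation of each hidden unit over the batch,
        # clamped to avoid division by zero in the KL term
        rho_hat = K.maximum(K.mean(activations, axis=0), 1e-10)
        # KL divergence between the target sparsity rho and the observed rho_hat
        kl = rho * K.log(rho / rho_hat) + (1 - rho) * K.log((1 - rho) / (1 - rho_hat))
        # Sum over the hidden units and weight by beta
        return beta * K.sum(kl)
    return regularizer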

Siamak S
    While this may theoretically answer the question, it would be preferable to include the essential parts of the answer here, and provide the link for reference. – Alex Riabov Aug 23 '18 at 08:54
  • I found this deprecation by tracing GitHub commits in the Keras repository. All I know is that it is deprecated; I don't know how to implement this regularization. A link about the deprecation has been added to the answer. – Siamak S Aug 23 '18 at 09:03

Try this:

import tensorflow as tf
import keras
from keras import backend as K

class SparseRegularizer(keras.regularizers.Regularizer):

    def __init__(self, rho=0.01, beta=1):
        """
        rho  : Desired average activation of the hidden units
        beta : Weight of sparsity penalty term
        """
        self.rho = rho
        self.beta = beta

    def __call__(self, activation):
        rho = self.rho
        beta = self.beta
        # sigmoid because we need the probability distributions
        activation = tf.nn.sigmoid(activation)
        # average over the batch samples
        rho_bar = K.mean(activation, axis=0)
        # Avoid division by 0
        rho_bar = K.maximum(rho_bar,1e-10) 
        KLs = rho*K.log(rho/rho_bar) + (1-rho)*K.log((1-rho)/(1-rho_bar))
        return beta * K.sum(KLs) # sum over the layer units

    def get_config(self):
        return {
            'rho': self.rho,
            'beta': self.beta
        }
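Usage would then look like this (a sketch only, with the layer sizes borrowed from the question):

from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(392,))
x = Dense(1000, activation='relu',
          activity_regularizer=SparseRegularizer(rho=0.01, beta=1))(inputs)
out = Dense(392, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=out)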
Jude TCHAYE