0

I am using a created algorithm of logistic regresion binary classifier with grandient descent and mini batch to create and adaptation valid for multiple clases with one vs rest technique, but when I do a test with a sklearn dataset the score is very low.

First I am sharing my gradient descent algorithm:

from scipy.special import expit    

def sigmoide(x):
    return expit(x)

class RegresionLogisticaMiniBatch():

    def __init__(self,clases=[0,1],normalizacion=False,
                  rate=0.1,rate_decay=False,batch_tam=64):
    
        self.clases = clases;
        self.rate = rate;
        self.normalizacion = normalizacion;
        self.rate_decay = rate_decay;
        self.batch_tam = batch_tam;
        self.pesos = None
        self.media = None
        self.desviacion = None


    def entrena(self, X, y, n_epochs, reiniciar_pesos=False, pesos_iniciales=None):
        self.X = X
        self.y = y
        self.n_epochs = n_epochs
        
        if reiniciar_pesos or self.pesos is None:
            self.pesos = pesos_iniciales if pesos_iniciales is not None else np.random.uniform(-1, 1, size=X.shape[1])

        if self.normalizacion:
            self.media = np.mean(X, axis=0)
            self.desviacion = np.std(X, axis=0)
            X = (X - self.media) / self.desviacion

        indices = np.random.permutation(len(X))
        X_shuffled = X[indices]
        y_shuffled = y[indices]
        for j in range(n_epochs):
            for i in range(0, len(X), self.batch_tam):
                batch_X = X_shuffled[i:i + self.batch_tam]
                batch_y = y_shuffled[i:i + self.batch_tam]
            
                # Compute logistic function (sigmoid)
                z = np.dot(batch_X, self.pesos)
                y_pred = sigmoide(z)

                # Compute gradient
                error = batch_y - y_pred
                gradiente = np.dot(batch_X.T, error) / len(batch_X)

                # Update weights
                self.pesos += self.rate * gradiente

    def clasifica_prob(self, ejemplo):
        if self.pesos is None:
            raise ClasificadorNoEntrenado("El clasificador no ha sido entrenado")

        if self.normalizacion:
            ejemplo = (ejemplo - self.media) / self.desviacion

        probabilidad = sigmoide(np.dot(ejemplo, self.pesos))
        if probabilidad >= 0.5:
            return 1
        else:
            return 0

     
    def clasifica(self,ejemplo):
        probabilidad = self.clasifica_prob(ejemplo)
        return probabilidad

And the function to get the score:

def rendimiento(clasificador, X, y):
    aciertos = 0
    total_ejemplos = len(X)
    
    for i in range(total_ejemplos):
        ejemplo = X[i]
        clasificacion_esperada = y[i]
        clasificacion_obtenida = clasificador.clasifica(ejemplo)
        
        if clasificacion_obtenida == clasificacion_esperada:
            aciertos += 1
    
    accuracy = aciertos / total_ejemplos
    return accuracy

And now I share my developed one vs rest adaptation algorithm:

from scipy.special import softmax

class RL_OvR():

     def __init__(self,clases,rate=0.1,rate_decay=False,batch_tam=64):

        self.clases = clases
        self.rate = rate
        self.rate_decay = rate_decay
        self.batch_tam = batch_tam
        self.clasificadores = {}


     def entrena(self,X,y,n_epochs):
         
        self.X = X
        self.y = y
        self.n_epochs = n_epochs
        self.classifiers = {}

        for clase in self.clases:
            # Convertir el problema de múltiples clases en un problema de dos clases
            binary_y = np.where(y == clase, 1, 0)
            classifier = RegresionLogisticaMiniBatch(clases=[0, 1],
                                                     rate=self.rate,
                                                     rate_decay=self.rate_decay,
                                                     batch_tam=self.batch_tam)
            classifier.entrena(X, binary_y, n_epochs)
            self.classifiers[clase] = classifier



     def clasifica(self,ejemplo):
         
        probabilidades = []

        for classifier in self.classifiers.values():
            probabilidad = classifier.clasifica_prob(ejemplo)
            probabilidades.append(probabilidad)

        probabilidades = softmax(probabilidades)
        clase_predicha = np.argmax(probabilidades)
        return clase_predicha`

The score is very low, I tried to do it without softmax but the results are similar

This is the dataset I am using for testing:

from sklearn.datasets import load_iris
iris=load_iris()
X_iris=iris.data
y_iris=iris.target
Xe_iris,Xt_iris,ye_iris,yt_iris=train_test_split(X_iris,y_iris)

rl_iris=RL_OvR([0,1,2],rate=0.001,batch_tam=20)

rl_iris.entrena(Xe_iris,ye_iris,n_epochs=1000)

print(rendimiento(rl_iris,Xe_iris,ye_iris))

print(rendimiento(rl_iris,Xt_iris,yt_iris))

I tried some changes on my algorithm but I could not get a good score. I hope someone can help me to solve the problem.

0 Answers0