
I'm new to TensorFlow. I wrote the following code using TensorFlow 2 in Google Colab, but when I run it I get this error:

OOM when allocating tensor with shape[130000,130000] and type double on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Mul]

Can anyone help me solve this? (I guess something is wrong with the loss function, since the [130000,130000] shape matches my training-set size squared, and a float64 tensor that big would need about 130000 × 130000 × 8 bytes ≈ 135 GB, but I'm not sure.) Thank you.

try:
  # %tensorflow_version only exists in Colab.
  %tensorflow_version 2.x
except Exception:
  pass

import tensorflow as tf
print(tf.__version__)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from tensorflow.keras import Model

# ****** Load dataset, shuffle, and split into train/test sections ******
data = pd.read_csv('creditcard_short.csv.tar.gz')
data = data.dropna()
feature_data = data.drop(columns='Class', axis=1)
class_labels = data.Class.values
feature_data = feature_data.values

indx = np.random.permutation(len(feature_data))
feature_data = feature_data[indx]
class_labels = class_labels[indx]  # permute labels together with features so they stay aligned

train_labels = class_labels[:130000]
train_data   = feature_data[:130000]
test_labels  = class_labels[130000:]
test_data    = feature_data[130000:]

scaler = StandardScaler()
scaler.fit(train_data)
print(scaler.mean_)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)

print(train_data.shape)
print(test_data.shape)
print(test_data.shape[0]+train_data.shape[0])
print(train_labels.shape)
print(test_labels.shape)
print(data.dtypes)
print(train_data.dtype)

class MySVM(Model):
  def __init__(self, x_input, y_target):
    super(MySVM, self).__init__()
    # Trainable weight vector and bias of the linear decision function
    self.w = tf.Variable(np.random.rand(x_input.shape[1], 1), trainable=True, dtype=tf.float64, name='w')
    self.b = tf.Variable(1, trainable=True, dtype=tf.float64, name='bias')
    self.x_input = x_input
    self.y_target = y_target

  def call(self, x_input):
    # Linear model: y_pred = x @ w - b
    y_pred = tf.subtract(tf.matmul(x_input, self.w), self.b)
    return y_pred

  def loss(self, y_target):
    # Loss = max(0, 1 - pred*actual) + alpha * L2_norm(w)^2
    l2_norm = tf.reduce_sum(tf.square(self.w))
    # L2 regularization parameter, alpha (float64 so it matches the weights)
    alpha = tf.constant([0.01], dtype=tf.float64)
    # Margin (hinge) term in the loss
    classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(self.call(train_data), y_target))))
    # Put the terms together
    loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
    return loss

  def prediction(self, x_input):
    # Class decision: the sign of the margin
    return tf.sign(self.call(x_input))

  def optimizer(self):
    # Build the Adam optimizer; call this once and reuse the returned
    # instance, since a fresh Adam starts with empty moment estimates
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, decay=0.01)
    return optimizer

# Create an instance of the model
model = MySVM(train_data,train_labels)

lossi = []
optimizer = model.optimizer()  # build the optimizer once and reuse it across steps

def train_step(X, Y):
  with tf.GradientTape() as tape:
    loss = model.loss(train_labels)
    lossi.append(loss)
  gradients = tape.gradient(loss, model.trainable_variables)
  optimizer.apply_gradients(zip(gradients, model.trainable_variables))
  return loss.numpy()


epochs = 10
mini_batch = 128
loss_minibatch = []
loss_mean_per_batch = []
for i in range(epochs):
  # Reshuffle the training set (features and labels together) each epoch
  indx = np.random.permutation(len(train_data))
  train_data = train_data[indx]
  train_labels = train_labels[indx]
  epoch_losses = []
  for j in range(int(len(train_data) / mini_batch)):
    epoch_losses.append(train_step(train_data[j*mini_batch:(j+1)*mini_batch],
                                   train_labels[j*mini_batch:(j+1)*mini_batch]))
  loss_minibatch.extend(epoch_losses)
  loss_mean_per_batch.append(np.mean(epoch_losses))  # mean over this epoch's minibatches only
acc = tf.keras.metrics.BinaryAccuracy()
acc.update_state(train_labels,model(train_data))
print(acc.result().numpy())
  • You can pick a smaller batch size or use a data generator. – MH.AI.eAgLe Dec 04 '19 at 20:40
  • Thank you very much for replying. I changed it to 32 but got the error again. I don't know about data generators; if you think one would help, please explain how I can use it. – WGBrain Dec 04 '19 at 20:45
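
For reference, a minimal sketch of the kind of data generator the comment suggests, built with tf.data from the train_data and train_labels arrays defined in the question (the batch size of 32 is just the value tried in the comments; note that train_step would also have to compute the loss from its X and Y arguments, rather than from the full train_data, for the batch size to take effect):

# Stream shuffled minibatches instead of slicing the NumPy arrays by hand;
# shuffle() reshuffles on every pass, batch() yields (32, n_features) slices
train_ds = (tf.data.Dataset.from_tensor_slices((train_data, train_labels))
            .shuffle(buffer_size=len(train_data))
            .batch(32))

for i in range(epochs):
  for X, Y in train_ds:
    train_step(X, Y)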

0 Answers