-1

So I've taken the Deep Learning AI course by Andrew Ng on Coursera. I am currently working on the last assignment of week 2. I reached the part where I have to write the forward- and backward-propagation functions. I managed to write the fwd_propagate function, which is fairly easy. This is the code below:

def fwd_propagate(w,b,X,y):
    """Forward pass: return the logistic-regression cross-entropy cost.

    w: (n, 1) weights, b: scalar bias, X: (n, m) inputs, y: (1, m) labels.
    """
    m = X.shape[1]  # number of training examples (one per column of X)
    activations = sigmoid(np.dot(w.T, X) + b)
    # Mean negative log-likelihood across all m examples.
    log_loss = y * np.log(activations) + (1 - y) * np.log(1 - activations)
    return (-1/m) * np.sum(log_loss)

Now I have to write the bwd_propagation function, but I don't know where or how to start. Can someone help and explain to me what I should write?

This is everything I wrote so far with the tests.

import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage


%matplotlib inline

def load_dataset(train_path='C:/Users/Univ/Desktop/ML Intern/Logistic-Regression-with-a-Neural-Network-mindset-master/train_catvnoncat.h5',
                 test_path='C:/Users/Univ/Desktop/ML Intern/Logistic-Regression-with-a-Neural-Network-mindset-master/test_catvnoncat.h5'):
    """Load the cat/non-cat dataset from two HDF5 files.

    The path parameters default to the original hard-coded absolute paths,
    so existing calls keep working; pass different paths to load the data
    from another location.

    Returns (train_x, train_y, test_x, test_y, classes); both label arrays
    are reshaped into (1, m) row vectors.
    """
    # Context managers guarantee the HDF5 handles are closed
    # (the original version left both files open).
    with h5py.File(train_path, "r") as train_dataset:
        train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
        train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels

    with h5py.File(test_path, "r") as test_dataset:
        test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
        test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
        classes = np.array(test_dataset["list_classes"][:]) # the list of classes

    # The labels come back 1-D; reshape them to (1, m) row vectors.
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes

# Load the cat/non-cat dataset and preview one training example.
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

# Show training image `index` with its label decoded from the classes array.
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:,index]) + ", it's a '" + classes[np.squeeze(train_set_y[:,index])].decode("utf-8") +  "' picture.")

# Labels are shaped (1, m), so shape[1] is the number of examples; the image
# tensor is presumably (m, num_px, num_px, 3), making shape[1] the pixel
# width/height — TODO confirm against the h5 file.
print(str(train_set_y.shape[1]) + " This is the amount of elements in the training set")
print(str(test_set_y.shape[1]) + " This is the amount of elements in the test set")
print(str(train_set_x_orig.shape[1]) + " This is the Num_px")
# NOTE(review): duplicate of the previous line, written as an f-string.
print(f"{train_set_x_orig.shape[1]} This is the Num_px")
print(train_set_x_orig.shape)

# Flatten each image into one column: (m, px, px, 3) -> (px*px*3, m).
X_flatten1 = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
# NOTE(review): the label arrays are already (1, m); reshape(shape[0], -1).T
# turns them into (m, 1), a different layout from the (1, m) labels used by
# the cost function — confirm this transpose is intended.
X_flatten2 = train_set_y.reshape(train_set_y.shape[0], -1).T 
X_flatten3 = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T 
X_flatten4 = test_set_y.reshape(test_set_y.shape[0], -1).T 

# Inspect the flattened arrays and their shapes.
print(X_flatten1)
print(X_flatten2)
print(X_flatten3)
print(X_flatten4)
print(X_flatten1.shape)
print(X_flatten2.shape)
print(X_flatten3.shape)
print(X_flatten4.shape)

print(" Let's standardize our data")
# 8-bit pixel intensities span 0-255, so divide by 255 (not 256) to map the
# features onto the full [0, 1] range.
train_set_x = X_flatten1/255
test_set_x = X_flatten3/255
print(train_set_x)
print(test_set_x)

def sigmoid(x):
    """Return the logistic sigmoid 1 / (1 + e^-x), elementwise for arrays."""
    return 1 / (1 + np.exp(-x))

# Sanity check: sigmoid(0) should print exactly 0.5, sigmoid(9.2) close to 1.
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(9.2) = " + str(sigmoid(9.2)))

def initialize_with_zeros(dim):
    """Create a zero weight column vector of shape (dim, 1) and a zero bias."""
    w = np.zeros((dim, 1))
    b = 0
    # Sanity checks mirroring the course scaffolding.
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))
    return w, b

# Smoke test: a 2-dimensional zero weight vector and a zero bias.
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))

def fwd_propagate(w,b,X,y):
    """Forward propagation for logistic regression.

    Parameters
    ----------
    w : ndarray of shape (n, 1), weights.
    b : scalar bias.
    X : ndarray of shape (n, m), one training example per column.
    y : ndarray of shape (1, m), 0/1 labels.

    Returns
    -------
    float: mean cross-entropy cost over the m examples.
    """
    m = X.shape[1]
    # Logistic sigmoid of the linear scores (same formula as the file's
    # sigmoid(), inlined here).
    A = 1 / (1 + np.exp(-(np.dot(w.T, X) + b)))
    # Guard against log(0): for saturated scores A rounds to exactly 0.0 or
    # 1.0, which made the original cost nan/inf. Clip just inside (0, 1).
    A = np.clip(A, 1e-15, 1 - 1e-15)
    J = (-1/m)*np.sum(y * np.log(A) + (1-y) * np.log(1-A))
    return J
Trip Trap
  • 33
  • 4

1 Answer

0

The next step is to calculate the derivatives for backpropagation:

# Gradients of the logistic-regression cost with respect to w and b,
# averaged over the m training examples.
# NOTE(review): the answer writes capital Y where the question's code uses y;
# rename to match, and reuse A and m from the forward pass.
dw = 1/m*(np.dot(X, ((A-Y).T)))
db = 1/m*(np.sum(A-Y))
Adarsh Wase
  • 1,727
  • 3
  • 12
  • 26