I have trained a background-removal model on my custom images, and I am new to deploying computer vision models. Could anyone please share a blog post on how to deploy a PyTorch deep learning (computer vision) model using AWS Lambda? I have written a Lambda function that takes an input image, predicts the segmentation, and returns the image with the background removed. I am not sure whether this function is correct; please check it as well.
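
For context, the function is meant to sit behind API Gateway: the request body carries a base64-encoded image and the response returns a base64 PNG. Roughly, the payload looks like this (field names are taken from the handler below; the image data is elided):

{
  "body": "{\"image\": \"data:image/png;base64,<...>\"}"
}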

Define imports

# unzip_requirements is provided by serverless-python-requirements when zip: true
try:
    import unzip_requirements
except ImportError:
    pass

import json
from io import BytesIO
import base64

import boto3
import numpy as np
import torch
from PIL import Image

from preprocessing import RescaleT, ToTensorLab
from network.u2net import U2NET

# Two helper functions inside handler.py: img_to_base64_str converts a
# PIL image to a base64 data URL, and load_models loads the pretrained
# U-2-Net weights from S3 and keeps the model in memory

def img_to_base64_str(img):
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    buffered.seek(0)
    img_byte = buffered.getvalue()
    img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode()
    return img_str


def load_models(s3, bucket):
    # Download the pretrained U-2-Net weights from S3 and build the model on CPU.
    model = U2NET(3, 1)
    response = s3.get_object(Bucket=bucket, Key="models/u2net/u2net.pth")
    state = torch.load(BytesIO(response["Body"].read()), map_location=torch.device("cpu"))
    model.load_state_dict(state)
    model.eval()
    return model

def preprocess_raw_img(raw_img_array):
    """
    Preprocess a raw input array so that it can be fed into the U-2-Net architecture.
    :param raw_img_array: input image as a numpy array
    :return: normalized 4-D tensor (with batch dimension) ready for the model
    """
    rescaler = RescaleT(320)
    rescaled_img = rescaler(raw_img_array)
    tensor_converter = ToTensorLab(flag=0)
    tensor_img = tensor_converter(rescaled_img)
    tensor_img = tensor_img.unsqueeze(0)

    return tensor_img

def normPRED(d):
    # Min-max normalize the prediction map to the [0, 1] range.
    ma = torch.max(d)
    mi = torch.min(d)
    dn = (d - mi) / (ma - mi)
    return dn

def resize_img_to_orig(prediction_np, orig_img):
    # Resize the predicted mask back to the original image dimensions.
    image = Image.fromarray(prediction_np * 255).convert('RGB')
    image_original = image.resize((orig_img.shape[1], orig_img.shape[0]), resample=Image.BILINEAR)
    return image_original

def mask_to_orig_size(orig_img, rescale, threshold):
    # Convert the resized mask image to [0, 1] and binarize it with the threshold.
    mask_orig_size = np.array(orig_img, dtype=np.float64)
    mask_orig_size /= rescale
    mask_orig_size[mask_orig_size > threshold] = 1
    mask_orig_size[mask_orig_size <= threshold] = 0

    return mask_orig_size

def extract_foreground(mask_orig_size):
    # Build an RGBA array whose alpha channel is derived from the binary mask.
    shape = mask_orig_size.shape
    a_layer_init = np.ones(shape=(shape[0], shape[1], 1))
    mul_layer = np.expand_dims(mask_orig_size[:, :, 0], axis=2)
    a_layer = mul_layer * a_layer_init
    rgba_out = np.append(mask_orig_size, a_layer, axis=2)
    return rgba_out

def input_to_rgba_inp(input_arr, rescale):
    # Normalize the input image to [0, 1] and append a fully opaque alpha layer.
    input_arr = np.array(input_arr, dtype=np.float64)

    shape = input_arr.shape
    input_arr /= rescale
    a_layer = np.ones(shape=(shape[0], shape[1], 1))
    rgba_inp = np.append(input_arr, a_layer, axis=2)

    return rgba_inp

def u2net_api_call(raw_img_array, model):
    """
    Take an image array of any size and return only the foreground object.
    The raw input image is preprocessed and fed into the deep learning model;
    the foreground of the original image is then extracted using the mask
    generated by the model.
    """
    THRESHOLD = 0.9
    RESCALE = 255

    preprocessed_img = preprocess_raw_img(raw_img_array)

    # U-2-Net returns seven side outputs; d1 is the fused, most accurate one.
    d1, d2, d3, d4, d5, d6, d7 = model(preprocessed_img)

    prediction = d1[:, 0, :, :]
    prediction = normPRED(prediction)
    prediction_np = prediction.squeeze().cpu().data.numpy()

    img_orig_size = resize_img_to_orig(prediction_np, raw_img_array)
    mask_orig_size = mask_to_orig_size(img_orig_size, RESCALE, THRESHOLD)
    rgba_out = extract_foreground(mask_orig_size)
    rgba_inp = input_to_rgba_inp(raw_img_array, RESCALE)
    rem_back = rgba_inp * rgba_out

    return rem_back

# Load the model once at module scope so it stays in memory across warm invocations.
s3 = boto3.client("s3")
bucket = "sagemaker-m-model"
model = load_models(s3, bucket)

def lambda_handler(event, context):

    # Short-circuit warm-up pings so the model is not run unnecessarily.
    if event.get("source") in ["aws.events", "serverless-plugin-warmup"]:
        print("Lambda is warm!")
        return {}

    data = json.loads(event["body"])
    print("data keys:", data.keys())
    image = data["image"]
    # Strip the "data:image/png;base64," prefix and decode (extra "=" padding is harmless).
    image = image[image.find(",") + 1:]
    dec = base64.b64decode(image + "===")
    image = Image.open(BytesIO(dec)).convert("RGB")

    # The preprocessing helpers expect a numpy array, not a PIL image.
    raw_img_array = np.array(image)

    with torch.no_grad():
        background_removed = u2net_api_call(raw_img_array, model)

    # u2net_api_call returns a float RGBA array in [0, 1]; scale to 0-255 for PIL.
    output_image = Image.fromarray((background_removed * 255).astype(np.uint8))

    # convert the PIL image to base64
    result = {
        "output": img_to_base64_str(output_image)
    }

    # send the result back to the client inside the body field
    return {
        "statusCode": 200,
        "body": json.dumps(result),
        "headers": {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*"
        }
    }
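
Before deploying, the handler can be smoke-tested locally with a fake API Gateway event (this still requires access to the S3 bucket holding the weights, since the model loads at import time). A minimal sketch; the file name test.jpg is a placeholder:

if __name__ == "__main__":
    # Build a fake API Gateway event from a local sample image.
    with open("test.jpg", "rb") as f:  # placeholder path
        payload = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()
    event = {"body": json.dumps({"image": payload})}
    response = lambda_handler(event, None)
    print(response["statusCode"], len(response["body"]))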

I have tried deploying with the Serverless framework, but I got the errors below and I don't understand how to solve them.

Running "serverless" from node_modules

Warning: Invalid configuration encountered
  at 'custom.warmup.events': must be object
  at 'custom.warmup.timeout': must be object
  at 'functions.transformImage.warmup': must be object

Learn more about configuration validation here: http://slss.io/configuration-validation

Deploying br to stage dev (us-east-1)
Warning: WarmUp: Skipping warmer "events" creation. No functions to warm up.
Warning: WarmUp: Skipping warmer "timeout" creation. No functions to warm up.

✖ Stack br-dev failed to deploy (11s)
Environment: linux, node 16.14.0, framework 3.4.0 (local) 3.4.0v (global), plugin 6.1.2, SDK 4.3.1
Docs:        docs.serverless.com
Support:     forum.serverless.com
Bugs:        github.com/serverless/serverless/issues

Error:
Error: `docker run --rm -v /home/suri/project1/rmbg/br/cache/cf58e2124c894818b4beab8df9ac26ac92eeb326c8c74fc7e60e8f08ea86df1e_x86_64_slspyc:/var/task:z -v /home/suri/project1/rmbg/br/cache/downloadCacheslspyc:/var/useDownloadCache:z lambci/lambda:build-python3.6 /bin/sh -c chown -R 0\:0 /var/useDownloadCache && python3.6 -m pip install -t /var/task/ -r /var/task/requirements.txt --cache-dir /var/useDownloadCache && chown -R 0\:0 /var/task && chown -R 0\:0 /var/useDownloadCache` Exited with code 1
    at ChildProcess.<anonymous> (/home/suri/project1/rmbg/br/node_modules/child-process-ext/spawn.js:38:8)
    at ChildProcess.emit (node:events:520:28)
    at ChildProcess.emit (node:domain:475:12)
    at maybeClose (node:internal/child_process:1092:16)
    at Process.ChildProcess._handle.onexit (node:internal/child_process:302:5)

3 deprecations found: run 'serverless doctor' for more details
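
Two observations on the log, hedged since I cannot see the serverless.yml: the "must be object" warnings match the serverless-plugin-warmup v5+ schema, where warmers are named objects rather than flat keys. Roughly like this (the warmer name "default" is the plugin's convention; the function name transformImage is taken from the log):

custom:
  warmup:
    default:                      # named warmer; v5+ requires this nesting
      enabled: true
      events:
        - schedule: rate(5 minutes)
      timeout: 20

functions:
  transformImage:
    handler: handler.lambda_handler
    warmup:
      default:
        enabled: true

The actual deploy failure is the pip install step exiting with code 1 inside the lambci/lambda:build-python3.6 container; a common cause is that the full torch wheel is too large for the build or has no wheel published for Python 3.6, so checking the container's pip output and trying a CPU-only torch wheel or a newer Python runtime is worth a look.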