-1

I need help with changing the lip color of a person in a video using Mediapipe. I've used Mediapipe for facial landmark detection and tracking, but I'm not sure how to proceed with changing the lip color. I couldn't find any resources on how to achieve this in the Mediapipe documentation.

This has more to do with OpenCV than Mediapipe. You might want to search for how to fill a polygon using cv2.fillPoly. You will need the landmarks to define the contour; you can refer to this image here to find which landmarks to use.
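For illustration, here is a minimal sketch of that idea (the helper name and arguments are placeholders, not something from this thread): collect the landmark coordinates into a polygon and fill it with cv2.fillPoly.

import cv2
import numpy as np

def mask_from_landmarks(image, landmarks, indices):
    """Fill the polygon defined by the chosen landmark indices into a binary mask."""
    h, w = image.shape[:2]
    points = np.array(
        [(int(landmarks[i].x * w), int(landmarks[i].y * h)) for i in indices],
        dtype=np.int32,
    )
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.fillPoly(mask, [points], 255)  # fillPoly handles non-convex contours
    return mask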

I'm using Python and OpenCV, running the code on Google Colab. I did try the method suggested by @fadiaburaid, but the result was not up to the mark. The polygons seem to dance because the coordinates detected by Mediapipe change continuously between frames, and the polygons drawn on the image look visibly heterogeneous. I tried feathering, but it didn't bring the quality of the results to an acceptable level.

Any suggestions to improve and stabilize the polygon blending are welcome!
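One technique that often helps with this kind of jitter (a sketch under assumptions, not something from this thread) is to smooth the detected landmark coordinates over time with an exponential moving average before drawing the polygons; the class name below is illustrative.

import numpy as np

class LandmarkSmoother:
    """Exponential moving average over a fixed-size set of (x, y) landmark points."""
    def __init__(self, alpha=0.4):
        self.alpha = alpha   # smaller alpha = smoother but laggier
        self.state = None    # previous smoothed points, shape (N, 2)

    def smooth(self, points):
        points = np.asarray(points, dtype=np.float32)
        if self.state is None or self.state.shape != points.shape:
            self.state = points
        else:
            self.state = self.alpha * points + (1.0 - self.alpha) * self.state
        return self.state.round().astype(np.int32)

# Per frame: smooth the raw lip points before cv2.fillPoly, e.g.
# smoother = LandmarkSmoother(alpha=0.4)
# stable_points = smoother.smooth(points_upper)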

Face Cropping

from google.colab import output
from google.colab.patches import cv2_imshow

import cv2
import mediapipe as mp

# Load the MediaPipe Face Detection model
mp_face_detection = mp.solutions.face_detection

# Initialize the Face Detection model
face_detection = mp_face_detection.FaceDetection()

# Load the image
image = cv2.imread('/content/wallpaper.png')

# Convert the image to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Detect faces in the image
results = face_detection.process(image)

# Get the first detected face
face = results.detections[0]

# Get the bounding box of the face
x1 = int(face.location_data.relative_bounding_box.xmin * image.shape[1])
y1 = int(face.location_data.relative_bounding_box.ymin * image.shape[0])
x2 = int(x1 + face.location_data.relative_bounding_box.width * image.shape[1])
y2 = int(y1 + face.location_data.relative_bounding_box.height * image.shape[0])

# Calculate the size of the square bounding box
size = max(x2 - x1, y2 - y1)

# Calculate the center of the bounding box
center_x = (x1 + x2) // 2
center_y = (y1 + y2) // 2

# Calculate the coordinates of the square bounding box
x1_square = center_x - size // 2
y1_square = center_y - size // 2

x2_square = x1_square + size
y2_square = y1_square + size

# Crop the square face region from the original image
square_face_region = image[y1_square:y2_square, x1_square:x2_square]

resized_image = cv2.resize(square_face_region, (480, 480))
resized_image_bgr = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)

# Save the image
cv2.imwrite('resized_image.jpg', resized_image_bgr)
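One caveat with the crop above (a hedged note, not from the thread): when the face sits near a frame edge, x1_square or y1_square can be negative, and the slice then silently returns a smaller or empty crop. A minimal sketch of clamping the square to the image bounds, with an illustrative helper name:

def clamp_square(x1, y1, size, img_w, img_h):
    """Keep a size x size crop window fully inside an img_w x img_h image."""
    size = min(size, img_w, img_h)        # the square cannot exceed the image
    x1 = max(0, min(x1, img_w - size))    # shift the window back inside
    y1 = max(0, min(y1, img_h - size))
    return x1, y1, x1 + size, y1 + size

# x1s, y1s, x2s, y2s = clamp_square(x1_square, y1_square, size,
#                                   image.shape[1], image.shape[0])
# square_face_region = image[y1s:y2s, x1s:x2s]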

Mask Generation

import itertools
import numpy as np

# Load the MediaPipe Face Mesh model
mp_face_mesh = mp.solutions.face_mesh

# Initialize the Face Mesh model
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, refine_landmarks=True, min_detection_confidence=0.5)

image = resized_image_bgr

# Define the lip landmark indices: upper_new traces the outer lip contour, lower_new the inner contour
# LIPS = list(set(itertools.chain(*mp_face_mesh.FACEMESH_LIPS)))

# upper = [409,405,375,321,314,267,269,270,291,146,181,185,91,84,61,37, 39, 40,0,17]
# lower = [402,415,312,311,310,308,324,318,317,178,191,80, 81, 82,87, 88,95,78,13, 14]

upper_new = [0,267,269,270,409,291,375,321,405,314,17,84,181,91,146,61,185,40,39,37]
lower_new = [13,312,311,310,415,308,324,318,402,317,14,87,178,88,95,78,191,80,81,82]


# Detect the face landmarks (Face Mesh expects an RGB image, so convert from BGR)
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

# Create an empty mask with the same shape as the image
mask_upper = np.zeros(image.shape[:2], dtype=np.uint8)

# Draw white polygons on the mask using the upper landmarks
for face_landmarks in results.multi_face_landmarks:
    points_upper = []
    for i in upper_new:
        landmark = face_landmarks.landmark[i]
        x = int(landmark.x * image.shape[1])
        y = int(landmark.y * image.shape[0])
        points_upper.append((x, y))
    # Use fillPoly here too: the outer lip contour is not convex
    cv2.fillPoly(mask_upper, [np.int32(points_upper)], 255)


# Create an empty mask with the same shape as the image
mask_lower = np.zeros(image.shape[:2], dtype=np.uint8)

# Draw white polygons on the mask using the lower landmarks
for face_landmarks in results.multi_face_landmarks:
    points_lower = []
    for i in lower_new:
        landmark = face_landmarks.landmark[i]
        x = int(landmark.x * image.shape[1])
        y = int(landmark.y * image.shape[0])
        points_lower.append((x, y))
    cv2.fillPoly(mask_lower, np.int32([points_lower]), 255)

# Subtract the lower mask from the upper mask
mask_diff = cv2.subtract(mask_upper, mask_lower)

# Apply morphology operations to smooth mask 
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
mask_diff = cv2.morphologyEx(mask_diff, cv2.MORPH_OPEN, kernel)
mask_diff = cv2.morphologyEx(mask_diff, cv2.MORPH_CLOSE, kernel)

cv2_imshow(mask_diff)

Mask Blending

# Convert the mask to 3 channels
mask_diff_3ch = cv2.cvtColor(mask_diff, cv2.COLOR_GRAY2BGR)

image = cv2.imread('/content/resized_image.jpg')

# Apply the mask to the original image
masked_image = cv2.bitwise_and(image, mask_diff_3ch)

cv2_imshow(masked_image)

def create_colored_mask(hex_color, shape):
    # Convert the hex color code to an RGB tuple
    rgb_color = tuple(int(hex_color[i:i+2], 16) for i in (0, 2 ,4))
    
    # Create a blank mask with the given shape
    colored_mask = np.zeros(shape, dtype=np.uint8)
    
    # Set the color channels according to the chosen RGB color
    colored_mask[:,:,0] = rgb_color[2]
    colored_mask[:,:,1] = rgb_color[1]
    colored_mask[:,:,2] = rgb_color[0]
    
    return colored_mask

# Create a 3-channel version of your mask_diff array
mask_diff_3ch = cv2.cvtColor(mask_diff, cv2.COLOR_GRAY2BGR)

# Ask the user to enter a hex color code for their mask
hex_color = input('Enter a hex color code for your mask (e.g. FF0000 for red): ')

# Create a colored mask with the chosen hex color and same shape as your original mask
colored_mask = create_colored_mask(hex_color, mask_diff_3ch.shape)

# Apply the colored mask where your original mask is True
masked_image = cv2.bitwise_and(colored_mask, colored_mask, mask=mask_diff)

# Superimpose the colored mask on your original image
final_image = cv2.addWeighted(image, 1, masked_image, 1, 0)

cv2_imshow(final_image)

I'm getting the following results from the above code, but I want a much higher-quality result for both video and photo input.

Input Image

Cropped Input Image

Mask Image

Final Image with Masking

  • See https://stackoverflow.com/questions/71860084/how-can-i-change-the-color-of-the-lip-that-got-its-landmarks-without-disturbing/71864126?r=SearchResults&s=1%7C35.9414#71864126 – fmw42 Mar 20 '23 at 19:24
  • Tried it; the result was poor. – Kirk Bentish Mar 20 '23 at 19:39
  • Post your input image, lip mask and the new color – fmw42 Mar 20 '23 at 21:59
  • check links at the end – Kirk Bentish Mar 21 '23 at 19:28
  • Your mask scale and image size do not match the scale of the lips in the input image you posted. How is anyone else supposed to match them without further information from you? Give me matching scales and sizes and I will try to do what I suggested. – fmw42 Mar 21 '23 at 20:57
  • At least go through the code that I've written. The first part, "Face Cropping", crops the image so it contains only the face. That image is used to generate the mask and then the final image. – Kirk Bentish Mar 21 '23 at 21:16
  • Link to facial Landmark positioning https://github.com/google/mediapipe/blob/a908d668c730da128dfa8d9f6bd25d519d006692/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png – Kirk Bentish Mar 21 '23 at 21:17
  • I do not have access to your tools nor do I know them. So I can only do the recoloring and blending, but only if you provide a mask that is the same size and scale as your image. That is the best that I can do for you. – fmw42 Mar 21 '23 at 21:45
  • done. added at the end. – Kirk Bentish Mar 22 '23 at 13:24

3 Answers

0

Here is a slight variation on, and I hope an improvement over, the method I posted in "How can I change the color of the lip that got its landmarks without disturbing its texture?" in OpenCV Python.

The main differences are:

1) The mask is provided, but does not match the lips as well as possible, so I dilate it a little.

2) I changed cv2.add to cv2.addWeighted to blend the new color with the lips. The weight on the new color determines how much lip color is applied; here I mix the image (weight 1) with the new color (weight 0.75). Change the 0.75 as desired.

3) I increased the anti-alias distance on the mask to get a soft blend at the edges of the lips.

Input:

Mask:

import cv2
import numpy as np
import skimage.exposure

# specify desired bgr color for lips and make into array
desired_color = (170, 130, 255)
desired_color = np.asarray(desired_color, dtype=np.float64)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
img = cv2.imread("lady2.jpg")

# read mask
mask = cv2.imread("lady2_mask.png", cv2.IMREAD_GRAYSCALE)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# compute difference colors and make into an image the same size as input
diff_color = desired_color - ave_color
diff_color = np.full_like(img, diff_color, dtype=np.uint8)

# shift input image color
new_img = cv2.addWeighted(img, 1.0, diff_color, 0.75, 0)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=15, sigmaY=15, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady2_swatch.png', swatch)
cv2.imwrite('lady2_mask_antialiased.png', (255*mask).clip(0,255).astype(np.uint8))  # renamed so the input mask file is not overwritten
cv2.imwrite('lady2_recolor.jpg', result)

cv2.imshow('swatch', swatch)
cv2.imshow('mask', mask)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

New Lip Color Swatch:

Result:

fmw42
  • I cannot comment about speed; that's not my expertise. Do you actually want blue and green lips? Why? Those are not really good colors for women. Have you tried changing the lip colors to blue and green? Do they not work? – fmw42 Mar 22 '23 at 21:41
  • This method works great when we have to change a dark pink lip color to a light pink lip color, but it fails miserably when I want the lip color to be bright yellow, blue, or green. This was highlighted in earlier comments too. Please check for yourself. – Kirk Bentish Mar 22 '23 at 21:47
  • Yes, I just tried and got a whitish shade when I used (255,0,0) as the desired color. – Kirk Bentish Mar 22 '23 at 21:48
  • Update: the mask gets bolder after the "# dilate mask to make it better fit the lips" step, but then completely disappears after the "# antialias mask, convert to float in range 0 to 1 and make 3-channels" step. I'm getting a black square from imshow(mask). – Kirk Bentish Mar 22 '23 at 21:53
  • Yes, something got changed and I cannot reproduce. I will work to see what happened. – fmw42 Mar 22 '23 at 22:00
  • Desired Color - (159, 31, 25) Link to result image - https://ibb.co/XC1zJvF – Kirk Bentish Mar 22 '23 at 22:02
  • Somehow my sigmas in the anti-aliasing got changed from 5 to 15 (too much blur). I put them back to 5 and adjusted the addWeighted a little. The result is not quite as pink as it was before, but it seems to work for pink. It does not work for green, as you suggested. I will have to work on a better method than simple addition for the colors, likely in HSV colorspace. – fmw42 Mar 22 '23 at 22:10
0

Here is a revised script that works better for most colors. It does the color difference in HSV color space.

Input:

Mask:

import cv2
import numpy as np
import skimage.exposure

# specify desired bgr color for lips and make into array
#desired_color = (170,130,255)    # pink
#desired_color = (255,0,0)        # blue
desired_color = (0,255,0)         # green

print(desired_color)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
img = cv2.imread("lady2.jpg")

# read mask
mask = cv2.imread("lady2_mask.png", cv2.IMREAD_GRAYSCALE)


# convert input to HSV and separate channels
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_img)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips as array
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# create 1 pixel image of average color
ave_color_img = np.full((1,1,3), ave_color, dtype=np.float32)
print(ave_color_img)

# create 1 pixel image of desired color
desired_color_img = np.full((1,1,3), desired_color, dtype=np.float32)
print(desired_color_img)

# convert desired color image to HSV
desired_hsv = cv2.cvtColor(desired_color_img, cv2.COLOR_BGR2HSV)

# convert average color image to HSV
ave_hsv = cv2.cvtColor(ave_color_img, cv2.COLOR_BGR2HSV)

# compute difference in HSV color arrays and separate channel values
diff_hsv = desired_hsv - ave_hsv
diff_h, diff_s, diff_v = cv2.split(diff_hsv)
print(diff_hsv)

# shift input image color
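# note: for float32 inputs cv2.cvtColor(..., cv2.COLOR_BGR2HSV) returns hue in
# the 0-360 range, while the uint8 image's hue channel is 0-179, hence diff_h/2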
hnew = np.mod(h + diff_h/2, 180).astype(np.uint8)
snew = (s + diff_s).clip(0,255).astype(np.uint8)
vnew = (v + diff_v).clip(0,255).astype(np.uint8)

# merge channels back to HSV image
hsv_new = cv2.merge([hnew,snew,vnew])

# convert new HSV image to BGR
new_img = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=5, sigmaY=5, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask 
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady2_swatch.png', swatch)
cv2.imwrite('lady2_recolor.jpg', result)

cv2.imshow('swatch', swatch)
cv2.imshow('mask', mask)
cv2.imshow('new_img', new_img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

Pink Result:

Blue Result:

Green Result:

fmw42
  • Ran exactly the same code. Got this result - https://ibb.co/n3tLBZr – Kirk Bentish Mar 23 '23 at 11:09
  • Can you try sharing your Google Colab notebook? That would settle where the difference in results comes from. – Kirk Bentish Mar 23 '23 at 11:10
  • I am not using a Google Colab notebook. I ran this as a simple Python script in Python 3.9. Perhaps someone else can try my code and see if they reproduce my answer. (Did you threshold the mask? I kept it grayscale.) – fmw42 Mar 23 '23 at 15:31
0

Here is a further improvement that adds gains on the saturation and brightness so that one can deepen or lighten the colors. In the following I use sfact=3 and vfact=1.5 to make a deeper green color.

import cv2
import numpy as np
import skimage.exposure

# specify desired bgr color for lips and make into array
#desired_color = (170,130,255)    # pink
#desired_color = (255,0,0)        # blue
desired_color = (0,255,0)         # green

print(desired_color)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
img = cv2.imread("lady2.jpg")

# read mask
mask = cv2.imread("lady2_mask.png", cv2.IMREAD_GRAYSCALE)


# convert input to HSV and separate channels
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_img)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips as array
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# create 1 pixel image of average color
ave_color_img = np.full((1,1,3), ave_color, dtype=np.float32)
print(ave_color_img)

# create 1 pixel image of desired color
desired_color_img = np.full((1,1,3), desired_color, dtype=np.float32)
print(desired_color_img)

# convert desired color image to HSV
desired_hsv = cv2.cvtColor(desired_color_img, cv2.COLOR_BGR2HSV)

# convert average color image to HSV
ave_hsv = cv2.cvtColor(ave_color_img, cv2.COLOR_BGR2HSV)

# compute difference in HSV color arrays and separate channel values
diff_hsv = desired_hsv - ave_hsv
diff_h, diff_s, diff_v = cv2.split(diff_hsv)
print(diff_hsv)

# shift input image color
sfact=3
vfact=1.5
hnew = np.mod(h + diff_h/2, 180).astype(np.uint8)
snew = (sfact*(s + diff_s)).clip(0,255).astype(np.uint8)
vnew = (vfact*(v + diff_v)).clip(0,255).astype(np.uint8)

# merge channels back to HSV image
hsv_new = cv2.merge([hnew,snew,vnew])

# convert new HSV image to BGR
new_img = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=5, sigmaY=5, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask 
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady2_swatch.png', swatch)
cv2.imwrite('lady2_recolor.jpg', result)

cv2.imshow('swatch', swatch)
cv2.imshow('mask', mask)
cv2.imshow('new_img', new_img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

Deep Green Result:

fmw42