
I have been facing this problem for some days: I need to remove this image/pattern from images like this or this using OpenCV. I know that this is a template matching problem and that I have to use filters (like Canny) and "slide" the template over the image, once both have been transformed by the filters.

I tried some solutions like this or this, but I had poor results; for example, applying the second method I obtain these images: 1 2

This is my code:

import cv2
import numpy as np

# Resizes an image while maintaining the aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size and initialize dimensions
    dim = None
    (h, w) = image.shape[:2]

    # Return original image if no need to resize
    if width is None and height is None:
        return image

    # We are resizing height if width is none
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # We are resizing width if height is none
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Return the resized image
    return cv2.resize(image, dim, interpolation=inter)

# Load template, convert to grayscale, perform canny edge detection
template = cv2.imread(r'C:\Users\Quirino\Desktop\Reti\Bounding_box\Checkboard.jpg')
template = cv2.resize(template, (640,480))
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
# cv2.imshow("template", template)

# Load original image, convert to grayscale
original_image = cv2.imread(r'F:\ARCHAIDE\Appearance\Data_Archaide_Complete\MTL_G6\MTL_G6_MMO090.jpg')
# original_image = cv2.resize(original_image, (640,480))
final = original_image.copy()
gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
found = None

# Dynamically rescale image for better template matching
for scale in np.linspace(0.2, 1.0, 20)[::-1]:

    # Resize image to scale and keep track of ratio
    resized = maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))
    r = gray.shape[1] / float(resized.shape[1])

    # Stop if template image size is larger than resized image
    if resized.shape[0] < tH or resized.shape[1] < tW:
        break

    # Detect edges in resized image and apply template matching
    canny = cv2.Canny(resized, 50, 200)
    detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)
    (_, max_val, _, max_loc) = cv2.minMaxLoc(detected)

    # Uncomment this section for visualization
    '''
    clone = np.dstack([canny, canny, canny])
    cv2.rectangle(clone, (max_loc[0], max_loc[1]), (max_loc[0] + tW, max_loc[1] + tH), (0,255,0), 2)
    cv2.imshow('visualize', clone)
    cv2.waitKey(0)
    '''
    # Keep track of correlation value
    # Higher correlation means better match
    if found is None or max_val > found[0]:
        found = (max_val, max_loc, r)

# Compute coordinates of bounding box
(_, max_loc, r) = found
(start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))
(end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))

# Draw bounding box on ROI to remove (at full resolution), then resize for display
cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0,255,0), 2)
original_image = cv2.resize(original_image, (640,480))
cv2.imshow('detected', original_image)

# Erase unwanted ROI (Fill ROI with white)
cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255,255,255), -1)
final = cv2.resize(final, (640,480))
cv2.imshow('final', final)
cv2.waitKey(0)

What could I try?

**20230207 EDIT**

I tried the method below and it works great on about 80% of the images, but in some cases it doesn't recognize the checkerboard correctly and masks something else, as you can see in this or this, and in other cases the checkerboard is recognized and covered only partially, like this.
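One idea for the failing cases (just a sketch, not validated on the dataset; the pattern size is an assumption) is to let OpenCV search for the chessboard pattern directly with cv2.findChessboardCorners() and mask the bounding box of the detected corners:

import cv2
import numpy as np

# Sketch: detect the checkerboard directly instead of template matching.
# The pattern size (7, 7) is an ASSUMPTION about the number of internal
# corners of the printed target; adjust it to the actual checkerboard.
img = cv2.imread('MTL_G6_MMO090.jpg')  # example file name from the question
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

found, corners = cv2.findChessboardCorners(
    gray, (7, 7),
    flags=cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE)

if found:
    # Bounding box of the detected internal corners, padded to cover the outer border
    x, y, w, h = cv2.boundingRect(corners.astype(np.int32))
    pad = int(0.25 * max(w, h))
    x0, y0 = max(x - pad, 0), max(y - pad, 0)
    x1, y1 = min(x + w + pad, img.shape[1]), min(y + h + pad, img.shape[0])
    result = img.copy()
    cv2.rectangle(result, (x0, y0), (x1, y1), (255, 255, 255), -1)
    cv2.imshow('chessboard removed', result)
    cv2.waitKey(0)

If the corner detector fails on low-contrast images, the template-matching or thresholding result could still be used as a fallback.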

caesar753
  • You could try a rotation invariant method [link](https://www.geeksforgeeks.org/feature-matching-using-orb-algorithm-in-python-opencv/) – PepeChuy Jan 30 '23 at 15:44
  • Just threshold on the white outer area. Then get the external contour. Then replace with whatever color you want. – fmw42 Jan 30 '23 at 17:36
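Following up on PepeChuy's rotation-invariant suggestion above, a minimal ORB feature-matching sketch might look like the following (the file names are taken from the question and the number of matches kept is an arbitrary choice):

import cv2
import numpy as np

# Minimal ORB sketch: match keypoints between the checkerboard template and the scene
template = cv2.imread('Checkboard.jpg', cv2.IMREAD_GRAYSCALE)
scene = cv2.imread('MTL_G6_MMO090.jpg', cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create(nfeatures=2000)
kp1, des1 = orb.detectAndCompute(template, None)
kp2, des2 = orb.detectAndCompute(scene, None)

# Hamming distance is the right metric for ORB's binary descriptors
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)

# Keep the best matches and take the bounding box of their scene keypoints
good = matches[:50]
pts = np.int32([kp2[m.trainIdx].pt for m in good])
x, y, w, h = cv2.boundingRect(pts)
print('candidate checkerboard region:', x, y, w, h)

Note that the repetitive checkerboard squares produce many ambiguous matches, so this would likely need a ratio test or a RANSAC homography on top to be reliable.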

1 Answer


Here is one way to approach that in Python/OpenCV:

  • Read the input
  • Threshold on the outer white region of the checkerboard pattern using cv2.inRange()
  • Get the external contours and keep the largest
  • Get the bounding box of the largest contour
  • Get the color just outside the 4 corners of the bounding box and get its average
  • Replace the bounding box region in a copy of the input with the average color
  • Save the results

Input:


import cv2
import numpy as np

# read the input
img = cv2.imread('checks_object.jpg')

# threshold on outer white area of checkerboard pattern
lower = (210,210,210)
upper = (255,255,255)
thresh = cv2.inRange(img, lower, upper)

# get external contours and keep largest
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)

# get bounding box of big contour
x,y,w,h = cv2.boundingRect(big_contour)

# get the average color of the 4 pixels just outside of the bounding box corners
[[color1]] = img[y-1:y, x-1:x]
[[color2]] = img[y-1:y, x+w:x+w+1]
[[color3]] = img[y+h:y+h+1, x+w:x+w+1]
[[color4]] = img[y+h:y+h+1, x-1:x]

ave_color = (color1.astype(np.float32) + color2.astype(np.float32) + color3.astype(np.float32) + color4.astype(np.float32)) / 4
ave_color = ave_color.astype(np.uint8)
print(ave_color)

# fill color inside contour bounding box
result = img.copy()
result[y:y+h, x:x+w] = ave_color

# save results
cv2.imwrite('checks_object_color_filled.jpg', result)

# show results
cv2.imshow('thresh', thresh)
cv2.imshow('checks_color_filled', result)
cv2.waitKey(0)

Results:


fmw42
  • Thanks a lot for your answer, I tried your method and it works great with about 80% of the images, but in some cases it doesn't recognize the white box correctly and masks something else; I'll show an example below – caesar753 Feb 07 '23 at 17:21
  • You likely need to adjust the threshold values. If the image is too noisy and/or you have other white areas, then what you say might happen – fmw42 Feb 07 '23 at 17:29
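Building on that last comment, one way to make the threshold less brittle (a sketch only, not part of the answer above; the candidate bounds and the squareness/size test are assumptions) is to try several lower bounds and keep the first one that produces a roughly square, reasonably large contour:

import cv2
import numpy as np

img = cv2.imread('checks_object.jpg')

big_contour = None
# Try progressively looser lower bounds until a plausible checkerboard blob is found
for low in (230, 220, 210, 200, 190):
    thresh = cv2.inRange(img, (low, low, low), (255, 255, 255))
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    if not contours:
        continue
    c = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(c)
    aspect = w / float(h)
    # Accept only blobs that are roughly square and cover at least 1% of the image
    if 0.7 < aspect < 1.4 and cv2.contourArea(c) > 0.01 * img.shape[0] * img.shape[1]:
        big_contour = c
        break

if big_contour is not None:
    x, y, w, h = cv2.boundingRect(big_contour)
    print('checkerboard bounding box:', x, y, w, h)

The same color-filling step from the answer can then be applied to the accepted bounding box.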