
I want to turn a linear video into a long panorama, either directly as one long panorama or broken into many smaller panoramas stacked together. The video looks like this. What is the best approach? I have tried stitching a few frames at a time, but I am not getting a usable stitch. Is there a particular approach worth pursuing? The video is 60 seconds long and can be broken into frames at any fps.
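For reference, this is roughly how the frames can be extracted at a chosen fps (a minimal sketch; the video path, output folder, and target fps below are placeholders, not my actual values):

import cv2
import os

VIDEO_PATH = "input.mp4"   # placeholder path to the 60-second video
OUT_DIR = "frames"         # placeholder folder for the extracted frames
TARGET_FPS = 2             # placeholder extraction rate

os.makedirs(OUT_DIR, exist_ok=True)
cap = cv2.VideoCapture(VIDEO_PATH)
video_fps = cap.get(cv2.CAP_PROP_FPS) or TARGET_FPS
step = max(1, round(video_fps / TARGET_FPS))  # keep every step-th frame

frame_idx = saved = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if frame_idx % step == 0:
        cv2.imwrite(os.path.join(OUT_DIR, f"frame{saved:04d}.jpg"), frame)
        saved += 1
    frame_idx += 1
cap.release()

This is my code so far: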

import cv2
import numpy as np
import glob
import imutils


# DEFINE THE HELPER FUNCTIONS

def draw_matches(img1, keypoints1, img2, keypoints2, matches):
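    # Debugging helper: place img1 and img2 side by side and draw lines between matched keypoints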
    r, c = img1.shape[:2]
    r1, c1 = img2.shape[:2]

    # Create a blank image with the size of the first image + second image
    output_img = np.zeros((max([r, r1]), c + c1, 3), dtype='uint8')
    output_img[:r, :c, :] = np.dstack([img1])
    output_img[:r1, c:c + c1, :] = np.dstack([img2])

    # Go over all of the matching points and extract them
    for match in matches:
        img1_idx = match.queryIdx
        img2_idx = match.trainIdx
        (x1, y1) = keypoints1[img1_idx].pt
        (x2, y2) = keypoints2[img2_idx].pt

        # Draw circles on the keypoints
        cv2.circle(output_img, (int(x1), int(y1)), 4, (0, 255, 255), 1)
        cv2.circle(output_img, (int(x2) + c, int(y2)), 4, (0, 255, 255), 1)

        # Connect the same keypoints
        cv2.line(output_img, (int(x1), int(y1)), (int(x2) + c, int(y2)), (0, 255, 255), 1)

    return output_img


def warpImages(img1, img2, H):
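    # Warp img2 by the homography H onto a canvas large enough to hold both images, then paste img1 on top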
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    list_of_points_1 = np.float32([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
    temp_points = np.float32([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)

    # Project the corners of img2 through the homography to see where they land
    # in img1's coordinate frame
    list_of_points_2 = cv2.perspectiveTransform(temp_points, H)

    list_of_points = np.concatenate((list_of_points_1, list_of_points_2), axis=0)

    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)

    translation_dist = [-x_min, -y_min]
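    # Translating by (-x_min, -y_min) keeps the warped result inside the output canvas (no negative coordinates)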

    H_translation = np.array([[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])

    output_img = cv2.warpPerspective(img2, H_translation.dot(H), (x_max - x_min, y_max - y_min))
    output_img[translation_dist[1]:rows1 + translation_dist[1], translation_dist[0]:cols1 + translation_dist[0]] = img1
    # print(output_img)

    return output_img

# End of function definitions

# Main program begins here

# Define input and output paths
input_path = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Test images for final/Highfps2fps/*.jpg"

# Define whatever variables necessary

input_img = glob.glob(input_path)
img_path = sorted(input_img)
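# Resize every input frame to 400x300; note that this overwrites the original files on disk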
for i in range(0, len(img_path)):
    img = cv2.imread(img_path[i])
    img = cv2.resize(img, (400, 300))
    cv2.imwrite(img_path[i], img)
tmp = img_path[0]
flag = True
pano = []
i = 1
count = 0
indices = []
k = 1

while i < len(img_path):
    indices.append(i)
    print(i)
    count += 1
    if flag:
        # Note: cv2.COLOR_BGR2GRAY is a cvtColor code, not an imread flag, so just read normally
        img1 = cv2.imread(tmp)
        flag = False
    img2 = cv2.imread(img_path[i])

    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img2, None)

    # Create a brute-force matcher; it matches ORB descriptors between
    # the two images using the Hamming distance
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    # Find matching points
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    # Keep only the good matches using Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 15

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result = warpImages(img2, img1, M)
        img1 = result

    i += 1

    # After processing 8 frames, finish the current partial panorama,
    # skip 12 frames ahead, and start a new one from the next frame
    if count % 8 == 0:
        i += 12
        count = 0
        """stitched = img1
        print(np.shape(stitched))
        stitched = cv2.copyMakeBorder(stitched, 10, 10, 10, 10,
                                      cv2.BORDER_CONSTANT, (0, 0, 0))

        gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]

        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)

        mask = np.zeros(thresh.shape, dtype="uint8")
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)

        minRect = mask.copy()
        sub = mask.copy()

        while cv2.countNonZero(sub) > 0:
            minRect = cv2.erode(minRect, None)
            sub = cv2.subtract(minRect, thresh)

        cnts = cv2.findContours(minRect.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)
        (x, y, w, h) = cv2.boundingRect(c)
        
        stitched = stitched[y:y + h, x:x + w]
"""
        #pano.append(stitched)
        result = cv2.resize(result, (1080, 720))

        #cv2.imwrite(f"Test images for final/Highfps2fps/temp_pano/frame{k}.jpg", stitched)
        k += 1
        try:
            img1 = cv2.imread(img_path[i])
            i = i + 1
            img1 = cv2.resize(img1, (400, 300))
            cv2.imshow("Stitch", result)
            cv2.waitKey(0)
            indices = []
            print(np.shape(img1))
        except (IndexError, cv2.error):
            continue
# Default j so the final display further down is skipped when there are no leftover frames
j = 100000
if len(indices) == 8:
    indices = [0]

if indices and indices[0] != 0:
    i = 0
    print(indices)
    j = indices[i]
    temp = img_path[j]

if j == (len(img_path) - 1):
    img_1 = cv2.imread(temp)
i = 1
flag1 = True
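# Stitch the leftover frames (if any) collected after the last full batch of 8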
while i < len(indices):
    if flag1:
        # As above, cv2.COLOR_BGR2GRAY is not an imread flag, so just read normally
        img_1 = cv2.imread(temp)
        flag1 = False
    # Index into img_path via the stored leftover indices, and keep img_1/img_2
    # separate from img1/img2 used in the first loop
    j = indices[i]
    img_2 = cv2.imread(img_path[j])

    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img_1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img_2, None)

    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    # Keep only the good matches using Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 10

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result1 = warpImages(img_2, img_1, M)
        img_1 = result1

    i += 1
if j != 100000:
    img_1 = cv2.resize(img_1, (400, 300))
    #cv2.imwrite(f"Test images for final/Highfps2fps/temp_pano/frame{k}.jpg", img_1)
    cv2.imshow("Last pano", img_1)
    cv2.waitKey(0)

# stacking all the tiny panoramas
input_path = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Test images for final/Highfps2fps/temp_pano/*.jpg"
output_path = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Output/New/pano10.jpg"

list_images = glob.glob(input_path)
list_sorted = sorted(list_images)

images = []
for image in list_sorted:
    img = cv2.imread(image)
    img = cv2.resize(img, (1280, 720))
    # cv2.imshow(f"{image}", img)
    images.append(img)

# hconcat needs every image to have the same height, which the resize above guarantees
final_image = cv2.hconcat(images)
#final_image = cv2.resize(final_image, (2000,1500))
cv2.imshow("Acceptable",final_image)
cv2.waitKey(0)
#cv2.imwrite(output_path, final_image)
