I am trying to make a Python script that records my entire screen at a 70x34 resolution, cutting off as little as possible on a 16:9 monitor. The example code below uses the "mss" library to take screenshots of the monitor and display them in a loop through cv2 as a test. The problem I'm facing is that mss's grab function requires a bounding box, which defines the area captured for the screenshot. How would I get this code to open a small window showing pretty much my entire screen at a low resolution, instead of only a small portion at high resolution? For anyone wondering: in the full script I won't be displaying the video in a window, just retrieving its pixels to process.
import cv2
import numpy as np
from mss import mss
from PIL import Image

bounding_box = {'top': 0, 'left': 0, 'width': 500, 'height': 500}

sct = mss()

while True:
    sct_img = sct.grab(bounding_box)
    cv2.imshow('screen', np.array(sct_img))  # mss screenshots convert straight to a numpy array
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        cv2.destroyAllWindows()
        break
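In the meantime, here is a rough sketch of the direction I'm considering: grab the whole monitor from mss's monitors list and downscale with cv2.resize (the monitors[1] index for the primary display and the INTER_AREA choice are assumptions on my part, not tested beyond this):

import cv2
import numpy as np
from mss import mss

sct = mss()
monitor = sct.monitors[1]  # geometry of the primary monitor; monitors[0] spans all displays

while True:
    frame = np.array(sct.grab(monitor))  # full-resolution BGRA frame
    # INTER_AREA averages pixel blocks, which tends to look best when shrinking this hard
    small = cv2.resize(frame, (70, 34), interpolation=cv2.INTER_AREA)
    cv2.imshow('screen', small)  # tiny preview window
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        cv2.destroyAllWindows()
        break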
edit: Alright, so I am now resizing the image to my desired size and then handling it, but I ran into another issue. What I'm trying to do is send an emoji representation of the image through a websocket, where each pixel has a corresponding emoji. My problem is that the number of emojis is way higher than the number of pixels (70x34 = 2380): 4725, to be exact.
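A quick check with a placeholder string (one character per pixel) reproduces both numbers, and makes me suspect the row slicing in turnascii below, where the step is imgHeight + 1 (35) instead of imgWidth (70), so consecutive slices overlap:

imgWidth, imgHeight = 70, 34
pixels = "x" * (imgWidth * imgHeight)  # 2380 placeholder "emojis", one per pixel

# 70-character slices taken every 35 characters overlap each other
buggy = [pixels[i:i + imgWidth] for i in range(0, len(pixels), imgHeight + 1)]
# 70-character slices taken every 70 characters tile the string exactly once
fixed = [pixels[i:i + imgWidth] for i in range(0, len(pixels), imgWidth)]

print(len("".join(buggy)))  # 4725 (67 full rows of 70, plus a final slice of 35)
print(len("".join(fixed)))  # 2380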
import numpy as np
import cv2
from mss import mss
from PIL import Image
from math import sqrt
import asyncio
import websockets as ws
# settings:
imgWidth = 70
imgHeight = 34
# vars:
emojis = [['', (255, 204, 77)], ['', (218, 47, 71)], ['', (191, 105, 82)], ['✅', (119, 178, 85)], ['❤', (187, 26, 52)], ['', (93, 173, 236)], ['', (120, 177, 89)], ['', (253, 203, 88)], ['', (170, 142, 214)], ['⚫', (0, 0, 0)], ['', (204, 214, 221)], ['', (244, 171, 186)], ['', (255, 217, 131)], ['', (244, 144, 12)], ['', (217, 158, 130)], ['', (255, 136, 108)], ['', (166, 211, 136)], ['', (146, 102, 204)], ['', (59, 136, 195)], ['', (234, 89, 110)], ['', (189, 221, 244)], ['', (204, 62, 83)], ['', (253, 216, 136)], ['', (92, 145, 59)], ['', (221, 46, 68)], ['', (153, 170, 181)], ['', (154, 78, 28)], ['', (102, 117, 127)], ['⚪', (255, 255, 255)]]
bounding_box = {'top': 0, 'left': 0, 'width': 1920, 'height': 1080}
# functions:
def closest_color(rgb):  # pick the emoji whose palette color is closest (Euclidean RGB distance)
    r, g, b = rgb
    color_diffs = []
    for emoji, color in emojis:
        cr, cg, cb = color
        color_diff = sqrt((r - cr)**2 + (g - cg)**2 + (b - cb)**2)
        color_diffs.append((color_diff, emoji))
    return min(color_diffs)[1]
def turnascii(image):
    image = image.convert("RGB")
    pixels = image.getdata()
    newPixels = [closest_color(pixel) for pixel in pixels]
    newPixels = ''.join(newPixels)
    newPixels_count = len(newPixels)
    print("pixels: " + str(newPixels_count))
    # split into rows of imgWidth characters; note the step here is imgHeight + 1 (35), not imgWidth
    asciiImage = [newPixels[index:index + imgWidth] for index in range(0, newPixels_count, imgHeight + 1)]
    asciiImage = "".join(asciiImage)
    return asciiImage
# init
sct = mss()
newFrame = ""
async def echo(websocket, path):
    async for message in websocket:
        while True:
            img = sct.grab(bounding_box)
            img = cv2.resize(np.array(img), (imgWidth, imgHeight))
            img = Image.fromarray(img)
            newFrame = turnascii(img)
            print("emojis: " + str(len(newFrame)))
            await websocket.send(newFrame)
# start server
print("port 1874")
start_server = ws.serve(echo, "localhost", 1874)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
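For reference, here is a sketch of the variant I expect to produce exactly 2380 emojis, reusing the names from the script above (turnascii_fixed is just an illustrative name, and the BGRA-to-RGB conversion is my assumption, since mss frames are BGRA while the palette tuples in emojis look like RGB):

def turnascii_fixed(image):
    # same idea as turnascii, but the rows step by imgWidth so each pixel appears exactly once
    image = image.convert("RGB")
    newPixels = ''.join(closest_color(pixel) for pixel in image.getdata())
    rows = [newPixels[index:index + imgWidth] for index in range(0, len(newPixels), imgWidth)]
    return "".join(rows)  # imgWidth * imgHeight = 2380 emojis

# capture with an explicit BGRA -> RGB conversion before the PIL round-trip
img = np.array(sct.grab(bounding_box))  # mss returns BGRA pixel data
img = cv2.resize(img, (imgWidth, imgHeight), interpolation=cv2.INTER_AREA)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGRA2RGB))
newFrame = turnascii_fixed(img)  # len(newFrame) == 2380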