This part of the code applies a PySimpleGUI front end to YOLO.
I'd like to use a thread in this code:
I want to run a thread alongside the 'while' loop and print a sentence every 2.5 seconds.
However, the function currently runs on every loop iteration instead of every 2.5 seconds.
How can I make it print every 2.5 seconds while the loop keeps running?
import PySimpleGUIQt as sg
##########thread test########################
def print_text():
    """Print a heartbeat message and reschedule itself every 2.5 seconds.

    Call this ONCE, before entering the main video loop — NOT inside the
    loop. Each invocation prints immediately and arms a ``threading.Timer``
    that re-invokes this function 2.5 s later, so a single initial call
    produces one print every 2.5 s no matter how fast the loop spins.
    (Calling it on every loop iteration, as the loop below does, spawns a
    new timer chain per frame and floods stdout.)

    Returns:
        threading.Timer: the armed timer, so the caller can ``.cancel()``
        the pending reschedule on shutdown.
    """
    print("yolo run")
    timer = threading.Timer(2.5, print_text)
    # Daemon thread: don't keep the process alive after the GUI window closes.
    timer.daemon = True
    timer.start()
    return timer
################################################
The thread-test helper above is the function I want to run every 2.5 seconds; the main video loop follows.
# loop over frames from the video file stream
# GUI window has not been created yet; the first processed frame creates it.
win_started = False
# Open the default webcam (device 0) only when the webcam source was chosen;
# otherwise frames are read from the video-file stream `vs` inside the loop,
# and `cap` is intentionally left undefined.
if use_webcam:
    cap = cv2.VideoCapture(0)
while True:
print_text()###########################################test thread
# NOTE(review): print_text() above is invoked once per loop iteration, which is why it fires every frame — start the timer ONCE, before the `while` loop, and let threading.Timer reschedule it every 2.5 s.
# read the next frame from the file or webcam
if use_webcam:
grabbed, frame = cap.read()
else:
grabbed, frame = vs.read()
# if the frame was not grabbed, then we have reached the end
# of the stream
if not grabbed:
break
# if the frame dimensions are empty, grab them
if W is None or H is None:
(H, W) = frame.shape[:2]
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# initialize our lists of detected bounding boxes, confidences,
# and class IDs, respectively
boxes = []
confidences = []
classIDs = []
# apply non-maxima suppression to suppress weak, overlapping
# bounding boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, gui_confidence, gui_threshold)
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the frame
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
text = "{}: {:.4f}".format(LABELS[classIDs[i]],
confidences[i])
cv2.putText(frame, text, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
imgbytes = cv2.imencode('.png', frame)[1].tobytes() # ditto