import cv2
import numpy as np
from ultralytics import YOLO
import cvzone
import os
import time

device = 'cuda'

# Load the YOLO11 model
model = YOLO("yolo11m-pecan.pt")

# Open the video source (a video file or a webcam; here using the webcam)
cap = cv2.VideoCapture(0)

cy1 = 550          # y-coordinate of the counting line
offset = 60        # half-height of the counting band around cy1
idDict = {}        # maps track IDs that have been counted to their count number
pecanCount = 0

# Define the desired resolution
width, height = 640, 360

# Output video title
vidTitle = 'test.avi'

# Set the resolution and other properties for the camera
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

# Define the codec and create a VideoWriter object
# MJPEG format example; adjust as needed based on your camera compatibility
fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # Codec for MJPEG
output = cv2.VideoWriter('raw.avi', fourcc, 60, (width, height))  # 32.8 is about the actual FPS the camera is recording at

# Start capturing video
start_time = time.time()
frame_count = 0  # To keep track of frames for the camera

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Define the black box position (adjust as needed)
    top_left = (0, 0)
    bottom_right = (1280, 220)

    # Draw a filled black rectangle to mask out that region
    cv2.rectangle(frame, top_left, bottom_right, (0, 0, 0), thickness=-1)

    # Run YOLO11 tracking on the frame, persisting tracks between frames
    results = model.track(frame, persist=True, classes=0, device=device)
    frame_count += 1  # Increment frame count for this camera

    # Check if there are any boxes in the results
    if results[0].boxes is not None and results[0].boxes.id is not None:
        # Get the boxes (x1, y1, x2, y2), class IDs, track IDs, and confidences
        boxes = results[0].boxes.xyxy.int().cpu().tolist()        # Bounding boxes
        class_ids = results[0].boxes.cls.int().cpu().tolist()     # Class IDs
        track_ids = results[0].boxes.id.int().cpu().tolist()      # Track IDs
        confidences = results[0].boxes.conf.cpu().tolist()        # Confidence scores

        for box, class_id, track_id, conf in zip(boxes, class_ids, track_ids, confidences):
            x1, y1, x2, y2 = box
            cy = int(y1 + y2) // 2  # vertical center of the box

            if track_id in idDict:
                # Already counted: draw in white with its count number
                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 255, 255), 2)
                cvzone.putTextRect(frame, f'{idDict[track_id]}', (x1, y2), 1, 1)
            else:
                # Not yet counted: draw in green
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                # cvzone.putTextRect(frame, f'{track_id}', (x1, y2), 1, 1)
                # cv2.circle(frame, (cx, cy), 4, (255, 0, 0), -1)
                # cvzone.putTextRect(frame, f'{c}', (x1, y1), 1, 1)

            # Count the pecan the first time its center falls inside the counting band
            if (cy1 - offset) < cy < (cy1 + offset) and track_id not in idDict:
                pecanCount += 1
                idDict[track_id] = pecanCount

    # Overlay the running total and the counting lines
    cv2.putText(frame, f'Total Pecans: {pecanCount}', (50, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, [255, 255, 255], 1)
    cv2.line(frame, (0, cy1), (1280, cy1), (255, 0, 255), 2)
    cv2.line(frame, (0, cy1 + offset), (1280, cy1 + offset), (0, 0, 255), 2)
    cv2.line(frame, (0, cy1 - offset), (1280, cy1 - offset), (0, 0, 255), 2)

    output.write(frame)

    # Stop after 5 seconds of capture
    if (time.time() - start_time) > 5:
        break

# Release the capture and writer objects and close any display windows
cap.release()
output.release()
cv2.destroyAllWindows()

# Calculate and display the actual FPS for the camera
elapsed_time = time.time() - start_time
fps_actual = frame_count / elapsed_time
print(f"Total frames captured from camera: {frame_count}")
print(f"Elapsed time: {elapsed_time:.2f} seconds")
print(f"Actual frames per second for camera: {fps_actual:.2f}")

# Re-encode the raw recording at the measured FPS so playback matches real time
cap = cv2.VideoCapture('raw.avi')

# Get the original width, height, and codec
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))

# Create a VideoWriter with the new FPS
out = cv2.VideoWriter(vidTitle, fourcc, fps_actual, (width, height))

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    out.write(frame)

cap.release()
out.release()
print(f"Video saved with {fps_actual:.2f} FPS.")

# Remove the intermediate raw recording
if os.path.exists('raw.avi'):
    os.remove('raw.avi')
    print("Deleted original file: raw.avi")
else:
    print("File not found: raw.avi")