added MQTT

M1MacMini 2025-02-18 10:46:30 -05:00
parent ffb7a6300a
commit 8a4f684cc1
1 changed file with 66 additions and 5 deletions


@@ -1,12 +1,35 @@
import cv2
from ultralytics import YOLO
from collections import deque
import paho.mqtt.client as mqtt
from influxdb import InfluxDBClient
from influxdb_client import InfluxDBClient, Point, WriteOptions
import time
from datetime import datetime
# InfluxDB Configuration
INFLUX_URL = "http://localhost:8086"
INFLUX_TOKEN = "--k98NX5UQ2qBCGAO80lLc_-teD-AUtKNj4uQfz0M8WyjHt04AT9d0dr6w8pup93ukw6YcJxWURmo2v6CAP_2g=="
INFLUX_ORG = "GAAIM"
INFLUX_BUCKET = "AGVIGNETTE"
# Connect to InfluxDB
client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
write_api = client.write_api(write_options=WriteOptions(batch_size=1))
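# batch_size=1 makes the write API flush each point immediately instead of batching.
# Hypothetical example (not part of this commit) of logging a classification straight to InfluxDB:
#   write_api.write(bucket=INFLUX_BUCKET, org=INFLUX_ORG,
#                   record=Point("lemon_classification").field("classification", final_label))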
# MQTT Setup
MQTT_BROKER = "192.168.10.51"
MQTT_TOPIC = "fruit/classification"
mqtt_client = mqtt.Client()
mqtt_client.connect(MQTT_BROKER, 1883, 6000)
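# connect(host, port, keepalive): 1883 is the standard unencrypted MQTT port,
# and the 6000-second keepalive holds the broker connection open between detections.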
# Camera index (default camera is 0)
-camera_index = 1
+camera_index = 0
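# Running count of classifications published over MQTT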
i = 0
# Load the YOLO model
model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt") # Load custom model
model = YOLO(r"/Users/vel/Desktop/CvModel/CV_AG/runs/detect/train5/weights/best.pt") # Load custom model
# Initialize the camera
cap = cv2.VideoCapture(camera_index)
@@ -32,8 +55,9 @@ class_labels = {
id_tracked_classes = ["DefectiveLemon", "GoodLemon", "NotRipeLemon"]
# Parameters
-HISTORY_LENGTH = 5 # Number of frames to consider for majority voting
-CONFIRMATION_FRAMES = 5 # Frames needed to confirm a new label
+HISTORY_LENGTH = 7 # Number of frames to consider for majority voting
+CONFIRMATION_FRAMES = 7 # Frames needed to confirm a new label
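# Time of the most recent MQTT publish, used to throttle messages to one every 0.3 s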
lemon_time = 0
# Dictionary to track detection history and confirmed states
lemon_history = {} # Format: {ID: deque(maxlen=HISTORY_LENGTH)}
@@ -76,7 +100,7 @@ while cap.isOpened():
break
# Perform object tracking using BoT-SORT
-results = model.track(source=frame, conf=0.5, tracker='botsort.yaml', show=False)
+results = model.track(source=frame, conf=0.5, tracker='botsort.yaml', show=False, device='mps')
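# device='mps' runs inference on the Apple-silicon GPU via PyTorch's Metal backend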
for result in results:
frame = result.orig_img # Current frame
@@ -109,6 +133,43 @@ while cap.isOpened():
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(frame, display_text, (x1, y1 - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# Create Decision Point at x = 600
if x1 > 100:
    cv2.line(frame, (600, 0), (600, height), (255, 0, 0), 2)
# Create Decision Point at x = 760
if x1 > 100:
    cv2.line(frame, (760, 0), (760, height), (255, 0, 0), 2)
    cv2.putText(frame, "Decision Point", (630, height // 2),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
# Lock in the label once it crosses the decision point
if x1 > 700 and obj_id in lemon_states:
    cv2.putText(frame, f"Locked: {lemon_states[obj_id]}", (x1, y1 - 40),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
else:
    cv2.putText(frame, "Waiting to Lock", (x1, y1 - 40),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
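# Publish the confirmed classification while the lemon is inside the decision window (600 < x1 < 780).
# The payload is an InfluxDB line-protocol record: measurement "lemon_classification", a string field
# "classification", and a nanosecond epoch timestamp. Publishes are throttled to at most one message
# every 0.3 s so the same lemon is not re-published on every frame.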
if 600 < x1 < 780:
    if final_label in ("DefectiveLemon", "NotRipeLemon", "GoodLemon"):
        mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time() * 1e9)}"
        if time.time() - lemon_time > 0.3:
            mqtt_client.publish(MQTT_TOPIC, mqtt_message)
            lemon_time = time.time()
            i = i + 1
# Display the processed video stream
cv2.imshow("Live Detection", frame)