Compare commits


3 Commits

Author SHA1 Message Date
M1MacMini e3b8320ad2 added finished test track and botsort yaml 2025-03-26 12:41:13 -04:00
M1MacMini ce6b1c6705 Merge branch 'main' of https://git.factory.uga.edu/GEORGIA-AIM/CV_AG 2025-03-13 15:27:29 -04:00
M1MacMini 8a4f684cc1 added MQTT 2025-02-18 10:46:30 -05:00
3 changed files with 146 additions and 22 deletions

View File

@@ -1,12 +1,35 @@
 import cv2
 from ultralytics import YOLO
 from collections import deque
+import paho.mqtt.client as mqtt
+from influxdb import InfluxDBClient
+from influxdb_client import InfluxDBClient, Point, WriteOptions
+import time
+from datetime import datetime
+
+# InfluxDB Configuration
+INFLUX_URL = "http://localhost:8086"
+INFLUX_TOKEN = "--k98NX5UQ2qBCGAO80lLc_-teD-AUtKNj4uQfz0M8WyjHt04AT9d0dr6w8pup93ukw6YcJxWURmo2v6CAP_2g=="
+INFLUX_ORG = "GAAIM"
+INFLUX_BUCKET = "AGVIGNETTE"
+
+# Connect to InfluxDB
+client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
+write_api = client.write_api(write_options=WriteOptions(batch_size=1))
+
+# MQTT Setup
+MQTT_BROKER = "192.168.10.51"
+MQTT_TOPIC = "fruit/classification"
+mqtt_client = mqtt.Client()
+mqtt_client.connect(MQTT_BROKER, 1883, 6000)
+
 # Camera index (default camera is 0)
-camera_index = 1
+camera_index = 0
+i = 0
 
 # Load the YOLO model
-model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt")  # Load custom model
+model = YOLO(r"/Users/vel/Desktop/CvModel/CV_AG/runs/detect/train5/weights/best.pt")  # Load custom model
 
 # Initialize the camera
 cap = cv2.VideoCapture(camera_index)
@@ -32,8 +55,9 @@ class_labels = {
 id_tracked_classes = ["DefectiveLemon", "GoodLemon", "NotRipeLemon"]
 
 # Parameters
-HISTORY_LENGTH = 5  # Number of frames to consider for majority voting
-CONFIRMATION_FRAMES = 5  # Frames needed to confirm a new label
+HISTORY_LENGTH = 7  # Number of frames to consider for majority voting
+CONFIRMATION_FRAMES = 7  # Frames needed to confirm a new label
+lemon_time = 0
 
 # Dictionary to track detection history and confirmed states
 lemon_history = {}  # Format: {ID: deque(maxlen=HISTORY_LENGTH)}
@@ -76,7 +100,7 @@ while cap.isOpened():
         break
 
     # Perform object tracking using BoT-SORT
-    results = model.track(source=frame, conf=0.5, tracker='botsort.yaml', show=False)
+    results = model.track(source=frame, conf=0.5, tracker='botsort.yaml', show=False, device='mps')
 
     for result in results:
         frame = result.orig_img  # Current frame
@@ -109,6 +133,43 @@ while cap.isOpened():
             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
             cv2.putText(frame, display_text, (x1, y1 - 10),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+
+            # Create Decision Point at x = 600
+            if x1 > 100:
+                cv2.line(frame, (600, 0), (600, height), (255, 0, 0), 2)
+
+            # Create Decision Point at x = 760
+            if x1 > 100:
+                cv2.line(frame, (760, 0), (760, height), (255, 0, 0), 2)
+                cv2.putText(frame, "Decision Point", (630, height // 2),
+                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
+
+            # Lock in the label once it crosses the decision point
+            if x1 > 700 and obj_id in lemon_states:
+                cv2.putText(frame, f"Locked: {lemon_states[obj_id]}", (x1, y1 - 40),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
+            else:
+                cv2.putText(frame, "Waiting to Lock", (x1, y1 - 40),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
+
+            if x1 > 600 and x1 < 780:
+                if final_label == "DefectiveLemon":
+                    mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
+                    if time.time() - lemon_time > .3:
+                        mqtt_client.publish(MQTT_TOPIC, mqtt_message)
+                        lemon_time = time.time()
+                        i = i + 1
+                elif final_label == "NotRipeLemon":
+                    mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
+                    if time.time() - lemon_time > .3:
+                        mqtt_client.publish(MQTT_TOPIC, mqtt_message)
+                        lemon_time = time.time()
+                        i = i + 1
+                elif final_label == "GoodLemon":
+                    mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
+                    if time.time() - lemon_time > .3:
+                        mqtt_client.publish(MQTT_TOPIC, mqtt_message)
+                        lemon_time = time.time()
+                        i = i + 1
 
     # Display the processed video stream
     cv2.imshow("Live Detection", frame)
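
Note on the MQTT payload: although a write_api is created, this script never writes to InfluxDB directly; it publishes InfluxDB line protocol (measurement lemon_classification, a string field classification, and a nanosecond epoch timestamp) to the fruit/classification topic. A minimal sketch of a subscriber that forwards those payloads into the AGVIGNETTE bucket is shown below; this bridge script, its on_message handler, and the placeholder token are assumptions for illustration and are not part of this change.

import paho.mqtt.client as mqtt
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS

INFLUX_URL = "http://localhost:8086"
INFLUX_TOKEN = "<influxdb-token>"  # placeholder; substitute a real token
INFLUX_ORG = "GAAIM"
INFLUX_BUCKET = "AGVIGNETTE"
MQTT_BROKER = "192.168.10.51"
MQTT_TOPIC = "fruit/classification"

influx = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
write_api = influx.write_api(write_options=SYNCHRONOUS)

def on_message(client, userdata, msg):
    # Each payload is already valid line protocol, e.g.
    # lemon_classification classification="GoodLemon" 1742590000000000000
    write_api.write(bucket=INFLUX_BUCKET, org=INFLUX_ORG, record=msg.payload.decode())

subscriber = mqtt.Client()
subscriber.on_message = on_message
subscriber.connect(MQTT_BROKER, 1883, 60)
subscriber.subscribe(MQTT_TOPIC)
subscriber.loop_forever()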

View File

@@ -1,8 +1,40 @@
 import cv2
 from ultralytics import YOLO
 from collections import deque
+import paho.mqtt.client as mqtt
+from influxdb import InfluxDBClient
+from influxdb_client import InfluxDBClient, Point, WriteOptions
+import time
+from datetime import datetime
+import ssl
 import os
+
+# InfluxDB Configuration
+INFLUX_URL = "http://localhost:8086"
+INFLUX_TOKEN = "export INFLUX_TOKEN=duVTQHPpHqr6WmdYfpSStqm-pxnvZHs-W0-3lXDnk8Tn6PGt59MlnTSR6egjMWdYvmL_ZI6xt3YUzGVBZHvc7w=="
+INFLUX_ORG = "GAAIM"
+INFLUX_BUCKET = "AGVIGNETTE"
+
+# Connect to InfluxDB
+client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
+write_api = client.write_api(write_options=WriteOptions(batch_size=1))
+
+# MQTT Setup
+MQTT_BROKER = "192.168.10.57"
+MQTT_TOPIC = "fruit/classification"
+mqtt_client = mqtt.Client()
+
+# Set up TLS/SSL for MQTT connection
+# mqtt_client.tls_set(
+#     ca_certs="/Users/vel/Desktop/CvModel/mosquitto/mosquitto/certs/ca.crt",  # Path to the CA certificate
+#     tls_version=ssl.PROTOCOL_TLS  # Specify the TLS version
+# )
+# mqtt_client.tls_insecure_set(True)
+mqtt_client.connect(MQTT_BROKER, 1883, 6000)
+
 # Allow duplicate loading of OpenMP runtime
 os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
@@ -10,10 +42,15 @@ os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
 yaml_path = "botsort.yaml"
 
 # Camera index (default camera index, 1 indicates an external camera)
-camera_index = 1
+camera_index = 0
+cap = cv2.VideoCapture(camera_index)
+cap.set(cv2.CAP_PROP_FPS, 30)
+width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
 # Load the YOLO model
-model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt")  # Load custom model
+model = YOLO(r"/Users/vel/Desktop/CvModel/CV_AG/runs/detect/train4/weights/best.pt")  # Load custom model
 
 # Define class labels
 class_labels = {
@@ -34,6 +71,7 @@ GOOD_THRESHOLD = 0.7  # Threshold for "GoodLemon" and "NotRipeLemon" proportio
 # State history for each target (used for smoothing), format: {ID: deque([...], maxlen=HISTORY_LENGTH)}
 lemon_history = {}
+lemon_send_history = []
 
 # Set the display window to be resizable
 cv2.namedWindow("Live Detection", cv2.WINDOW_NORMAL)
@@ -71,15 +109,17 @@ def get_smoothed_label(obj_id, current_label):
 # Use streaming tracking mode to maintain tracker state
 results = model.track(
     source=camera_index,  # Get video stream directly from the camera
-    conf=0.5,
+    conf=0.45,
     tracker=yaml_path,  # Use the YAML configuration file
     persist=True,  # Persist tracking (do not reset)
     stream=True,  # Stream processing, not frame-by-frame calling
-    show=False
+    show=False,
+    device='mps'  # 'cpu'
 )
 
 # Iterate over streaming tracking results
 for result in results:
     frame = result.orig_img  # Current frame
     detections = result.boxes  # Detection box information
@@ -100,6 +140,32 @@ for result in results:
         if final_label in smoothing_labels:
             position = f"({x1}, {y1}, {x2}, {y2})"
             print(f"ID: {obj_id}, Position: {position}, Label: {display_text}")
+
+            # Draw detection box and label with color based on classification
+            if final_label == "DefectiveLemon":
+                box_color = (100, 100, 255)  # Red for defective
+            elif final_label == "NotRipeLemon":
+                box_color = (255, 100, 80)  # Blue for unripe
+            elif final_label == "GoodLemon":
+                box_color = (0, 255, 0)  # Green for good
+            else:
+                box_color = (255, 255, 255)  # White for unknown or other classes
+
+            # Add background rectangle for text
+            text_size = cv2.getTextSize(display_text, cv2.FONT_HERSHEY_TRIPLEX, 0.6, 2)[0]
+            text_x, text_y = x1, y1 - 10
+            text_w, text_h = text_size[0], text_size[1]
+            cv2.rectangle(frame, (text_x, text_y - text_h - 5), (text_x + text_w, text_y + 5), (0, 0, 0), -1)
+
+            # Draw detection box and text
+            cv2.rectangle(frame, (x1, y1), (x2, y2), box_color, 2)
+            cv2.putText(frame, display_text, (text_x, text_y),
+                        cv2.FONT_HERSHEY_TRIPLEX, 0.6, box_color, 2)
+
+            if x1 > 750 and x1 < 850 and y2 < 410 and y1 > 190 and obj_id not in lemon_send_history:
+                if final_label in ["DefectiveLemon", "NotRipeLemon", "GoodLemon"]:
+                    mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
+                    lemon_send_history.append(obj_id)
+                    mqtt_client.publish(MQTT_TOPIC, mqtt_message)
         else:
             # For other classes, display the current detection result directly and clear history (if exists)
             if obj_id in lemon_history:
@@ -108,11 +174,6 @@ for result in results:
             else:
                 display_text = label
 
-            # Draw detection box and label
-            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
-            cv2.putText(frame, display_text, (x1, y1 - 10),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
-
     # Display the processed frame
     cv2.imshow("Live Detection", frame)
@@ -123,3 +184,5 @@ for result in results:
 cv2.destroyAllWindows()
 print("Camera video processing complete. Program terminated.")

View File

@@ -4,18 +4,18 @@
 # For documentation and examples see https://docs.ultralytics.com/modes/track/
 # For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT
 
-tracker_type: botsort  # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.25  # threshold for the first association
-track_low_thresh: 0.1  # threshold for the second association
-new_track_thresh: 0.4  # threshold for init new track if the detection does not match any tracks
-track_buffer: 30  # buffer to calculate the time when to remove tracks
-match_thresh: 0.7  # threshold for matching tracks
+tracker_type: bytetrack  # tracker type, ['botsort', 'bytetrack']
+track_high_thresh: 0.2  # threshold for the first association
+track_low_thresh: 0.05  # threshold for the second association
+new_track_thresh: 0.3  # threshold for init new track if the detection does not match any tracks
+track_buffer: 50  # buffer to calculate the time when to remove tracks
+match_thresh: 0.8  # threshold for matching tracks
 fuse_score: True  # Whether to fuse confidence scores with the iou distances before matching
 # min_box_area: 10  # threshold for min box areas(for tracker evaluation, not used for now)
 
 # BoT-SORT settings
 gmc_method: sparseOptFlow  # method of global motion compensation
 
 # ReID model related thresh (not supported yet)
-proximity_thresh: 0.5
-appearance_thresh: 0.25
+proximity_thresh: 0.6
+appearance_thresh: 0.2
 with_reid: False
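
With tracker_type switched to bytetrack, the BoT-SORT-specific keys below it (gmc_method, proximity_thresh, appearance_thresh, with_reid) should no longer influence association, so the effective changes are the adjusted association thresholds and the longer track_buffer. Schematically, ByteTrack splits detections into two association passes using the two thresholds; the sketch below is an illustration of that split under the values in this file, not the Ultralytics implementation:

def split_detections(detections):
    # detections: list of (box, score) pairs from the detector
    TRACK_HIGH_THRESH = 0.2   # first association: high-confidence detections
    TRACK_LOW_THRESH = 0.05   # second association: low-confidence detections
    first_pass = [d for d in detections if d[1] >= TRACK_HIGH_THRESH]
    second_pass = [d for d in detections if TRACK_LOW_THRESH <= d[1] < TRACK_HIGH_THRESH]
    # Anything below TRACK_LOW_THRESH is dropped; first-pass detections left
    # unmatched may start new tracks if their score exceeds new_track_thresh (0.3).
    return first_pass, second_pass

Note that both scripts call model.track with conf=0.5 or conf=0.45, which filters detections before they reach the tracker, so the 0.05–0.2 second-association band may rarely be populated in practice.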