Compare commits


No commits in common. "e3b8320ad2bedcf9d620d9661b623716fc2c4945" and "061d049bdf2ccf087452591389191cab387bd84d" have entirely different histories.

3 changed files with 22 additions and 146 deletions

View File

@@ -1,35 +1,12 @@
import cv2
from ultralytics import YOLO
from collections import deque
import paho.mqtt.client as mqtt
from influxdb import InfluxDBClient
from influxdb_client import InfluxDBClient, Point, WriteOptions
import time
from datetime import datetime
# InfluxDB Configuration
INFLUX_URL = "http://localhost:8086"
INFLUX_TOKEN = "--k98NX5UQ2qBCGAO80lLc_-teD-AUtKNj4uQfz0M8WyjHt04AT9d0dr6w8pup93ukw6YcJxWURmo2v6CAP_2g=="
INFLUX_ORG = "GAAIM"
INFLUX_BUCKET = "AGVIGNETTE"
# Connect to InfluxDB
client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
write_api = client.write_api(write_options=WriteOptions(batch_size=1))
# MQTT Setup
MQTT_BROKER = "192.168.10.51"
MQTT_TOPIC = "fruit/classification"
mqtt_client = mqtt.Client()
mqtt_client.connect(MQTT_BROKER, 1883, 6000)
# Camera index (default camera is 0)
camera_index = 0
i = 0
camera_index = 1
# Load the YOLO model
model = YOLO(r"/Users/vel/Desktop/CvModel/CV_AG/runs/detect/train5/weights/best.pt") # Load custom model
model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt") # Load custom model
# Initialize the camera
cap = cv2.VideoCapture(camera_index)
@@ -55,9 +32,8 @@ class_labels = {
id_tracked_classes = ["DefectiveLemon", "GoodLemon", "NotRipeLemon"]
# Parameters
HISTORY_LENGTH = 7 # Number of frames to consider for majority voting
CONFIRMATION_FRAMES = 7 # Frames needed to confirm a new label
lemon_time = 0
HISTORY_LENGTH = 5 # Number of frames to consider for majority voting
CONFIRMATION_FRAMES = 5 # Frames needed to confirm a new label
# Dictionary to track detection history and confirmed states
lemon_history = {} # Format: {ID: deque(maxlen=HISTORY_LENGTH)}
@@ -100,7 +76,7 @@ while cap.isOpened():
break
# Perform object tracking using BoT-SORT
results = model.track(source=frame, conf=0.5, tracker='botsort.yaml', show=False, device = 'mps')
results = model.track(source=frame, conf=0.5, tracker='botsort.yaml', show=False)
for result in results:
frame = result.orig_img # Current frame
@@ -133,43 +109,6 @@ while cap.isOpened():
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(frame, display_text, (x1, y1 - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# Create Decision Point at x = 600
if x1 > 100:
cv2.line(frame, (600, 0), (600, height), (255, 0, 0), 2)
# Create Decision Point at x = 760
if x1 > 100:
cv2.line(frame, (760, 0), (760, height), (255, 0, 0), 2)
cv2.putText(frame, "Decision Point", (630, height // 2),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
# Lock in the label once it crosses the decision point
if x1 > 700 and obj_id in lemon_states:
cv2.putText(frame, f"Locked: {lemon_states[obj_id]}", (x1, y1 - 40),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
else:
cv2.putText(frame, "Waiting to Lock", (x1, y1 - 40),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
if x1 > 600 and x1 < 780:
if final_label == "DefectiveLemon":
mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
if time.time() - lemon_time > .3:
mqtt_client.publish(MQTT_TOPIC, mqtt_message)
lemon_time = time.time()
i = i + 1
elif final_label == "NotRipeLemon":
mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
if time.time() - lemon_time > .3:
mqtt_client.publish(MQTT_TOPIC, mqtt_message)
lemon_time = time.time()
i = i + 1
elif final_label == "GoodLemon":
mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
if time.time() - lemon_time > .3:
mqtt_client.publish(MQTT_TOPIC, mqtt_message)
lemon_time = time.time()
i = i + 1
# Display the processed video stream
cv2.imshow("Live Detection", frame)

View File

@@ -1,40 +1,8 @@
import cv2
from ultralytics import YOLO
from collections import deque
import paho.mqtt.client as mqtt
from influxdb import InfluxDBClient
from influxdb_client import InfluxDBClient, Point, WriteOptions
import time
from datetime import datetime
import ssl
import os
# InfluxDB Configuration
INFLUX_URL = "http://localhost:8086"
INFLUX_TOKEN = "export INFLUX_TOKEN=duVTQHPpHqr6WmdYfpSStqm-pxnvZHs-W0-3lXDnk8Tn6PGt59MlnTSR6egjMWdYvmL_ZI6xt3YUzGVBZHvc7w=="
INFLUX_ORG = "GAAIM"
INFLUX_BUCKET = "AGVIGNETTE"
# Connect to InfluxDB
client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
write_api = client.write_api(write_options=WriteOptions(batch_size=1))
# MQTT Setup
MQTT_BROKER = "192.168.10.57"
MQTT_TOPIC = "fruit/classification"
mqtt_client = mqtt.Client()
# Set up TLS/SSL for MQTT connection
# mqtt_client.tls_set(
#     ca_certs="/Users/vel/Desktop/CvModel/mosquitto/mosquitto/certs/ca.crt",  # Path to the CA certificate
#     tls_version=ssl.PROTOCOL_TLS  # Specify the TLS version
# )
# mqtt_client.tls_insecure_set(True)
mqtt_client.connect(MQTT_BROKER, 1883, 6000)
# Allow duplicate loading of OpenMP runtime
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
@ -42,15 +10,10 @@ os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
yaml_path = "botsort.yaml"
# Camera index (0 is the default camera; 1 selects an external camera)
camera_index = 0
cap = cv2.VideoCapture(camera_index)
cap.set(cv2.CAP_PROP_FPS, 30)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
camera_index = 1
# Load the YOLO model
model = YOLO(r"/Users/vel/Desktop/CvModel/CV_AG/runs/detect/train4/weights/best.pt") # Load custom model
model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt") # Load custom model
# Define class labels
class_labels = {
@@ -71,7 +34,6 @@ GOOD_THRESHOLD = 0.7 # Threshold for "GoodLemon" and "NotRipeLemon" proportio
# State history for each target (used for smoothing), format: {ID: deque([...], maxlen=HISTORY_LENGTH)}
lemon_history = {}
lemon_send_history = []
# Set the display window to be resizable
cv2.namedWindow("Live Detection", cv2.WINDOW_NORMAL)
@@ -109,17 +71,15 @@ def get_smoothed_label(obj_id, current_label):
# Use streaming tracking mode to maintain tracker state
results = model.track(
source=camera_index, # Get video stream directly from the camera
conf=0.45,
conf=0.5,
tracker=yaml_path, # Use the YAML configuration file
persist=True, # Persist tracking (do not reset)
stream=True, # Stream processing, not frame-by-frame calling
show=False,
device = 'mps' #'cpu'
show=False
)
# Iterate over streaming tracking results
for result in results:
frame = result.orig_img # Current frame
detections = result.boxes # Detection box information
@@ -140,32 +100,6 @@ for result in results:
if final_label in smoothing_labels:
position = f"({x1}, {y1}, {x2}, {y2})"
print(f"ID: {obj_id}, Position: {position}, Label: {display_text}")
# Draw detection box and label with color based on classification
if final_label == "DefectiveLemon":
box_color = (100, 100, 255) # Red for defective
elif final_label == "NotRipeLemon":
box_color = (255, 100, 80) # Blue for unripe
elif final_label == "GoodLemon":
box_color = (0, 255, 0) # Green for good
else:
box_color = (255, 255, 255) # White for unknown or other classes
# Add background rectangle for text
text_size = cv2.getTextSize(display_text, cv2.FONT_HERSHEY_TRIPLEX, 0.6, 2)[0]
text_x, text_y = x1, y1 - 10
text_w, text_h = text_size[0], text_size[1]
cv2.rectangle(frame, (text_x, text_y - text_h - 5), (text_x + text_w, text_y + 5), (0, 0, 0), -1)
# Draw detection box and text
cv2.rectangle(frame, (x1, y1), (x2, y2), box_color, 2)
cv2.putText(frame, display_text, (text_x, text_y),
cv2.FONT_HERSHEY_TRIPLEX, 0.6, box_color, 2)
if x1 > 750 and x1 < 850 and y2 < 410 and y1 > 190 and obj_id not in lemon_send_history:
if final_label in ["DefectiveLemon", "NotRipeLemon", "GoodLemon"]:
mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
lemon_send_history.append(obj_id)
mqtt_client.publish(MQTT_TOPIC, mqtt_message)
else:
# For other classes, display the current detection result directly and clear any existing history
if obj_id in lemon_history:
@@ -174,6 +108,11 @@ for result in results:
else:
display_text = label
# Draw detection box and label
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(frame, display_text, (x1, y1 - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# Display the processed frame
cv2.imshow("Live Detection", frame)
@@ -184,5 +123,3 @@ for result in results:
cv2.destroyAllWindows()
print("Camera video processing complete. Program terminated.")

View File

@@ -4,18 +4,18 @@
# For documentation and examples see https://docs.ultralytics.com/modes/track/
# For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT
tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
track_high_thresh: 0.2 # threshold for the first association
track_low_thresh: 0.05 # threshold for the second association
new_track_thresh: 0.3 # threshold for init new track if the detection does not match any tracks
track_buffer: 50 # buffer to calculate the time when to remove tracks
match_thresh: 0.8 # threshold for matching tracks
tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
track_high_thresh: 0.25 # threshold for the first association
track_low_thresh: 0.1 # threshold for the second association
new_track_thresh: 0.4 # threshold for init new track if the detection does not match any tracks
track_buffer: 30 # buffer to calculate the time when to remove tracks
match_thresh: 0.7 # threshold for matching tracks
fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
# min_box_area: 10 # threshold for min box areas (for tracker evaluation, not used for now)
# BoT-SORT settings
gmc_method: sparseOptFlow # method of global motion compensation
# ReID model related thresh (not supported yet)
proximity_thresh: 0.6
appearance_thresh: 0.2
proximity_thresh: 0.5
appearance_thresh: 0.25
with_reid: False