added finished test track and botsort yaml
This commit is contained in:
parent ce6b1c6705
commit e3b8320ad2

@@ -1,8 +1,40 @@
 import cv2
 from ultralytics import YOLO
 from collections import deque
+import paho.mqtt.client as mqtt
-from influxdb import InfluxDBClient
+from influxdb_client import InfluxDBClient, Point, WriteOptions
 import time
 from datetime import datetime
 import ssl
 import os
+
+# InfluxDB configuration
+INFLUX_URL = "http://localhost:8086"
+INFLUX_TOKEN = "duVTQHPpHqr6WmdYfpSStqm-pxnvZHs-W0-3lXDnk8Tn6PGt59MlnTSR6egjMWdYvmL_ZI6xt3YUzGVBZHvc7w=="
+INFLUX_ORG = "GAAIM"
+INFLUX_BUCKET = "AGVIGNETTE"
+
+# Connect to InfluxDB
+client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG)
+write_api = client.write_api(write_options=WriteOptions(batch_size=1))
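The commit creates write_api, but no use of it appears in the visible hunks. As a sketch of how a classification event could be written with the influxdb_client API (the helper name and field layout are assumptions; the measurement name mirrors the line-protocol message built further down):

from influxdb_client import WritePrecision

def write_classification(label):
    # Build a point matching the "lemon_classification" measurement used
    # in the MQTT message below, then write it through the batching API.
    point = (
        Point("lemon_classification")
        .field("classification", label)
        .time(datetime.utcnow(), WritePrecision.NS)
    )
    write_api.write(bucket=INFLUX_BUCKET, org=INFLUX_ORG, record=point)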
+# MQTT setup
+MQTT_BROKER = "192.168.10.57"
+MQTT_TOPIC = "fruit/classification"
+
+mqtt_client = mqtt.Client()
+
+# Set up TLS/SSL for the MQTT connection (left commented out here)
+# mqtt_client.tls_set(
+#     ca_certs="/Users/vel/Desktop/CvModel/mosquitto/mosquitto/certs/ca.crt",  # Path to the CA certificate
+#     tls_version=ssl.PROTOCOL_TLS  # Specify the TLS version
+# )
+# mqtt_client.tls_insecure_set(True)
+mqtt_client.connect(MQTT_BROKER, 1883, 6000)
+
+# Allow duplicate loading of the OpenMP runtime
+os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
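If the TLS block above is re-enabled, the connection port should change with it. A minimal sketch with paho-mqtt, assuming the broker listens on 8883 (the conventional MQTT-over-TLS port) and a standard 60-second keepalive:

# Sketch: MQTT over TLS. The CA path is the one from the commented-out
# block above; port 8883 and the keepalive of 60 are assumptions.
mqtt_client.tls_set(
    ca_certs="/Users/vel/Desktop/CvModel/mosquitto/mosquitto/certs/ca.crt",
    tls_version=ssl.PROTOCOL_TLS,
)
mqtt_client.connect(MQTT_BROKER, 8883, 60)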
@@ -10,10 +42,15 @@ os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
 yaml_path = "botsort.yaml"

 # Camera index (0 is the default camera; 1 indicates an external camera)
-camera_index = 1
+camera_index = 0

 cap = cv2.VideoCapture(camera_index)
 cap.set(cv2.CAP_PROP_FPS, 30)
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

 # Load the YOLO model
-model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt")  # Load custom model
+model = YOLO(r"/Users/vel/Desktop/CvModel/CV_AG/runs/detect/train4/weights/best.pt")  # Load custom model

 # Define class labels
 class_labels = {
@@ -34,6 +71,7 @@ GOOD_THRESHOLD = 0.7  # Threshold for "GoodLemon" and "NotRipeLemon" proportions
 # State history for each target (used for smoothing), format: {ID: deque([...], maxlen=HISTORY_LENGTH)}
 lemon_history = {}
+lemon_send_history = []
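The next hunk references get_smoothed_label, whose body falls outside the visible diff. A plausible reading, assuming a simple majority vote over each object's deque (HISTORY_LENGTH and the exact voting rule are assumptions):

from collections import Counter

HISTORY_LENGTH = 10  # assumed; the real constant is defined outside the diff

def get_smoothed_label(obj_id, current_label):
    # Record the label and return the most frequent one in the window,
    # damping frame-to-frame flicker in the classification.
    history = lemon_history.setdefault(obj_id, deque(maxlen=HISTORY_LENGTH))
    history.append(current_label)
    return Counter(history).most_common(1)[0][0]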
 # Make the display window resizable
 cv2.namedWindow("Live Detection", cv2.WINDOW_NORMAL)
@@ -71,15 +109,17 @@ def get_smoothed_label(obj_id, current_label):
 # Use streaming tracking mode to maintain tracker state
 results = model.track(
     source=camera_index,  # Read the video stream directly from the camera
-    conf=0.5,
+    conf=0.45,
     tracker=yaml_path,  # Use the YAML configuration file
     persist=True,  # Persist tracking (do not reset between frames)
     stream=True,  # Stream processing, not frame-by-frame calling
-    show=False
+    show=False,
+    device='mps'  # Apple-silicon GPU; 'cpu' is the alternative
 )
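Hard-coding device='mps' ties the script to Apple silicon. A small runtime guard (using PyTorch, which Ultralytics already depends on) would keep the same call portable; the result can then be passed as device=device in the model.track(...) call above:

import torch

# Pick the Metal backend when available, otherwise fall back to CPU.
device = "mps" if torch.backends.mps.is_available() else "cpu"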
 # Iterate over streaming tracking results
 for result in results:
     frame = result.orig_img  # Current frame
     detections = result.boxes  # Detection box information
@@ -100,6 +140,32 @@ for result in results:
     if final_label in smoothing_labels:
         position = f"({x1}, {y1}, {x2}, {y2})"
         print(f"ID: {obj_id}, Position: {position}, Label: {display_text}")
+        # Draw the detection box and label with a color based on the classification
+        if final_label == "DefectiveLemon":
+            box_color = (100, 100, 255)  # Red for defective (BGR)
+        elif final_label == "NotRipeLemon":
+            box_color = (255, 100, 80)  # Blue for unripe (BGR)
+        elif final_label == "GoodLemon":
+            box_color = (0, 255, 0)  # Green for good
+        else:
+            box_color = (255, 255, 255)  # White for unknown or other classes
+
+        # Add a background rectangle behind the label text
+        text_size = cv2.getTextSize(display_text, cv2.FONT_HERSHEY_TRIPLEX, 0.6, 2)[0]
+        text_x, text_y = x1, y1 - 10
+        text_w, text_h = text_size[0], text_size[1]
+        cv2.rectangle(frame, (text_x, text_y - text_h - 5), (text_x + text_w, text_y + 5), (0, 0, 0), -1)
+
+        # Draw the detection box and the label text
+        cv2.rectangle(frame, (x1, y1), (x2, y2), box_color, 2)
+        cv2.putText(frame, display_text, (text_x, text_y),
+                    cv2.FONT_HERSHEY_TRIPLEX, 0.6, box_color, 2)
+
+        # Publish each lemon at most once, when it passes through the trigger zone
+        if 750 < x1 < 850 and y1 > 190 and y2 < 410 and obj_id not in lemon_send_history:
+            if final_label in ["DefectiveLemon", "NotRipeLemon", "GoodLemon"]:
+                mqtt_message = f"lemon_classification classification=\"{final_label}\" {int(time.time()*1e9)}"
+                lemon_send_history.append(obj_id)
+                mqtt_client.publish(MQTT_TOPIC, mqtt_message)
     else:
         # For other classes, display the current detection result directly and clear history (if it exists)
         if obj_id in lemon_history:
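The published payload is InfluxDB line protocol (measurement, one field, nanosecond timestamp), presumably so an MQTT-to-InfluxDB bridge such as Telegraf's mqtt_consumer with data_format = "influx" can forward it verbatim; that bridge is an assumption, not shown in this commit. One concrete payload, with a hypothetical label:

# Sketch: one line-protocol payload as published above (values hypothetical).
#   <measurement> <field>="<value>" <timestamp in ns>
sample = f'lemon_classification classification="GoodLemon" {int(time.time() * 1e9)}'
# e.g. lemon_classification classification="GoodLemon" 1712345678901234567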
@@ -108,11 +174,6 @@ for result in results:
     else:
         display_text = label

-    # Draw detection box and label
-    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
-    cv2.putText(frame, display_text, (x1, y1 - 10),
-                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

     # Display the processed frame
     cv2.imshow("Live Detection", frame)
@@ -123,3 +184,5 @@ for result in results:
 cv2.destroyAllWindows()
+print("Camera video processing complete. Program terminated.")
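Nothing in the visible hunks releases the capture or flushes the pending InfluxDB batch. A fuller teardown sketch, using this script's own names (all standard API calls):

cap.release()             # release the camera handle
mqtt_client.disconnect()  # close the MQTT session cleanly
write_api.close()         # flush batched writes before exit
client.close()            # close the InfluxDB client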
botsort.yaml (16 lines changed)

@@ -4,18 +4,18 @@
 # For documentation and examples see https://docs.ultralytics.com/modes/track/
 # For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT

-tracker_type: botsort  # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.25  # threshold for the first association
-track_low_thresh: 0.1  # threshold for the second association
-new_track_thresh: 0.4  # threshold for starting a new track if the detection matches no existing track
-track_buffer: 30  # buffer to calculate the time when to remove tracks
-match_thresh: 0.7  # threshold for matching tracks
+tracker_type: bytetrack  # tracker type, ['botsort', 'bytetrack']
+track_high_thresh: 0.2  # threshold for the first association
+track_low_thresh: 0.05  # threshold for the second association
+new_track_thresh: 0.3  # threshold for starting a new track if the detection matches no existing track
+track_buffer: 50  # buffer to calculate the time when to remove tracks
+match_thresh: 0.8  # threshold for matching tracks
 fuse_score: True  # whether to fuse confidence scores with the IoU distances before matching
 # min_box_area: 10  # threshold for min box areas (for tracker evaluation, not used for now)

 # BoT-SORT settings
 gmc_method: sparseOptFlow  # method of global motion compensation
 # ReID model related thresholds (not supported yet)
-proximity_thresh: 0.5
-appearance_thresh: 0.25
+proximity_thresh: 0.6
+appearance_thresh: 0.2
 with_reid: False
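Note that although the file keeps the name botsort.yaml, tracker_type now selects ByteTrack: Ultralytics picks the tracking algorithm from this key, not from the filename, so the main script's call keeps working unchanged:

# The script passes the file by name; the algorithm actually used comes
# from the tracker_type key inside it (now bytetrack).
results = model.track(source=camera_index, tracker="botsort.yaml", stream=True)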