Updated tracking to support multiple lemons

- Improved tracking logic to handle multiple lemons simultaneously
- Added the necessary YAML configuration for BoT-SORT

Higher sensitivity to minor defects:
- Weighted "DefectiveLemon" more heavily
- Extended HISTORY_LENGTH for improved tracking stability
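The higher defect sensitivity comes from asymmetric thresholds in the new smoothing logic: a track is flagged as defective at a much lower proportion of recent frames than is required to confirm a good or unripe lemon. A minimal sketch of the decision rule, using the constants from the script below and a hypothetical 20-frame history:

from collections import deque

HISTORY_LENGTH = 20      # recent frames kept per tracked lemon
DEFECT_THRESHOLD = 0.3   # "DefectiveLemon" only needs 30% of frames
GOOD_THRESHOLD = 0.7     # "GoodLemon" / "NotRipeLemon" need 70% of frames

# Hypothetical history: 13 good frames, 7 defective frames
history = deque(["GoodLemon"] * 13 + ["DefectiveLemon"] * 7, maxlen=HISTORY_LENGTH)

# The defect check runs first and uses the lower threshold,
# so 7/20 = 35% defective frames is enough to report "DefectiveLemon".
if history.count("DefectiveLemon") / len(history) >= DEFECT_THRESHOLD:
    print("DefectiveLemon")
elif history.count("GoodLemon") / len(history) >= GOOD_THRESHOLD:
    print("GoodLemon")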
parent ffb7a6300a
commit 061d049bdf
@@ -0,0 +1,125 @@
import cv2
from ultralytics import YOLO
from collections import deque
import os

# Allow duplicate loading of the OpenMP runtime
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"

# Path to the BoT-SORT tracker YAML configuration file (adjust as needed)
yaml_path = "botsort.yaml"

# Camera index (1 selects an external camera)
camera_index = 1

# Load the custom-trained YOLO model
model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt")

# Class labels
class_labels = {
    0: "Bruised",
    1: "DefectiveLemon",
    2: "GoodLemon",
    3: "NotRipeLemon",
    4: "Rotten"
}

# Apply smoothing to "DefectiveLemon", "GoodLemon", and "NotRipeLemon"
smoothing_labels = ["DefectiveLemon", "GoodLemon", "NotRipeLemon"]

# Sliding-window smoothing parameters
HISTORY_LENGTH = 20      # Number of recent frames kept per track
DEFECT_THRESHOLD = 0.3   # Minimum proportion of "DefectiveLemon" frames
GOOD_THRESHOLD = 0.7     # Minimum proportion of "GoodLemon" / "NotRipeLemon" frames

# Per-track label history used for smoothing, format: {ID: deque([...], maxlen=HISTORY_LENGTH)}
lemon_history = {}

# Make the display window resizable
cv2.namedWindow("Live Detection", cv2.WINDOW_NORMAL)
# Smoothing function:
# - If the current label is not in smoothing_labels, clear the target's history and return the current label.
# - Otherwise, append the current label to the history and return a smoothed label based on class proportions.
def get_smoothed_label(obj_id, current_label):
    if current_label not in smoothing_labels:
        if obj_id in lemon_history:
            lemon_history[obj_id].clear()
        return current_label

    if obj_id not in lemon_history:
        lemon_history[obj_id] = deque(maxlen=HISTORY_LENGTH)
    lemon_history[obj_id].append(current_label)

    history = lemon_history[obj_id]
    defect_count = history.count("DefectiveLemon")
    good_count = history.count("GoodLemon")
    notripe_count = history.count("NotRipeLemon")
    total = len(history)

    if total == 0:
        return current_label
    if defect_count / total >= DEFECT_THRESHOLD:
        return "DefectiveLemon"
    elif good_count / total >= GOOD_THRESHOLD:
        return "GoodLemon"
    elif notripe_count / total >= GOOD_THRESHOLD:
        return "NotRipeLemon"
    else:
        return history[-1]
# Use streaming tracking mode so the tracker state is maintained across frames
results = model.track(
    source=camera_index,  # Read the video stream directly from the camera
    conf=0.5,
    tracker=yaml_path,    # Use the BoT-SORT YAML configuration file
    persist=True,         # Persist tracks between frames (do not reset the tracker)
    stream=True,          # Return results as a stream instead of calling frame by frame
    show=False
)
# Iterate over the streaming tracking results
for result in results:
    frame = result.orig_img     # Current frame
    detections = result.boxes   # Detection box information

    for box in detections:
        x1, y1, x2, y2 = map(int, box.xyxy[0])              # Detection box coordinates
        obj_id = int(box.id) if box.id is not None else -1   # Tracking ID
        class_id = int(box.cls)                              # Class ID
        score = box.conf                                     # Confidence
        label = class_labels.get(class_id, "Unknown")        # Class name

        # If the target ID is valid
        if obj_id != -1:
            # If the detected label requires smoothing, use the smoothing function
            if label in smoothing_labels:
                final_label = get_smoothed_label(obj_id, label)
                display_text = f"ID {obj_id} | {final_label}"
                # Only print targets whose smoothed label is one of the three smoothed classes
                if final_label in smoothing_labels:
                    position = f"({x1}, {y1}, {x2}, {y2})"
                    print(f"ID: {obj_id}, Position: {position}, Label: {display_text}")
            else:
                # For other classes, display the current detection directly and clear any existing history
                if obj_id in lemon_history:
                    lemon_history[obj_id].clear()
                display_text = label
        else:
            display_text = label

        # Draw the detection box and label
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, display_text, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # Display the processed frame
    cv2.imshow("Live Detection", frame)

    # Exit when the ESC key is pressed
    if cv2.waitKey(1) & 0xFF == 27:
        print("ESC key detected. Exiting the program.")
        break

cv2.destroyAllWindows()
print("Camera video processing complete. Program terminated.")
@@ -0,0 +1,21 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

# Default Ultralytics settings for the BoT-SORT tracker when using mode="track"
# For documentation and examples see https://docs.ultralytics.com/modes/track/
# For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT

tracker_type: botsort  # tracker type, ['botsort', 'bytetrack']
track_high_thresh: 0.25  # threshold for the first association
track_low_thresh: 0.1  # threshold for the second association
new_track_thresh: 0.4  # threshold for initializing a new track if the detection does not match any existing tracks
track_buffer: 30  # buffer used to decide when to remove tracks
match_thresh: 0.7  # threshold for matching tracks
fuse_score: True  # whether to fuse confidence scores with the IoU distances before matching
# min_box_area: 10  # threshold for minimum box area (for tracker evaluation, not used for now)

# BoT-SORT settings
gmc_method: sparseOptFlow  # method of global motion compensation
# ReID model related thresholds (not supported yet)
proximity_thresh: 0.5
appearance_thresh: 0.25
with_reid: False
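For reference, a minimal usage sketch of this configuration with the Ultralytics tracking API; the weights file here is a placeholder, and the main script above loads its own trained model:

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # placeholder weights; the script above uses custom-trained weights
# Stream tracking from the default camera using this BoT-SORT configuration file
for result in model.track(source=0, tracker="botsort.yaml", persist=True, stream=True, show=False):
    print(result.boxes.id)  # tracking IDs assigned by BoT-SORT (None if nothing is tracked)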