updates to algorithm

Hunter Halloran 2025-02-10 16:57:45 +00:00
parent 2a8d2afd5a
commit 82b45040ea
9 changed files with 371 additions and 23 deletions

Count.py

@@ -9,20 +9,23 @@ import time
############################################################################################################
# Initialize Variables
-cy1=550
-offset=30
-ids=set()
+cy1 = 550
+offset = 90
+ids = set()
pecanCount = 0
avgPecanWeight = 0.0270453125 # lbs
refThroughput = 0 # lbs / 15 seconds
-avgSampleTime = 0.5 # seconds
+samplePeriod = 0.25 # seconds
+width = 1280
+height = 720
+totalPecanCount = 0
# Load the YOLO11 model and set device
model = YOLO("yolo11m-pecan.pt")
device = 'cuda'
# Open the video file (use video file or webcam, here using webcam)
-cap = cv2.VideoCapture(0)
+cap = cv2.VideoCapture('rtsp://192.168.1.10:8554/stream')
############################################################################################################
# Unscented Kalman Filter
@@ -39,8 +42,8 @@ def hx(x):
points = MerweScaledSigmaPoints(n=1, alpha=0.1, beta=2, kappa=0) # Define sigma points
-ukf = UKF(dim_x=1, dim_z=1, fx=fx, hx=hx, points=points) # Initialize UKF
-ukf.x = np.array([refThroughput / 15 / avgPecanWeight * avgSampleTime ]) # Initial state estimate
+ukf = UKF(dim_x=1, dim_z=1, fx=fx, hx=hx, points=points, dt=samplePeriod) # Initialize UKF
+ukf.x = np.array([refThroughput / 15 / avgPecanWeight * samplePeriod]) # Initial state estimate
ukf.Q = np.array([[0.02]]) # Process noise covariance (Q) - controls how much the state changes naturally
ukf.R = np.array([[1]]) # Measurement noise covariance (R) - how noisy the measurements are
ukf.P = np.eye(1) * 0.1 # Initial state covariance (P) - initial uncertainty
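The hunk above shows only the tail of the filter setup (its header references `def hx(x):`). For orientation, here is a minimal, self-contained sketch of a constant-rate count model in filterpy that matches the parameters above; the `fx`/`hx` bodies, the zero seed, and the sample measurement are illustrative assumptions, not the committed code:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints
from filterpy.kalman import UnscentedKalmanFilter as UKF

def fx(x, dt):
    # Assumed state transition: pecans-per-sample treated as a random walk,
    # so the state passes through unchanged each step.
    return x

def hx(x):
    # Assumed measurement function: the raw per-sample count observes the
    # state directly.
    return x

points = MerweScaledSigmaPoints(n=1, alpha=0.1, beta=2, kappa=0)
ukf = UKF(dim_x=1, dim_z=1, fx=fx, hx=hx, points=points, dt=0.25)
ukf.x = np.array([0.0])    # start from zero expected pecans per sample (assumed)
ukf.predict()
ukf.update(np.array([3]))  # hypothetical: 3 pecans crossed the line this sample
print(ukf.x[0])            # smoothed per-sample count estimate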
@@ -50,7 +53,8 @@ ukf.P = np.eye(1) * 0.1 # Initial state covariance (P) - initial uncertainty
MQTT_BROKER = "192.168.1.110"
MQTT_PORT = 1883
-MQTT_TOPIC = "/jc/feedrate/count"
+MQTT_TOPIC = "/jc/feedrate/"
+MQTT_COUNT_TOPIC = "/jc/feedrate/count/"
def on_connect(client, userdata, flags, rc, properties=None):
    print("Connected with result code " + str(rc))
@@ -90,14 +94,27 @@ client.loop_start() # Starts MQTT loop in the background
# ema_filter = FastEMA(alpha=0.1) # Adjust alpha (lower = smoother)
############################################################################################################
+sampleStart = time.time()
+frameTime = time.time()
+frameCount = 0
while True:
    ret,frame = cap.read()
    if not ret:
        break
-   sampleStart = time.time() # Sample Start Time
-   pecanCount = 0 # Reset count for new sample period
    # Define the black box position (adjust as needed)
    top_left = (0, 0)
    bottom_right = (1280, 220)
    # Draw a black rectangle (filled)
    cv2.rectangle(frame, top_left, bottom_right, (0, 0, 0), thickness=-1)
+   frameCount += 1
+   # sampleStart = time.time() # Sample Start Time
+   # pecanCount = 0 # Reset count for new sample period
    # Run YOLO11 tracking on the frame, persisting tracks between frames
    results = model.track(frame, persist=True, classes=0, device = device)
@@ -115,23 +132,45 @@ while True:
            cy = int(y1+y2)//2
            if cy<(cy1+offset) and cy>(cy1-offset) and track_id not in ids:
                pecanCount += 1
+               totalPecanCount += 1
+               print(f'New Count: {pecanCount}')
                ids.add(track_id)
    # filtered_count = ema_filter.update(pecanCount, refThroughput) # Applies exponential moving average filter
-   ukf.predict()
-   ukf.update(np.array([pecanCount]))
-   filtered_count = ukf.x[0]
+   sampleEnd = time.time()
+   print(f'Pecan Count: {pecanCount}')
+   print(f'Total Count: {totalPecanCount}')
+   if (sampleEnd - sampleStart) > samplePeriod:
+       ukf.predict()
+       print(f'Predicted State: {ukf.x[0]}')
+       ukf.update(np.array([pecanCount]))
+       print(f'Updated State: {ukf.x[0]}')
+       filtered_count = ukf.x[0]
+       print(filtered_count)
+       measuredThroughput = (filtered_count * avgPecanWeight) / (samplePeriod) * 15 # lbs / 15 seconds
+       print(f'Published Throughput: {measuredThroughput}')
+       client.publish(MQTT_COUNT_TOPIC, str(measuredThroughput))
+       pecanCount = 0
+       sampleStart = time.time()
+       print(f'Publish Time: {sampleStart-sampleEnd}')
-   sampleEnd = time.time() # End Sample Timer
+   # sampleEnd = time.time() # End Sample Timer
-   samplePeriod = sampleEnd - sampleStart
-   print(samplePeriod)
+   # samplePeriod = sampleEnd - sampleStart
-   measuredThroughput = (filtered_count * avgPecanWeight) / (samplePeriod) * 15 # lbs / 15 seconds
-   client.publish(MQTT_TOPIC, str(measuredThroughput))
+   # measuredThroughput = (filtered_count * avgPecanWeight) / (samplePeriod) * 15 # lbs / 15 seconds
+   #if (new_time := time.time()) > last_time + 0.25:
+       #client.publish(MQTT_COUNT_TOPIC, str(measuredThroughput))
+       #print(samplePeriod)
+       #print(str(last_time) + " " + str(new_time))
+       #last_time = new_time
+   if (time.time()-frameTime) > 10:
+       fps = frameCount / (time.time() - frameTime)
+       print(fps)
+       break
# Release the video capture object and close the display window
cap.release()
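For scale, the publish step above converts a filtered per-sample count into pounds per 15 seconds. A quick worked example using the constants from this commit and a hypothetical filtered count of 10:

avgPecanWeight = 0.0270453125  # lbs per pecan (from this commit)
samplePeriod = 0.25            # seconds per sample window (from this commit)
filtered_count = 10            # hypothetical UKF output
measuredThroughput = (filtered_count * avgPecanWeight) / samplePeriod * 15
print(measuredThroughput)      # 16.2271875, i.e. about 16.23 lbs per 15 s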

Dockerfile

@@ -7,6 +7,6 @@ WORKDIR /app
COPY . .
-RUN pip install --no-cache-dir -r requirements.txt
+RUN pip install -r requirements.txt
CMD [ "python", "Count.py"]
CMD [ "python", "Count.py"]

ffmpegTest.py Normal file

@@ -0,0 +1,42 @@
import ffmpeg
import numpy as np
from ultralytics import YOLO
import paho.mqtt.client as mqtt
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
import time
width = 1280
height = 720
fps = 60
frameCount = 0
# Setup the ffmpeg stream
input_stream = ffmpeg.input('video="0":video_size={}x{}:framerate={}'.format(width, height, fps))
output_stream = ffmpeg.output(input_stream, 'pipe:1', format='rawvideo', pix_fmt='bgr24')
# Initialize the stream
process = ffmpeg.run_async(output_stream, pipe_stdout=True, pipe_stderr=True)
frameTime = time.time()
while True:
    # Read a frame from the stream
    in_bytes = process.stdout.read(width * height * 3)
    if len(in_bytes) < width * height * 3:
        break # End of stream
    # Convert bytes to numpy array
    frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
    frameCount += 1
    if (time.time() - frameTime) > 10:
        trueFPS = frameCount / (time.time()-frameTime)
        print(trueFPS)
        break
# Close the process
process.stdout.close()
process.stderr.close()
process.wait()
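Note that `ffmpeg.input('video="0":video_size=...')` above packs the capture options into the filename string, which ffmpeg will treat as a literal path and fail to open; with ffmpeg-python, these are normally passed as keyword arguments. A sketch of the likely intended call, assuming a Linux V4L2 webcam (the Windows dshow variant is noted in a comment):

import ffmpeg

width, height, fps = 1280, 720, 60
input_stream = ffmpeg.input(
    '/dev/video0',                       # capture device rather than a file path (assumed)
    format='v4l2',                       # on Windows: format='dshow' with 'video=Camera Name'
    video_size='{}x{}'.format(width, height),
    framerate=fps,
)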

hello.py Normal file

@@ -0,0 +1,6 @@
def main():
    print("Hello from usda-throughput-control!")

if __name__ == "__main__":
    main()

recordModel.py Normal file

@@ -0,0 +1,125 @@
import cv2
import numpy as np
from ultralytics import YOLO
import cvzone
import os
import time
device = 'cuda'
# Load the YOLO11 model
model = YOLO("yolo11m-pecan.pt")
# Open the video file (use video file or webcam, here using webcam)
cap = cv2.VideoCapture(0)
cy1=550
offset=60
idDict={}
pecanCount = 0
# Define the desired resolution and FPS
width, height = 640, 360
# Video Title
vidTitle = 'test.avi'
# Set the resolution and other properties for each camera
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# Define the codec and create a VideoWriter object
# MJPEG format example, adjust as needed based on your camera compatibility
fourcc = cv2.VideoWriter_fourcc(*'MJPG') # Codec for MJPEG
output = cv2.VideoWriter('raw.avi', fourcc, 60, (width, height)) # 32.8 is about the actual FPS the camera is recording at
# Start capturing video
start_time = time.time()
frame_count = 0 # To keep track of frames for each camera
while True:
    ret,frame = cap.read()
    if not ret:
        break
    # Define the black box position (adjust as needed)
    top_left = (0, 0)
    bottom_right = (1280, 220)
    # Draw a black rectangle (filled)
    cv2.rectangle(frame, top_left, bottom_right, (0, 0, 0), thickness=-1)
    # Run YOLO11 tracking on the frame, persisting tracks between frames
    results = model.track(frame, persist=True,classes=0,device = device)
    frame_count += 1 # Increment frame count for this camera
    # Check if there are any boxes in the results
    if results[0].boxes is not None and results[0].boxes.id is not None:
        # Get the boxes (x, y, w, h), class IDs, track IDs, and confidences
        boxes = results[0].boxes.xyxy.int().cpu().tolist() # Bounding boxes
        class_ids = results[0].boxes.cls.int().cpu().tolist() # Class IDs
        track_ids = results[0].boxes.id.int().cpu().tolist() # Track IDs
        confidences = results[0].boxes.conf.cpu().tolist() # Confidence score
        for box, class_id, track_id, conf in zip(boxes, class_ids, track_ids, confidences):
            x1, y1, x2, y2 = box
            cy = int(y1+y2)//2
            if track_id in idDict.keys():
                cv2.rectangle(frame,(x1,y1),(x2,y2),(255,255,255),2)
                cvzone.putTextRect(frame,f'{idDict[track_id]}',(x1,y2),1,1)
            else:
                cv2.rectangle(frame,(x1,y1),(x2,y2),(0,255,0),2)
                # cvzone.putTextRect(frame,f'{track_id}',(x1,y2),1,1)
                # cv2.circle(frame,(cx,cy),4,(255,0,0),-1)
                # cvzone.putTextRect(frame,f'{c}',(x1,y1),1,1)
            if cy<(cy1+offset) and cy>(cy1-offset) and track_id not in idDict.keys():
                pecanCount += 1
                idDict[track_id] = pecanCount
    cv2.putText(frame, f'Totals Pecans: {pecanCount}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                0.6, [255, 255, 255], 1)
    cv2.line(frame,(0,550),(1280,550),(255,0,255),2)
    cv2.line(frame,(0,cy1+offset),(1280,cy1+offset),(0,0,255),2)
    cv2.line(frame,(0,cy1-offset),(1280,cy1-offset),(0,0,255),2)
    output.write(frame)
    if (time.time()-start_time) > 5:
        break
# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()
# Calculate and display FPS for each camera
elapsed_time = time.time() - start_time
fps_actual = frame_count / elapsed_time
print(f"Total frames captured from camera: {frame_count}")
print(f"Elapsed time: {elapsed_time:.2f} seconds")
print(f"Actual Frames per second for camera: {fps_actual:.2f}")
cap = cv2.VideoCapture('raw.avi')
# Get original width, height, and codec
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
# Create VideoWriter with new FPS
out = cv2.VideoWriter(vidTitle, fourcc, fps_actual, (width, height))
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    out.write(frame)
cap.release()
out.release()
print(f"Video saved with {fps_actual} FPS.")
if os.path.exists('raw.avi'):
    os.remove('raw.avi')
    print(f"Deleted original file: {'raw.avi'}")
else:
    print(f"File not found: {'raw.avi'}")

requirements.txt

@@ -4,7 +4,8 @@ charset-normalizer==3.4.0
coloredlogs==15.0.1
contourpy==1.3.1
cvzone==1.6.1
-cycler==1.3.1
+cycler
+paho-mqtt
filelock==3.16.1
filterpy==1.4.5
flatbuffers==24.3.25

screenshot.py Normal file

@@ -0,0 +1,17 @@
import cv2
# Open the video file (use video file or webcam, here using webcam)
cap = cv2.VideoCapture(0)
width = 1280
height = 720
# Set the resolution of the camera
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
result, image = cap.read()
cv2.imwrite('test.jpg', image)
cap.release()

testFPS.py Normal file

@@ -0,0 +1,41 @@
import cv2
import numpy as np
from ultralytics import YOLO
import paho.mqtt.client as mqtt
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
import time
width = 1280
height = 720
# fps = 60
frameCount = 0
# Open the video file (use video file or webcam, here using webcam)
cap = cv2.VideoCapture(0)
# Set the resolution and fps of the camera
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# cap.set(cv2.CAP_PROP_FPS, fps)
print(cap.get(cv2.CAP_PROP_FPS))
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frameTime = time.time()
while True:
    ret,frame = cap.read()
    if not ret:
        break
    frameCount += 1
    if (time.time() - frameTime) > 10:
        trueFPS = frameCount / (time.time()-frameTime)
        print(trueFPS)
        break
# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()

visualizeCount.py Normal file

@@ -0,0 +1,77 @@
import cv2
import numpy as np
from ultralytics import YOLO
import cvzone
device = 'cuda'
# Load the YOLO11 model
model = YOLO("yolo11m-pecan.pt")
# Open the video file (use video file or webcam, here using webcam)
cap = cv2.VideoCapture(0)
cy1=550
offset=60
idDict={}
pecanCount = 0
# Define the desired resolution and FPS
width, height = 1280, 720
# Set the resolution and other properties for each camera
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
while True:
    ret,frame = cap.read()
    if not ret:
        break
    # Define the black box position (adjust as needed)
    top_left = (0, 0)
    bottom_right = (1280, 220)
    # Draw a black rectangle (filled)
    cv2.rectangle(frame, top_left, bottom_right, (0, 0, 0), thickness=-1)
    # Run YOLO11 tracking on the frame, persisting tracks between frames
    results = model.track(frame, persist=True,classes=0,device = device)
    # Check if there are any boxes in the results
    if results[0].boxes is not None and results[0].boxes.id is not None:
        # Get the boxes (x, y, w, h), class IDs, track IDs, and confidences
        boxes = results[0].boxes.xyxy.int().cpu().tolist() # Bounding boxes
        class_ids = results[0].boxes.cls.int().cpu().tolist() # Class IDs
        track_ids = results[0].boxes.id.int().cpu().tolist() # Track IDs
        confidences = results[0].boxes.conf.cpu().tolist() # Confidence score
        for box, class_id, track_id, conf in zip(boxes, class_ids, track_ids, confidences):
            x1, y1, x2, y2 = box
            cy = int(y1+y2)//2
            if track_id in idDict.keys():
                cv2.rectangle(frame,(x1,y1),(x2,y2),(255,255,255),2)
                cvzone.putTextRect(frame,f'{idDict[track_id]}',(x1,y2),1,1)
            else:
                cv2.rectangle(frame,(x1,y1),(x2,y2),(0,255,0),2)
                # cvzone.putTextRect(frame,f'{track_id}',(x1,y2),1,1)
                # cv2.circle(frame,(cx,cy),4,(255,0,0),-1)
                # cvzone.putTextRect(frame,f'{c}',(x1,y1),1,1)
            if cy<(cy1+offset) and cy>(cy1-offset) and track_id not in idDict.keys():
                pecanCount += 1
                idDict[track_id] = pecanCount
    cv2.putText(frame, f'Totals Pecans: {pecanCount}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                0.6, [255, 255, 255], 1)
    cv2.line(frame,(0,550),(1280,550),(255,0,255),2)
    cv2.line(frame,(0,cy1+offset),(1280,cy1+offset),(0,0,255),2)
    cv2.line(frame,(0,cy1-offset),(1280,cy1-offset),(0,0,255),2)
    cv2.imshow('Frame', frame)
    cv2.imshow("RGB", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()
print(pecanCount)