Chore: rename api->camera-management-api and web->management-dashboard-web-app; update compose, ignore, README references
95
camera-management-api/ai_agent/examples/demos/cv_grab.py
Normal file
@@ -0,0 +1,95 @@
|
||||
#coding=utf-8
|
||||
import cv2
|
||||
import numpy as np
|
||||
import mvsdk
|
||||
import platform
|
||||
|
||||
def main_loop():
|
||||
# Enumerate cameras
DevList = mvsdk.CameraEnumerateDevice()
|
||||
nDev = len(DevList)
|
||||
if nDev < 1:
|
||||
print("No camera was found!")
|
||||
return
|
||||
|
||||
for i, DevInfo in enumerate(DevList):
|
||||
print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
|
||||
i = 0 if nDev == 1 else int(input("Select camera: "))
|
||||
DevInfo = DevList[i]
|
||||
print(DevInfo)
|
||||
|
||||
# Open the camera
hCamera = 0
|
||||
try:
|
||||
hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
|
||||
except mvsdk.CameraException as e:
|
||||
print("CameraInit Failed({}): {}".format(e.error_code, e.message) )
|
||||
return
|
||||
|
||||
# Get the camera capability description
cap = mvsdk.CameraGetCapability(hCamera)
|
||||
|
||||
# Determine whether the camera is monochrome or color
monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
|
||||
|
||||
# For mono cameras, have the ISP output MONO data directly instead of expanding it to 24-bit R=G=B grayscale
if monoCamera:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
|
||||
else:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)
|
||||
|
||||
# Switch the camera to continuous acquisition mode
mvsdk.CameraSetTriggerMode(hCamera, 0)
|
||||
|
||||
# Manual exposure, 30 ms exposure time
mvsdk.CameraSetAeState(hCamera, 0)
|
||||
mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
|
||||
|
||||
# Start the SDK's internal image-grabbing thread
mvsdk.CameraPlay(hCamera)
|
||||
|
||||
# Compute the required RGB buffer size; allocate for the camera's maximum resolution
FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)
|
||||
|
||||
# Allocate the RGB buffer used to hold the ISP output image
# Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB (mono cameras need no format conversion, but the ISP still does other processing, so this buffer is still required)
pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
|
||||
|
||||
while (cv2.waitKey(1) & 0xFF) != ord('q'):
|
||||
# Grab one frame from the camera
try:
|
||||
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
|
||||
mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
|
||||
mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
|
||||
|
||||
# On Windows the grabbed image is stored upside down (BMP layout); flip it vertically before using it with OpenCV
# On Linux the image is already upright and needs no flip
if platform.system() == "Windows":
|
||||
mvsdk.CameraFlipFrameBuffer(pFrameBuffer, FrameHead, 1)
|
||||
|
||||
# The image is now in pFrameBuffer: RGB data for color cameras, 8-bit grayscale for mono cameras
# Convert pFrameBuffer into an OpenCV image for further processing
frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
|
||||
frame = np.frombuffer(frame_data, dtype=np.uint8)
|
||||
frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth, 1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3) )
|
||||
|
||||
frame = cv2.resize(frame, (640,480), interpolation = cv2.INTER_LINEAR)
|
||||
cv2.imshow("Press q to end", frame)
|
||||
|
||||
except mvsdk.CameraException as e:
|
||||
if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
|
||||
print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message) )
|
||||
|
||||
# Close the camera
mvsdk.CameraUnInit(hCamera)
|
||||
|
||||
# Free the frame buffer
mvsdk.CameraAlignFree(pFrameBuffer)
|
||||
|
||||
def main():
|
||||
try:
|
||||
main_loop()
|
||||
finally:
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
main()
|
||||
127
camera-management-api/ai_agent/examples/demos/cv_grab2.py
Normal file
@@ -0,0 +1,127 @@
|
||||
#coding=utf-8
|
||||
import cv2
|
||||
import numpy as np
|
||||
import mvsdk
|
||||
import platform
|
||||
|
||||
class Camera(object):
|
||||
def __init__(self, DevInfo):
|
||||
super(Camera, self).__init__()
|
||||
self.DevInfo = DevInfo
|
||||
self.hCamera = 0
|
||||
self.cap = None
|
||||
self.pFrameBuffer = 0
|
||||
|
||||
def open(self):
|
||||
if self.hCamera > 0:
|
||||
return True
|
||||
|
||||
# Open the camera
hCamera = 0
|
||||
try:
|
||||
hCamera = mvsdk.CameraInit(self.DevInfo, -1, -1)
|
||||
except mvsdk.CameraException as e:
|
||||
print("CameraInit Failed({}): {}".format(e.error_code, e.message) )
|
||||
return False
|
||||
|
||||
# Get the camera capability description
cap = mvsdk.CameraGetCapability(hCamera)
|
||||
|
||||
# Determine whether the camera is monochrome or color
monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
|
||||
|
||||
# For mono cameras, have the ISP output MONO data directly instead of expanding it to 24-bit R=G=B grayscale
if monoCamera:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
|
||||
else:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)
|
||||
|
||||
# Compute the required RGB buffer size; allocate for the camera's maximum resolution
FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)
|
||||
|
||||
# Allocate the RGB buffer used to hold the ISP output image
# Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB (mono cameras need no format conversion, but the ISP still does other processing, so this buffer is still required)
pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
|
||||
|
||||
# Switch the camera to continuous acquisition mode
mvsdk.CameraSetTriggerMode(hCamera, 0)
|
||||
|
||||
# Manual exposure, 30 ms exposure time
mvsdk.CameraSetAeState(hCamera, 0)
|
||||
mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
|
||||
|
||||
# Start the SDK's internal image-grabbing thread
mvsdk.CameraPlay(hCamera)
|
||||
|
||||
self.hCamera = hCamera
|
||||
self.pFrameBuffer = pFrameBuffer
|
||||
self.cap = cap
|
||||
return True
|
||||
|
||||
def close(self):
|
||||
if self.hCamera > 0:
|
||||
mvsdk.CameraUnInit(self.hCamera)
|
||||
self.hCamera = 0
|
||||
|
||||
mvsdk.CameraAlignFree(self.pFrameBuffer)
|
||||
self.pFrameBuffer = 0
|
||||
|
||||
def grab(self):
|
||||
# Grab one frame from the camera
hCamera = self.hCamera
|
||||
pFrameBuffer = self.pFrameBuffer
|
||||
try:
|
||||
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
|
||||
mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
|
||||
mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
|
||||
|
||||
# On Windows the grabbed image is stored upside down (BMP layout); flip it vertically before using it with OpenCV
# On Linux the image is already upright and needs no flip
if platform.system() == "Windows":
|
||||
mvsdk.CameraFlipFrameBuffer(pFrameBuffer, FrameHead, 1)
|
||||
|
||||
# The image is now in pFrameBuffer: RGB data for color cameras, 8-bit grayscale for mono cameras
# Convert pFrameBuffer into an OpenCV image for further processing
frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
|
||||
frame = np.frombuffer(frame_data, dtype=np.uint8)
|
||||
frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth, 1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3) )
|
||||
return frame
|
||||
except mvsdk.CameraException as e:
|
||||
if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
|
||||
print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message) )
|
||||
return None
|
||||
|
||||
def main_loop():
|
||||
# Enumerate cameras
DevList = mvsdk.CameraEnumerateDevice()
|
||||
nDev = len(DevList)
|
||||
if nDev < 1:
|
||||
print("No camera was found!")
|
||||
return
|
||||
|
||||
for i, DevInfo in enumerate(DevList):
|
||||
print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
|
||||
|
||||
cams = []
|
||||
for i in map(int, input("Select cameras: ").split()):
|
||||
cam = Camera(DevList[i])
|
||||
if cam.open():
|
||||
cams.append(cam)
|
||||
|
||||
while (cv2.waitKey(1) & 0xFF) != ord('q'):
|
||||
for cam in cams:
|
||||
frame = cam.grab()
|
||||
if frame is not None:
|
||||
frame = cv2.resize(frame, (640,480), interpolation = cv2.INTER_LINEAR)
|
||||
cv2.imshow("{} Press q to end".format(cam.DevInfo.GetFriendlyName()), frame)
|
||||
|
||||
for cam in cams:
|
||||
cam.close()
|
||||
|
||||
def main():
|
||||
try:
|
||||
main_loop()
|
||||
finally:
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
main()
|
||||
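The Camera class above can also be used outside the interactive preview loop, for example to take one snapshot per connected camera. A minimal sketch, assuming the Camera class defined above is in scope (cv_grab2.py calls main() at import time, so copy the class rather than importing the module); the output filename prefix is a hypothetical example:

```python
import cv2
import mvsdk

def snapshot_all(prefix="cam"):  # hypothetical output filename prefix
    # Open every enumerated camera using the Camera class from cv_grab2.py
    cams = [Camera(dev) for dev in mvsdk.CameraEnumerateDevice()]
    cams = [cam for cam in cams if cam.open()]
    try:
        for idx, cam in enumerate(cams):
            frame = cam.grab()          # returns None on timeout
            if frame is not None:
                cv2.imwrite("{}_{}.png".format(prefix, idx), frame)
    finally:
        for cam in cams:
            cam.close()
```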
@@ -0,0 +1,110 @@
|
||||
#coding=utf-8
|
||||
import cv2
|
||||
import numpy as np
|
||||
import mvsdk
|
||||
import time
|
||||
import platform
|
||||
|
||||
class App(object):
|
||||
def __init__(self):
|
||||
super(App, self).__init__()
|
||||
self.pFrameBuffer = 0
|
||||
self.quit = False
|
||||
|
||||
def main(self):
|
||||
# Enumerate cameras
DevList = mvsdk.CameraEnumerateDevice()
|
||||
nDev = len(DevList)
|
||||
if nDev < 1:
|
||||
print("No camera was found!")
|
||||
return
|
||||
|
||||
for i, DevInfo in enumerate(DevList):
|
||||
print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
|
||||
i = 0 if nDev == 1 else int(input("Select camera: "))
|
||||
DevInfo = DevList[i]
|
||||
print(DevInfo)
|
||||
|
||||
# Open the camera
hCamera = 0
|
||||
try:
|
||||
hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
|
||||
except mvsdk.CameraException as e:
|
||||
print("CameraInit Failed({}): {}".format(e.error_code, e.message) )
|
||||
return
|
||||
|
||||
# Get the camera capability description
cap = mvsdk.CameraGetCapability(hCamera)
|
||||
|
||||
# Determine whether the camera is monochrome or color
monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
|
||||
|
||||
# For mono cameras, have the ISP output MONO data directly instead of expanding it to 24-bit R=G=B grayscale
if monoCamera:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
|
||||
else:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)
|
||||
|
||||
# Switch the camera to continuous acquisition mode
mvsdk.CameraSetTriggerMode(hCamera, 0)
|
||||
|
||||
# Manual exposure, 30 ms exposure time
mvsdk.CameraSetAeState(hCamera, 0)
|
||||
mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
|
||||
|
||||
# Start the SDK's internal image-grabbing thread
mvsdk.CameraPlay(hCamera)
|
||||
|
||||
# Compute the required RGB buffer size; allocate for the camera's maximum resolution
FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)
|
||||
|
||||
# Allocate the RGB buffer used to hold the ISP output image
# Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB (mono cameras need no format conversion, but the ISP still does other processing, so this buffer is still required)
self.pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
|
||||
|
||||
# Register the frame-capture callback function
self.quit = False
|
||||
mvsdk.CameraSetCallbackFunction(hCamera, self.GrabCallback, 0)
|
||||
|
||||
# Wait until asked to quit
while not self.quit:
|
||||
time.sleep(0.1)
|
||||
|
||||
# Close the camera
mvsdk.CameraUnInit(hCamera)
|
||||
|
||||
# Free the frame buffer
mvsdk.CameraAlignFree(self.pFrameBuffer)
|
||||
|
||||
@mvsdk.method(mvsdk.CAMERA_SNAP_PROC)
|
||||
def GrabCallback(self, hCamera, pRawData, pFrameHead, pContext):
|
||||
FrameHead = pFrameHead[0]
|
||||
pFrameBuffer = self.pFrameBuffer
|
||||
|
||||
mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
|
||||
mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
|
||||
|
||||
# On Windows the grabbed image is stored upside down (BMP layout); flip it vertically before using it with OpenCV
# On Linux the image is already upright and needs no flip
if platform.system() == "Windows":
|
||||
mvsdk.CameraFlipFrameBuffer(pFrameBuffer, FrameHead, 1)
|
||||
|
||||
# The image is now in pFrameBuffer: RGB data for color cameras, 8-bit grayscale for mono cameras
# Convert pFrameBuffer into an OpenCV image for further processing
frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
|
||||
frame = np.frombuffer(frame_data, dtype=np.uint8)
|
||||
frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth, 1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3) )
|
||||
|
||||
frame = cv2.resize(frame, (640,480), interpolation = cv2.INTER_LINEAR)
|
||||
cv2.imshow("Press q to end", frame)
|
||||
if (cv2.waitKey(1) & 0xFF) == ord('q'):
|
||||
self.quit = True
|
||||
|
||||
def main():
|
||||
try:
|
||||
app = App()
|
||||
app.main()
|
||||
finally:
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
main()
|
||||
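The script above drives acquisition through a callback registered with mvsdk.CameraSetCallbackFunction. When you only need to confirm that frames are arriving (for example while debugging trigger settings), the callback can be stripped down to a frame counter. A minimal sketch reusing the same decorator and SDK calls as the demo; skipping CameraImageProcess here is an assumption that raw frames do not need ISP processing for counting:

```python
import mvsdk

class FrameCounter(object):
    """Counts frames delivered by the SDK grab thread without processing them."""

    def __init__(self):
        self.count = 0

    @mvsdk.method(mvsdk.CAMERA_SNAP_PROC)
    def on_frame(self, hCamera, pRawData, pFrameHead, pContext):
        self.count += 1
        # Release the raw buffer right away; no ISP processing or display here.
        mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

# Usage (after CameraInit/CameraPlay, as in the demo above):
#   counter = FrameCounter()
#   mvsdk.CameraSetCallbackFunction(hCamera, counter.on_frame, 0)
```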
@@ -0,0 +1,117 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Demo script to show MQTT console logging in action.
|
||||
|
||||
This script demonstrates the enhanced MQTT logging by starting just the MQTT client
|
||||
and showing the console output.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import signal
|
||||
import logging
|
||||
|
||||
# Add the current directory to Python path
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from usda_vision_system.core.config import Config
|
||||
from usda_vision_system.core.state_manager import StateManager
|
||||
from usda_vision_system.core.events import EventSystem
|
||||
from usda_vision_system.core.logging_config import setup_logging
|
||||
from usda_vision_system.mqtt.client import MQTTClient
|
||||
|
||||
def signal_handler(signum, frame):
|
||||
"""Handle Ctrl+C gracefully"""
|
||||
print("\n🛑 Stopping MQTT demo...")
|
||||
sys.exit(0)
|
||||
|
||||
def main():
|
||||
"""Main demo function"""
|
||||
print("🚀 MQTT Console Logging Demo")
|
||||
print("=" * 50)
|
||||
print()
|
||||
print("This demo shows enhanced MQTT console logging.")
|
||||
print("You'll see colorful console output for MQTT events:")
|
||||
print(" 🔗 Connection status")
|
||||
print(" 📋 Topic subscriptions")
|
||||
print(" 📡 Incoming messages")
|
||||
print(" ⚠️ Disconnections and errors")
|
||||
print()
|
||||
print("Press Ctrl+C to stop the demo.")
|
||||
print("=" * 50)
|
||||
|
||||
# Setup signal handler
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
try:
|
||||
# Setup logging with INFO level for console visibility
|
||||
setup_logging(log_level="INFO", log_file="mqtt_demo.log")
|
||||
|
||||
# Load configuration
|
||||
config = Config()
|
||||
|
||||
# Initialize components
|
||||
state_manager = StateManager()
|
||||
event_system = EventSystem()
|
||||
|
||||
# Create MQTT client
|
||||
mqtt_client = MQTTClient(config, state_manager, event_system)
|
||||
|
||||
print(f"\n🔧 Configuration:")
|
||||
print(f" Broker: {config.mqtt.broker_host}:{config.mqtt.broker_port}")
|
||||
print(f" Topics: {list(config.mqtt.topics.values())}")
|
||||
print()
|
||||
|
||||
# Start MQTT client
|
||||
print("🚀 Starting MQTT client...")
|
||||
if mqtt_client.start():
|
||||
print("✅ MQTT client started successfully!")
|
||||
print("\n👀 Watching for MQTT messages... (Press Ctrl+C to stop)")
|
||||
print("-" * 50)
|
||||
|
||||
# Keep running and show periodic status
|
||||
start_time = time.time()
|
||||
last_status_time = start_time
|
||||
|
||||
while True:
|
||||
time.sleep(1)
|
||||
|
||||
# Show status every 30 seconds
|
||||
current_time = time.time()
|
||||
if current_time - last_status_time >= 30:
|
||||
status = mqtt_client.get_status()
|
||||
uptime = current_time - start_time
|
||||
print(f"\n📊 Status Update (uptime: {uptime:.0f}s):")
|
||||
print(f" Connected: {status['connected']}")
|
||||
print(f" Messages: {status['message_count']}")
|
||||
print(f" Errors: {status['error_count']}")
|
||||
if status['last_message_time']:
|
||||
print(f" Last Message: {status['last_message_time']}")
|
||||
print("-" * 50)
|
||||
last_status_time = current_time
|
||||
|
||||
else:
|
||||
print("❌ Failed to start MQTT client")
|
||||
print(" Check your MQTT broker configuration in config.json")
|
||||
print(" Make sure the broker is running and accessible")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n🛑 Demo stopped by user")
|
||||
except Exception as e:
|
||||
print(f"\n❌ Error: {e}")
|
||||
finally:
|
||||
# Cleanup
|
||||
try:
|
||||
if 'mqtt_client' in locals():
|
||||
mqtt_client.stop()
|
||||
print("🔌 MQTT client stopped")
|
||||
except:
|
||||
pass
|
||||
|
||||
print("\n👋 Demo completed!")
|
||||
print("\n💡 To run the full system with this enhanced logging:")
|
||||
print(" python main.py")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
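To give the demo above something to display, you can publish a test state change from another terminal. A minimal sketch using paho-mqtt with the same broker address and topic values as the test scripts later in this commit; adjust them to your environment:

```python
import paho.mqtt.client as mqtt

BROKER_HOST = "192.168.1.110"           # matches the test scripts; adjust as needed
TOPIC = "vision/vibratory_conveyor/state"

client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION1)
client.connect(BROKER_HOST, 1883, 60)
client.loop_start()                     # run the network loop in the background
info = client.publish(TOPIC, "on")      # simulate the conveyor turning on
info.wait_for_publish()                 # block until the message is actually sent
client.loop_stop()
client.disconnect()
```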
111
camera-management-api/ai_agent/examples/demos/grab.py
Normal file
@@ -0,0 +1,111 @@
|
||||
#coding=utf-8
|
||||
import mvsdk
|
||||
|
||||
def main():
|
||||
# Enumerate cameras
DevList = mvsdk.CameraEnumerateDevice()
|
||||
nDev = len(DevList)
|
||||
if nDev < 1:
|
||||
print("No camera was found!")
|
||||
return
|
||||
|
||||
for i, DevInfo in enumerate(DevList):
|
||||
print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
|
||||
i = 0 if nDev == 1 else int(input("Select camera: "))
|
||||
DevInfo = DevList[i]
|
||||
print(DevInfo)
|
||||
|
||||
# Open the camera
hCamera = 0
|
||||
try:
|
||||
hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
|
||||
except mvsdk.CameraException as e:
|
||||
print("CameraInit Failed({}): {}".format(e.error_code, e.message) )
|
||||
return
|
||||
|
||||
# Get the camera capability description
cap = mvsdk.CameraGetCapability(hCamera)
|
||||
PrintCapbility(cap)
|
||||
|
||||
# Determine whether the camera is monochrome or color
monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
|
||||
|
||||
# For mono cameras, have the ISP output MONO data directly instead of expanding it to 24-bit R=G=B grayscale
if monoCamera:
|
||||
mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
|
||||
|
||||
# Switch the camera to continuous acquisition mode
mvsdk.CameraSetTriggerMode(hCamera, 0)
|
||||
|
||||
# Manual exposure, 30 ms exposure time
mvsdk.CameraSetAeState(hCamera, 0)
|
||||
mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
|
||||
|
||||
# Start the SDK's internal image-grabbing thread
mvsdk.CameraPlay(hCamera)
|
||||
|
||||
# Compute the required RGB buffer size; allocate for the camera's maximum resolution
FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)
|
||||
|
||||
# Allocate the RGB buffer used to hold the ISP output image
# Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB (mono cameras need no format conversion, but the ISP still does other processing, so this buffer is still required)
pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
|
||||
|
||||
# Grab one frame from the camera
try:
|
||||
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 2000)
|
||||
mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
|
||||
mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
|
||||
|
||||
# The image is now in pFrameBuffer: RGB data for color cameras, 8-bit grayscale for mono cameras
# In this example we simply save the image to a file on disk
status = mvsdk.CameraSaveImage(hCamera, "./grab.bmp", pFrameBuffer, FrameHead, mvsdk.FILE_BMP, 100)
|
||||
if status == mvsdk.CAMERA_STATUS_SUCCESS:
|
||||
print("Save image successfully. image_size = {}X{}".format(FrameHead.iWidth, FrameHead.iHeight) )
|
||||
else:
|
||||
print("Save image failed. err={}".format(status) )
|
||||
except mvsdk.CameraException as e:
|
||||
print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message) )
|
||||
|
||||
# Close the camera
mvsdk.CameraUnInit(hCamera)
|
||||
|
||||
# Free the frame buffer
mvsdk.CameraAlignFree(pFrameBuffer)
|
||||
|
||||
def PrintCapbility(cap):
|
||||
for i in range(cap.iTriggerDesc):
|
||||
desc = cap.pTriggerDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iImageSizeDesc):
|
||||
desc = cap.pImageSizeDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iClrTempDesc):
|
||||
desc = cap.pClrTempDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iMediaTypeDesc):
|
||||
desc = cap.pMediaTypeDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iFrameSpeedDesc):
|
||||
desc = cap.pFrameSpeedDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iPackLenDesc):
|
||||
desc = cap.pPackLenDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iPresetLut):
|
||||
desc = cap.pPresetLutDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iAeAlmSwDesc):
|
||||
desc = cap.pAeAlmSwDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iAeAlmHdDesc):
|
||||
desc = cap.pAeAlmHdDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iBayerDecAlmSwDesc):
|
||||
desc = cap.pBayerDecAlmSwDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
for i in range(cap.iBayerDecAlmHdDesc):
|
||||
desc = cap.pBayerDecAlmHdDesc[i]
|
||||
print("{}: {}".format(desc.iIndex, desc.GetDescription()) )
|
||||
|
||||
main()
|
||||
@@ -0,0 +1,234 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
MQTT Publisher Test Script for USDA Vision Camera System
|
||||
|
||||
This script allows you to manually publish test messages to the MQTT topics
|
||||
to simulate machine state changes for testing purposes.
|
||||
|
||||
Usage:
|
||||
python mqtt_publisher_test.py
|
||||
|
||||
The script provides an interactive menu to:
|
||||
1. Send 'on' state to vibratory conveyor
|
||||
2. Send 'off' state to vibratory conveyor
|
||||
3. Send 'on' state to blower separator
|
||||
4. Send 'off' state to blower separator
|
||||
5. Send custom message
|
||||
"""
|
||||
|
||||
import paho.mqtt.client as mqtt
|
||||
import time
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
# MQTT Configuration (matching your system config)
|
||||
MQTT_BROKER_HOST = "192.168.1.110"
|
||||
MQTT_BROKER_PORT = 1883
|
||||
MQTT_USERNAME = None # Set if your broker requires authentication
|
||||
MQTT_PASSWORD = None # Set if your broker requires authentication
|
||||
|
||||
# Topics (from your config.json)
|
||||
MQTT_TOPICS = {
|
||||
"vibratory_conveyor": "vision/vibratory_conveyor/state",
|
||||
"blower_separator": "vision/blower_separator/state"
|
||||
}
|
||||
|
||||
class MQTTPublisher:
|
||||
def __init__(self):
|
||||
self.client = None
|
||||
self.connected = False
|
||||
|
||||
def setup_client(self):
|
||||
"""Setup MQTT client"""
|
||||
try:
|
||||
self.client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION1)
|
||||
self.client.on_connect = self.on_connect
|
||||
self.client.on_disconnect = self.on_disconnect
|
||||
self.client.on_publish = self.on_publish
|
||||
|
||||
if MQTT_USERNAME and MQTT_PASSWORD:
|
||||
self.client.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"❌ Error setting up MQTT client: {e}")
|
||||
return False
|
||||
|
||||
def connect(self):
|
||||
"""Connect to MQTT broker"""
|
||||
try:
|
||||
print(f"🔗 Connecting to MQTT broker at {MQTT_BROKER_HOST}:{MQTT_BROKER_PORT}...")
|
||||
self.client.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT, 60)
|
||||
self.client.loop_start() # Start background loop
|
||||
|
||||
# Wait for connection
|
||||
timeout = 10
|
||||
start_time = time.time()
|
||||
while not self.connected and (time.time() - start_time) < timeout:
|
||||
time.sleep(0.1)
|
||||
|
||||
return self.connected
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Failed to connect to MQTT broker: {e}")
|
||||
return False
|
||||
|
||||
def disconnect(self):
|
||||
"""Disconnect from MQTT broker"""
|
||||
if self.client:
|
||||
self.client.loop_stop()
|
||||
self.client.disconnect()
|
||||
|
||||
def on_connect(self, client, userdata, flags, rc):
|
||||
"""Callback when client connects"""
|
||||
if rc == 0:
|
||||
self.connected = True
|
||||
print(f"✅ Connected to MQTT broker successfully!")
|
||||
else:
|
||||
self.connected = False
|
||||
print(f"❌ Connection failed with return code {rc}")
|
||||
|
||||
def on_disconnect(self, client, userdata, rc):
|
||||
"""Callback when client disconnects"""
|
||||
self.connected = False
|
||||
print(f"🔌 Disconnected from MQTT broker")
|
||||
|
||||
def on_publish(self, client, userdata, mid):
|
||||
"""Callback when message is published"""
|
||||
print(f"📤 Message published successfully (mid: {mid})")
|
||||
|
||||
def publish_message(self, topic, payload):
|
||||
"""Publish a message to a topic"""
|
||||
if not self.connected:
|
||||
print("❌ Not connected to MQTT broker")
|
||||
return False
|
||||
|
||||
try:
|
||||
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
|
||||
print(f"📡 [{timestamp}] Publishing message:")
|
||||
print(f" 📍 Topic: {topic}")
|
||||
print(f" 📄 Payload: '{payload}'")
|
||||
|
||||
result = self.client.publish(topic, payload)
|
||||
|
||||
if result.rc == mqtt.MQTT_ERR_SUCCESS:
|
||||
print(f"✅ Message queued for publishing")
|
||||
return True
|
||||
else:
|
||||
print(f"❌ Failed to publish message (error: {result.rc})")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error publishing message: {e}")
|
||||
return False
|
||||
|
||||
def show_menu(self):
|
||||
"""Show interactive menu"""
|
||||
print("\n" + "=" * 50)
|
||||
print("🎛️ MQTT PUBLISHER TEST MENU")
|
||||
print("=" * 50)
|
||||
print("1. Send 'on' to vibratory conveyor")
|
||||
print("2. Send 'off' to vibratory conveyor")
|
||||
print("3. Send 'on' to blower separator")
|
||||
print("4. Send 'off' to blower separator")
|
||||
print("5. Send custom message")
|
||||
print("6. Show current topics")
|
||||
print("0. Exit")
|
||||
print("-" * 50)
|
||||
|
||||
def handle_menu_choice(self, choice):
|
||||
"""Handle menu selection"""
|
||||
if choice == "1":
|
||||
self.publish_message(MQTT_TOPICS["vibratory_conveyor"], "on")
|
||||
elif choice == "2":
|
||||
self.publish_message(MQTT_TOPICS["vibratory_conveyor"], "off")
|
||||
elif choice == "3":
|
||||
self.publish_message(MQTT_TOPICS["blower_separator"], "on")
|
||||
elif choice == "4":
|
||||
self.publish_message(MQTT_TOPICS["blower_separator"], "off")
|
||||
elif choice == "5":
|
||||
self.custom_message()
|
||||
elif choice == "6":
|
||||
self.show_topics()
|
||||
elif choice == "0":
|
||||
return False
|
||||
else:
|
||||
print("❌ Invalid choice. Please try again.")
|
||||
|
||||
return True
|
||||
|
||||
def custom_message(self):
|
||||
"""Send custom message"""
|
||||
print("\n📝 Custom Message")
|
||||
print("Available topics:")
|
||||
for i, (name, topic) in enumerate(MQTT_TOPICS.items(), 1):
|
||||
print(f" {i}. {name}: {topic}")
|
||||
|
||||
try:
|
||||
topic_choice = input("Select topic (1-2): ").strip()
|
||||
if topic_choice == "1":
|
||||
topic = MQTT_TOPICS["vibratory_conveyor"]
|
||||
elif topic_choice == "2":
|
||||
topic = MQTT_TOPICS["blower_separator"]
|
||||
else:
|
||||
print("❌ Invalid topic choice")
|
||||
return
|
||||
|
||||
payload = input("Enter message payload: ").strip()
|
||||
if payload:
|
||||
self.publish_message(topic, payload)
|
||||
else:
|
||||
print("❌ Empty payload, message not sent")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n❌ Cancelled")
|
||||
|
||||
def show_topics(self):
|
||||
"""Show configured topics"""
|
||||
print("\n📋 Configured Topics:")
|
||||
for name, topic in MQTT_TOPICS.items():
|
||||
print(f" 🏭 {name}: {topic}")
|
||||
|
||||
def run(self):
|
||||
"""Main interactive loop"""
|
||||
print("📤 MQTT Publisher Test")
|
||||
print("=" * 50)
|
||||
print(f"🎯 Broker: {MQTT_BROKER_HOST}:{MQTT_BROKER_PORT}")
|
||||
|
||||
if not self.setup_client():
|
||||
return False
|
||||
|
||||
if not self.connect():
|
||||
print("❌ Failed to connect to MQTT broker")
|
||||
return False
|
||||
|
||||
try:
|
||||
while True:
|
||||
self.show_menu()
|
||||
choice = input("Enter your choice: ").strip()
|
||||
|
||||
if not self.handle_menu_choice(choice):
|
||||
break
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n\n🛑 Interrupted by user")
|
||||
except Exception as e:
|
||||
print(f"\n❌ Error: {e}")
|
||||
finally:
|
||||
self.disconnect()
|
||||
print("👋 Goodbye!")
|
||||
|
||||
return True
|
||||
|
||||
def main():
|
||||
"""Main function"""
|
||||
publisher = MQTTPublisher()
|
||||
|
||||
try:
|
||||
publisher.run()
|
||||
except Exception as e:
|
||||
print(f"❌ Unexpected error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
242
camera-management-api/ai_agent/examples/demos/mqtt_test.py
Normal file
@@ -0,0 +1,242 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
MQTT Test Script for USDA Vision Camera System
|
||||
|
||||
This script tests MQTT message reception by connecting to the broker
|
||||
and listening for messages on the configured topics.
|
||||
|
||||
Usage:
|
||||
python mqtt_test.py
|
||||
|
||||
The script will:
|
||||
1. Connect to the MQTT broker
|
||||
2. Subscribe to all configured topics
|
||||
3. Display received messages with timestamps
|
||||
4. Show connection status and statistics
|
||||
"""
|
||||
|
||||
import paho.mqtt.client as mqtt
|
||||
import time
|
||||
import json
|
||||
import signal
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from typing import Dict, Optional
|
||||
|
||||
# MQTT Configuration (matching your system config)
|
||||
MQTT_BROKER_HOST = "192.168.1.110"
|
||||
MQTT_BROKER_PORT = 1883
|
||||
MQTT_USERNAME = None # Set if your broker requires authentication
|
||||
MQTT_PASSWORD = None # Set if your broker requires authentication
|
||||
|
||||
# Topics to monitor (from your config.json)
|
||||
MQTT_TOPICS = {
|
||||
"vibratory_conveyor": "vision/vibratory_conveyor/state",
|
||||
"blower_separator": "vision/blower_separator/state"
|
||||
}
|
||||
|
||||
class MQTTTester:
|
||||
def __init__(self):
|
||||
self.client: Optional[mqtt.Client] = None
|
||||
self.connected = False
|
||||
self.message_count = 0
|
||||
self.start_time = None
|
||||
self.last_message_time = None
|
||||
self.received_messages = []
|
||||
|
||||
def setup_client(self):
|
||||
"""Setup MQTT client with callbacks"""
|
||||
try:
|
||||
# Create MQTT client
|
||||
self.client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION1)
|
||||
|
||||
# Set callbacks
|
||||
self.client.on_connect = self.on_connect
|
||||
self.client.on_disconnect = self.on_disconnect
|
||||
self.client.on_message = self.on_message
|
||||
self.client.on_subscribe = self.on_subscribe
|
||||
|
||||
# Set authentication if provided
|
||||
if MQTT_USERNAME and MQTT_PASSWORD:
|
||||
self.client.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)
|
||||
print(f"🔐 Using authentication: {MQTT_USERNAME}")
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error setting up MQTT client: {e}")
|
||||
return False
|
||||
|
||||
def connect(self):
|
||||
"""Connect to MQTT broker"""
|
||||
try:
|
||||
print(f"🔗 Connecting to MQTT broker at {MQTT_BROKER_HOST}:{MQTT_BROKER_PORT}...")
|
||||
self.client.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT, 60)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Failed to connect to MQTT broker: {e}")
|
||||
return False
|
||||
|
||||
def on_connect(self, client, userdata, flags, rc):
|
||||
"""Callback when client connects to broker"""
|
||||
if rc == 0:
|
||||
self.connected = True
|
||||
self.start_time = datetime.now()
|
||||
print(f"✅ Successfully connected to MQTT broker!")
|
||||
print(f"📅 Connection time: {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
print()
|
||||
|
||||
# Subscribe to all topics
|
||||
print("📋 Subscribing to topics:")
|
||||
for machine_name, topic in MQTT_TOPICS.items():
|
||||
result, mid = client.subscribe(topic)
|
||||
if result == mqtt.MQTT_ERR_SUCCESS:
|
||||
print(f" ✅ {machine_name}: {topic}")
|
||||
else:
|
||||
print(f" ❌ {machine_name}: {topic} (error: {result})")
|
||||
|
||||
print()
|
||||
print("🎧 Listening for MQTT messages...")
|
||||
print(" (Manually turn machines on/off to trigger messages)")
|
||||
print(" (Press Ctrl+C to stop)")
|
||||
print("-" * 60)
|
||||
|
||||
else:
|
||||
self.connected = False
|
||||
print(f"❌ Connection failed with return code {rc}")
|
||||
print(" Return codes:")
|
||||
print(" 0: Connection successful")
|
||||
print(" 1: Connection refused - incorrect protocol version")
|
||||
print(" 2: Connection refused - invalid client identifier")
|
||||
print(" 3: Connection refused - server unavailable")
|
||||
print(" 4: Connection refused - bad username or password")
|
||||
print(" 5: Connection refused - not authorised")
|
||||
|
||||
def on_disconnect(self, client, userdata, rc):
|
||||
"""Callback when client disconnects from broker"""
|
||||
self.connected = False
|
||||
if rc != 0:
|
||||
print(f"🔌 Unexpected disconnection from MQTT broker (code: {rc})")
|
||||
else:
|
||||
print(f"🔌 Disconnected from MQTT broker")
|
||||
|
||||
def on_subscribe(self, client, userdata, mid, granted_qos):
|
||||
"""Callback when subscription is confirmed"""
|
||||
print(f"📋 Subscription confirmed (mid: {mid}, QoS: {granted_qos})")
|
||||
|
||||
def on_message(self, client, userdata, msg):
|
||||
"""Callback when a message is received"""
|
||||
try:
|
||||
# Decode message
|
||||
topic = msg.topic
|
||||
payload = msg.payload.decode("utf-8").strip()
|
||||
timestamp = datetime.now()
|
||||
|
||||
# Update statistics
|
||||
self.message_count += 1
|
||||
self.last_message_time = timestamp
|
||||
|
||||
# Find machine name
|
||||
machine_name = "unknown"
|
||||
for name, configured_topic in MQTT_TOPICS.items():
|
||||
if topic == configured_topic:
|
||||
machine_name = name
|
||||
break
|
||||
|
||||
# Store message
|
||||
message_data = {
|
||||
"timestamp": timestamp,
|
||||
"topic": topic,
|
||||
"machine": machine_name,
|
||||
"payload": payload,
|
||||
"message_number": self.message_count
|
||||
}
|
||||
self.received_messages.append(message_data)
|
||||
|
||||
# Display message
|
||||
time_str = timestamp.strftime('%H:%M:%S.%f')[:-3] # Include milliseconds
|
||||
print(f"📡 [{time_str}] Message #{self.message_count}")
|
||||
print(f" 🏭 Machine: {machine_name}")
|
||||
print(f" 📍 Topic: {topic}")
|
||||
print(f" 📄 Payload: '{payload}'")
|
||||
print(f" 📊 Total messages: {self.message_count}")
|
||||
print("-" * 60)
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error processing message: {e}")
|
||||
|
||||
def show_statistics(self):
|
||||
"""Show connection and message statistics"""
|
||||
print("\n" + "=" * 60)
|
||||
print("📊 MQTT TEST STATISTICS")
|
||||
print("=" * 60)
|
||||
|
||||
if self.start_time:
|
||||
runtime = datetime.now() - self.start_time
|
||||
print(f"⏱️ Runtime: {runtime}")
|
||||
|
||||
print(f"🔗 Connected: {'Yes' if self.connected else 'No'}")
|
||||
print(f"📡 Messages received: {self.message_count}")
|
||||
|
||||
if self.last_message_time:
|
||||
print(f"🕐 Last message: {self.last_message_time.strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
|
||||
if self.received_messages:
|
||||
print(f"\n📋 Message Summary:")
|
||||
for msg in self.received_messages[-5:]: # Show last 5 messages
|
||||
time_str = msg["timestamp"].strftime('%H:%M:%S')
|
||||
print(f" [{time_str}] {msg['machine']}: {msg['payload']}")
|
||||
|
||||
print("=" * 60)
|
||||
|
||||
def run(self):
|
||||
"""Main test loop"""
|
||||
print("🧪 MQTT Message Reception Test")
|
||||
print("=" * 60)
|
||||
print(f"🎯 Broker: {MQTT_BROKER_HOST}:{MQTT_BROKER_PORT}")
|
||||
print(f"📋 Topics: {list(MQTT_TOPICS.values())}")
|
||||
print()
|
||||
|
||||
# Setup signal handler for graceful shutdown
|
||||
def signal_handler(sig, frame):
|
||||
print(f"\n\n🛑 Received interrupt signal, shutting down...")
|
||||
self.show_statistics()
|
||||
if self.client and self.connected:
|
||||
self.client.disconnect()
|
||||
sys.exit(0)
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
# Setup and connect
|
||||
if not self.setup_client():
|
||||
return False
|
||||
|
||||
if not self.connect():
|
||||
return False
|
||||
|
||||
# Start the client loop
|
||||
try:
|
||||
self.client.loop_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
except Exception as e:
|
||||
print(f"❌ Error in main loop: {e}")
|
||||
|
||||
return True
|
||||
|
||||
def main():
|
||||
"""Main function"""
|
||||
tester = MQTTTester()
|
||||
|
||||
try:
|
||||
success = tester.run()
|
||||
if not success:
|
||||
print("❌ Test failed")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"❌ Unexpected error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
4
camera-management-api/ai_agent/examples/demos/readme.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
mvsdk.py: camera SDK interface library (see the WindowsSDK install directory\Document\MVSDK_API_CHS.chm for the API reference)
|
||||
grab.py: capture an image with the SDK and save it to a file on disk
cv_grab.py: capture images with the SDK and convert them to OpenCV image format
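All of the demos listed above follow the same SDK call sequence. A condensed sketch of that flow (error handling omitted, parameter values taken from the demos):

```python
import mvsdk

DevList = mvsdk.CameraEnumerateDevice()                 # 1. enumerate cameras
hCamera = mvsdk.CameraInit(DevList[0], -1, -1)          # 2. open the first camera
cap = mvsdk.CameraGetCapability(hCamera)                # 3. query capabilities
mvsdk.CameraSetTriggerMode(hCamera, 0)                  # 4. continuous acquisition
mvsdk.CameraSetAeState(hCamera, 0)                      #    manual exposure ...
mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)         #    ... at 30 ms
mvsdk.CameraPlay(hCamera)                               # 5. start the grab thread
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 2000)   # 6. grab a frame
mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)       # 7. release the raw buffer
mvsdk.CameraUnInit(hCamera)                             # 8. close the camera
```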
@@ -0,0 +1,607 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "intro",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Camera Status and Availability Testing\n",
|
||||
"\n",
|
||||
"This notebook tests various methods to check camera status and availability before attempting to capture images.\n",
|
||||
"\n",
|
||||
"## Key Functions to Test:\n",
|
||||
"- `CameraIsOpened()` - Check if camera is already opened by another process\n",
|
||||
"- `CameraInit()` - Try to initialize and catch specific error codes\n",
|
||||
"- `CameraGetImageBuffer()` - Test actual image capture with timeout\n",
|
||||
"- Error code analysis for different failure scenarios"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "imports",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Libraries imported successfully!\n",
|
||||
"Platform: Linux\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Import required libraries\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import time\n",
|
||||
"import numpy as np\n",
|
||||
"import cv2\n",
|
||||
"import platform\n",
|
||||
"from datetime import datetime\n",
|
||||
"\n",
|
||||
"# Add the python demo directory to path to import mvsdk\n",
|
||||
"sys.path.append('../python demo')\n",
|
||||
"import mvsdk\n",
|
||||
"\n",
|
||||
"print(\"Libraries imported successfully!\")\n",
|
||||
"print(f\"Platform: {platform.system()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "error-codes",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Relevant Camera Status Error Codes:\n",
|
||||
"========================================\n",
|
||||
"CAMERA_STATUS_SUCCESS: 0\n",
|
||||
"CAMERA_STATUS_DEVICE_IS_OPENED: -18\n",
|
||||
"CAMERA_STATUS_DEVICE_IS_CLOSED: -19\n",
|
||||
"CAMERA_STATUS_ACCESS_DENY: -45\n",
|
||||
"CAMERA_STATUS_DEVICE_LOST: -38\n",
|
||||
"CAMERA_STATUS_TIME_OUT: -12\n",
|
||||
"CAMERA_STATUS_BUSY: -28\n",
|
||||
"CAMERA_STATUS_NO_DEVICE_FOUND: -16\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Let's examine the relevant error codes from the SDK\n",
|
||||
"print(\"Relevant Camera Status Error Codes:\")\n",
|
||||
"print(\"=\" * 40)\n",
|
||||
"print(f\"CAMERA_STATUS_SUCCESS: {mvsdk.CAMERA_STATUS_SUCCESS}\")\n",
|
||||
"print(f\"CAMERA_STATUS_DEVICE_IS_OPENED: {mvsdk.CAMERA_STATUS_DEVICE_IS_OPENED}\")\n",
|
||||
"print(f\"CAMERA_STATUS_DEVICE_IS_CLOSED: {mvsdk.CAMERA_STATUS_DEVICE_IS_CLOSED}\")\n",
|
||||
"print(f\"CAMERA_STATUS_ACCESS_DENY: {mvsdk.CAMERA_STATUS_ACCESS_DENY}\")\n",
|
||||
"print(f\"CAMERA_STATUS_DEVICE_LOST: {mvsdk.CAMERA_STATUS_DEVICE_LOST}\")\n",
|
||||
"print(f\"CAMERA_STATUS_TIME_OUT: {mvsdk.CAMERA_STATUS_TIME_OUT}\")\n",
|
||||
"print(f\"CAMERA_STATUS_BUSY: {mvsdk.CAMERA_STATUS_BUSY}\")\n",
|
||||
"print(f\"CAMERA_STATUS_NO_DEVICE_FOUND: {mvsdk.CAMERA_STATUS_NO_DEVICE_FOUND}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "status-functions",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Camera Availability Check\n",
|
||||
"==============================\n",
|
||||
"✓ SDK initialized successfully\n",
|
||||
"✓ Found 2 camera(s)\n",
|
||||
" 0: Blower-Yield-Cam (192.168.1.165-192.168.1.54)\n",
|
||||
" 1: Cracker-Cam (192.168.1.167-192.168.1.54)\n",
|
||||
"\n",
|
||||
"Testing camera 0: Blower-Yield-Cam\n",
|
||||
"✓ Camera is available (not opened by another process)\n",
|
||||
"✓ Camera initialized successfully\n",
|
||||
"✓ Camera closed after testing\n",
|
||||
"\n",
|
||||
"Testing camera 1: Cracker-Cam\n",
|
||||
"✓ Camera is available (not opened by another process)\n",
|
||||
"✓ Camera initialized successfully\n",
|
||||
"✓ Camera closed after testing\n",
|
||||
"\n",
|
||||
"Results for 2 cameras:\n",
|
||||
" Camera 0: AVAILABLE\n",
|
||||
" Camera 1: AVAILABLE\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def check_camera_availability():\n",
|
||||
" \"\"\"\n",
|
||||
" Comprehensive camera availability check\n",
|
||||
" \"\"\"\n",
|
||||
" print(\"Camera Availability Check\")\n",
|
||||
" print(\"=\" * 30)\n",
|
||||
" \n",
|
||||
" # Step 1: Initialize SDK\n",
|
||||
" try:\n",
|
||||
" mvsdk.CameraSdkInit(1)\n",
|
||||
" print(\"✓ SDK initialized successfully\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"✗ SDK initialization failed: {e}\")\n",
|
||||
" return None, \"SDK_INIT_FAILED\"\n",
|
||||
" \n",
|
||||
" # Step 2: Enumerate cameras\n",
|
||||
" try:\n",
|
||||
" DevList = mvsdk.CameraEnumerateDevice()\n",
|
||||
" nDev = len(DevList)\n",
|
||||
" print(f\"✓ Found {nDev} camera(s)\")\n",
|
||||
" \n",
|
||||
" if nDev < 1:\n",
|
||||
" print(\"✗ No cameras detected\")\n",
|
||||
" return None, \"NO_CAMERAS\"\n",
|
||||
" \n",
|
||||
" for i, DevInfo in enumerate(DevList):\n",
|
||||
" print(f\" {i}: {DevInfo.GetFriendlyName()} ({DevInfo.GetPortType()})\")\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"✗ Camera enumeration failed: {e}\")\n",
|
||||
" return None, \"ENUM_FAILED\"\n",
|
||||
" \n",
|
||||
" # Step 3: Check all cameras\n",
|
||||
" camera_results = []\n",
|
||||
" \n",
|
||||
" for i, DevInfo in enumerate(DevList):\n",
|
||||
" print(f\"\\nTesting camera {i}: {DevInfo.GetFriendlyName()}\")\n",
|
||||
" \n",
|
||||
" # Check if camera is already opened\n",
|
||||
" try:\n",
|
||||
" is_opened = mvsdk.CameraIsOpened(DevInfo)\n",
|
||||
" if is_opened:\n",
|
||||
" print(\"✗ Camera is already opened by another process\")\n",
|
||||
" camera_results.append((DevInfo, \"ALREADY_OPENED\"))\n",
|
||||
" continue\n",
|
||||
" else:\n",
|
||||
" print(\"✓ Camera is available (not opened by another process)\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"⚠ Could not check if camera is opened: {e}\")\n",
|
||||
" \n",
|
||||
" # Try to initialize camera\n",
|
||||
" try:\n",
|
||||
" hCamera = mvsdk.CameraInit(DevInfo, -1, -1)\n",
|
||||
" print(\"✓ Camera initialized successfully\")\n",
|
||||
" camera_results.append((hCamera, \"AVAILABLE\"))\n",
|
||||
" \n",
|
||||
" # Close the camera after testing\n",
|
||||
" try:\n",
|
||||
" mvsdk.CameraUnInit(hCamera)\n",
|
||||
" print(\"✓ Camera closed after testing\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"⚠ Warning: Could not close camera: {e}\")\n",
|
||||
" \n",
|
||||
" except mvsdk.CameraException as e:\n",
|
||||
" print(f\"✗ Camera initialization failed: {e.error_code} - {e.message}\")\n",
|
||||
" \n",
|
||||
" # Analyze specific error codes\n",
|
||||
" if e.error_code == mvsdk.CAMERA_STATUS_DEVICE_IS_OPENED:\n",
|
||||
" camera_results.append((DevInfo, \"DEVICE_OPENED\"))\n",
|
||||
" elif e.error_code == mvsdk.CAMERA_STATUS_ACCESS_DENY:\n",
|
||||
" camera_results.append((DevInfo, \"ACCESS_DENIED\"))\n",
|
||||
" elif e.error_code == mvsdk.CAMERA_STATUS_DEVICE_LOST:\n",
|
||||
" camera_results.append((DevInfo, \"DEVICE_LOST\"))\n",
|
||||
" else:\n",
|
||||
" camera_results.append((DevInfo, f\"INIT_ERROR_{e.error_code}\"))\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"✗ Unexpected error during initialization: {e}\")\n",
|
||||
" camera_results.append((DevInfo, \"UNEXPECTED_ERROR\"))\n",
|
||||
" \n",
|
||||
" return camera_results\n",
|
||||
"\n",
|
||||
"# Test the function\n",
|
||||
"camera_results = check_camera_availability()\n",
|
||||
"print(f\"\\nResults for {len(camera_results)} cameras:\")\n",
|
||||
"for i, (camera_info, status) in enumerate(camera_results):\n",
|
||||
" if hasattr(camera_info, 'GetFriendlyName'):\n",
|
||||
" name = camera_info.GetFriendlyName()\n",
|
||||
" else:\n",
|
||||
" name = f\"Camera {i}\"\n",
|
||||
" print(f\" {name}: {status}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "test-capture-availability",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Testing capture readiness for 2 available camera(s):\n",
|
||||
"\n",
|
||||
"Testing camera 0 capture readiness...\n",
|
||||
"\n",
|
||||
"Testing Camera Capture Readiness\n",
|
||||
"===================================\n",
|
||||
"✓ Camera capabilities retrieved\n",
|
||||
"✓ Camera type: Color\n",
|
||||
"✓ Basic camera configuration set\n",
|
||||
"✓ Camera started\n",
|
||||
"✓ Frame buffer allocated\n",
|
||||
"\n",
|
||||
"Testing image capture...\n",
|
||||
"✓ Image captured successfully: 1280x1024\n",
|
||||
"✓ Image processed and buffer released\n",
|
||||
"✓ Cleanup completed\n",
|
||||
"Capture Ready for Blower-Yield-Cam: True\n",
|
||||
"\n",
|
||||
"Testing camera 1 capture readiness...\n",
|
||||
"\n",
|
||||
"Testing Camera Capture Readiness\n",
|
||||
"===================================\n",
|
||||
"✓ Camera capabilities retrieved\n",
|
||||
"✓ Camera type: Color\n",
|
||||
"✓ Basic camera configuration set\n",
|
||||
"✓ Camera started\n",
|
||||
"✓ Frame buffer allocated\n",
|
||||
"\n",
|
||||
"Testing image capture...\n",
|
||||
"✓ Image captured successfully: 1280x1024\n",
|
||||
"✓ Image processed and buffer released\n",
|
||||
"✓ Cleanup completed\n",
|
||||
"Capture Ready for Cracker-Cam: True\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def test_camera_capture_readiness(hCamera):\n",
|
||||
" \"\"\"\n",
|
||||
" Test if camera is ready for image capture\n",
|
||||
" \"\"\"\n",
|
||||
" if not isinstance(hCamera, int):\n",
|
||||
" print(\"Camera not properly initialized, skipping capture test\")\n",
|
||||
" return False\n",
|
||||
" \n",
|
||||
" print(\"\\nTesting Camera Capture Readiness\")\n",
|
||||
" print(\"=\" * 35)\n",
|
||||
" \n",
|
||||
" try:\n",
|
||||
" # Get camera capabilities\n",
|
||||
" cap = mvsdk.CameraGetCapability(hCamera)\n",
|
||||
" print(\"✓ Camera capabilities retrieved\")\n",
|
||||
" \n",
|
||||
" # Check camera type\n",
|
||||
" monoCamera = (cap.sIspCapacity.bMonoSensor != 0)\n",
|
||||
" print(f\"✓ Camera type: {'Monochrome' if monoCamera else 'Color'}\")\n",
|
||||
" \n",
|
||||
" # Set basic configuration\n",
|
||||
" if monoCamera:\n",
|
||||
" mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)\n",
|
||||
" else:\n",
|
||||
" mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)\n",
|
||||
" \n",
|
||||
" mvsdk.CameraSetTriggerMode(hCamera, 0) # Continuous mode\n",
|
||||
" mvsdk.CameraSetAeState(hCamera, 0) # Manual exposure\n",
|
||||
" mvsdk.CameraSetExposureTime(hCamera, 5000) # 5ms exposure\n",
|
||||
" print(\"✓ Basic camera configuration set\")\n",
|
||||
" \n",
|
||||
" # Start camera\n",
|
||||
" mvsdk.CameraPlay(hCamera)\n",
|
||||
" print(\"✓ Camera started\")\n",
|
||||
" \n",
|
||||
" # Allocate buffer\n",
|
||||
" FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)\n",
|
||||
" pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)\n",
|
||||
" print(\"✓ Frame buffer allocated\")\n",
|
||||
" \n",
|
||||
" # Test image capture with short timeout\n",
|
||||
" print(\"\\nTesting image capture...\")\n",
|
||||
" try:\n",
|
||||
" pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 1000) # 1 second timeout\n",
|
||||
" print(f\"✓ Image captured successfully: {FrameHead.iWidth}x{FrameHead.iHeight}\")\n",
|
||||
" \n",
|
||||
" # Process and release\n",
|
||||
" mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)\n",
|
||||
" mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)\n",
|
||||
" print(\"✓ Image processed and buffer released\")\n",
|
||||
" \n",
|
||||
" capture_success = True\n",
|
||||
" \n",
|
||||
" except mvsdk.CameraException as e:\n",
|
||||
" print(f\"✗ Image capture failed: {e.error_code} - {e.message}\")\n",
|
||||
" \n",
|
||||
" if e.error_code == mvsdk.CAMERA_STATUS_TIME_OUT:\n",
|
||||
" print(\" → Camera timeout - may be busy or not streaming\")\n",
|
||||
" elif e.error_code == mvsdk.CAMERA_STATUS_DEVICE_LOST:\n",
|
||||
" print(\" → Device lost - camera disconnected\")\n",
|
||||
" elif e.error_code == mvsdk.CAMERA_STATUS_BUSY:\n",
|
||||
" print(\" → Camera busy - may be used by another process\")\n",
|
||||
" \n",
|
||||
" capture_success = False\n",
|
||||
" \n",
|
||||
" # Cleanup\n",
|
||||
" mvsdk.CameraAlignFree(pFrameBuffer)\n",
|
||||
" print(\"✓ Cleanup completed\")\n",
|
||||
" \n",
|
||||
" return capture_success\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"✗ Capture readiness test failed: {e}\")\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
"# Test capture readiness for available cameras\n",
|
||||
"available_cameras = [(cam, stat) for cam, stat in camera_results if stat == \"AVAILABLE\"]\n",
|
||||
"\n",
|
||||
"if available_cameras:\n",
|
||||
" print(f\"\\nTesting capture readiness for {len(available_cameras)} available camera(s):\")\n",
|
||||
" for i, (camera_handle, status) in enumerate(available_cameras):\n",
|
||||
" if hasattr(camera_handle, 'GetFriendlyName'):\n",
|
||||
" # This shouldn't happen for AVAILABLE cameras, but just in case\n",
|
||||
" print(f\"\\nCamera {i}: Invalid handle\")\n",
|
||||
" continue\n",
|
||||
" \n",
|
||||
" print(f\"\\nTesting camera {i} capture readiness...\")\n",
|
||||
" # Re-initialize the camera for testing since we closed it earlier\n",
|
||||
" try:\n",
|
||||
" # Find the camera info from the original results\n",
|
||||
" DevList = mvsdk.CameraEnumerateDevice()\n",
|
||||
" if i < len(DevList):\n",
|
||||
" DevInfo = DevList[i]\n",
|
||||
" hCamera = mvsdk.CameraInit(DevInfo, -1, -1)\n",
|
||||
" capture_ready = test_camera_capture_readiness(hCamera)\n",
|
||||
" print(f\"Capture Ready for {DevInfo.GetFriendlyName()}: {capture_ready}\")\n",
|
||||
" mvsdk.CameraUnInit(hCamera)\n",
|
||||
" else:\n",
|
||||
" print(f\"Could not re-initialize camera {i}\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"Error testing camera {i}: {e}\")\n",
|
||||
"else:\n",
|
||||
" print(\"\\nNo cameras are available for capture testing\")\n",
|
||||
" print(\"Camera statuses:\")\n",
|
||||
" for i, (cam_info, status) in enumerate(camera_results):\n",
|
||||
" if hasattr(cam_info, 'GetFriendlyName'):\n",
|
||||
" name = cam_info.GetFriendlyName()\n",
|
||||
" else:\n",
|
||||
" name = f\"Camera {i}\"\n",
|
||||
" print(f\" {name}: {status}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "comprehensive-check",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"==================================================\n",
|
||||
"COMPREHENSIVE CAMERA CHECK\n",
|
||||
"==================================================\n",
|
||||
"Camera Availability Check\n",
|
||||
"==============================\n",
|
||||
"✓ SDK initialized successfully\n",
|
||||
"✓ Found 2 camera(s)\n",
|
||||
" 0: Blower-Yield-Cam (192.168.1.165-192.168.1.54)\n",
|
||||
" 1: Cracker-Cam (192.168.1.167-192.168.1.54)\n",
|
||||
"\n",
|
||||
"Testing camera 0: Blower-Yield-Cam\n",
|
||||
"✓ Camera is available (not opened by another process)\n",
|
||||
"✓ Camera initialized successfully\n",
|
||||
"✓ Camera closed after testing\n",
|
||||
"\n",
|
||||
"Testing camera 1: Cracker-Cam\n",
|
||||
"✓ Camera is available (not opened by another process)\n",
|
||||
"✓ Camera initialized successfully\n",
|
||||
"✓ Camera closed after testing\n",
|
||||
"\n",
|
||||
"==================================================\n",
|
||||
"FINAL RESULTS:\n",
|
||||
"Camera Available: False\n",
|
||||
"Capture Ready: False\n",
|
||||
"Status: (42, 'AVAILABLE')\n",
|
||||
"==================================================\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def comprehensive_camera_check():\n",
|
||||
" \"\"\"\n",
|
||||
" Complete camera availability and readiness check\n",
|
||||
" Returns: (available, ready, handle_or_info, status_message)\n",
|
||||
" \"\"\"\n",
|
||||
" # Check availability\n",
|
||||
" handle_or_info, status = check_camera_availability()\n",
|
||||
" \n",
|
||||
" available = status == \"AVAILABLE\"\n",
|
||||
" ready = False\n",
|
||||
" \n",
|
||||
" if available:\n",
|
||||
" # Test capture readiness\n",
|
||||
" ready = test_camera_capture_readiness(handle_or_info)\n",
|
||||
" \n",
|
||||
" # Close camera after testing\n",
|
||||
" try:\n",
|
||||
" mvsdk.CameraUnInit(handle_or_info)\n",
|
||||
" print(\"✓ Camera closed after testing\")\n",
|
||||
" except:\n",
|
||||
" pass\n",
|
||||
" \n",
|
||||
" return available, ready, handle_or_info, status\n",
|
||||
"\n",
|
||||
"# Run comprehensive check\n",
|
||||
"print(\"\\n\" + \"=\" * 50)\n",
|
||||
"print(\"COMPREHENSIVE CAMERA CHECK\")\n",
|
||||
"print(\"=\" * 50)\n",
|
||||
"\n",
|
||||
"available, ready, info, status_msg = comprehensive_camera_check()\n",
|
||||
"\n",
|
||||
"print(\"\\n\" + \"=\" * 50)\n",
|
||||
"print(\"FINAL RESULTS:\")\n",
|
||||
"print(f\"Camera Available: {available}\")\n",
|
||||
"print(f\"Capture Ready: {ready}\")\n",
|
||||
"print(f\"Status: {status_msg}\")\n",
|
||||
"print(\"=\" * 50)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "status-check-function",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Testing Simple Camera Ready Check:\n",
|
||||
"========================================\n",
|
||||
"Ready: True\n",
|
||||
"Message: Camera 'Blower-Yield-Cam' is ready for capture\n",
|
||||
"Camera: Blower-Yield-Cam\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def is_camera_ready_for_capture():\n",
|
||||
" \"\"\"\n",
|
||||
" Simple function to check if camera is ready for capture.\n",
|
||||
" Returns: (ready: bool, message: str, camera_info: object or None)\n",
|
||||
" \n",
|
||||
" This is the function you can use in your main capture script.\n",
|
||||
" \"\"\"\n",
|
||||
" try:\n",
|
||||
" # Initialize SDK\n",
|
||||
" mvsdk.CameraSdkInit(1)\n",
|
||||
" \n",
|
||||
" # Enumerate cameras\n",
|
||||
" DevList = mvsdk.CameraEnumerateDevice()\n",
|
||||
" if len(DevList) < 1:\n",
|
||||
" return False, \"No cameras found\", None\n",
|
||||
" \n",
|
||||
" DevInfo = DevList[0]\n",
|
||||
" \n",
|
||||
" # Check if already opened\n",
|
||||
" try:\n",
|
||||
" if mvsdk.CameraIsOpened(DevInfo):\n",
|
||||
" return False, f\"Camera '{DevInfo.GetFriendlyName()}' is already opened by another process\", DevInfo\n",
|
||||
" except:\n",
|
||||
" pass # Some cameras might not support this check\n",
|
||||
" \n",
|
||||
" # Try to initialize\n",
|
||||
" try:\n",
|
||||
" hCamera = mvsdk.CameraInit(DevInfo, -1, -1)\n",
|
||||
" \n",
|
||||
" # Quick capture test\n",
|
||||
" try:\n",
|
||||
" # Basic setup\n",
|
||||
" mvsdk.CameraSetTriggerMode(hCamera, 0)\n",
|
||||
" mvsdk.CameraPlay(hCamera)\n",
|
||||
" \n",
|
||||
" # Try to get one frame with short timeout\n",
|
||||
" pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 500) # 0.5 second timeout\n",
|
||||
" mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)\n",
|
||||
" \n",
|
||||
" # Success - close and return\n",
|
||||
" mvsdk.CameraUnInit(hCamera)\n",
|
||||
" return True, f\"Camera '{DevInfo.GetFriendlyName()}' is ready for capture\", DevInfo\n",
|
||||
" \n",
|
||||
" except mvsdk.CameraException as e:\n",
|
||||
" mvsdk.CameraUnInit(hCamera)\n",
|
||||
" if e.error_code == mvsdk.CAMERA_STATUS_TIME_OUT:\n",
|
||||
" return False, \"Camera timeout - may be busy or not streaming properly\", DevInfo\n",
|
||||
" else:\n",
|
||||
" return False, f\"Camera capture test failed: {e.message}\", DevInfo\n",
|
||||
" \n",
|
||||
" except mvsdk.CameraException as e:\n",
|
||||
" if e.error_code == mvsdk.CAMERA_STATUS_DEVICE_IS_OPENED:\n",
|
||||
" return False, f\"Camera '{DevInfo.GetFriendlyName()}' is already in use\", DevInfo\n",
|
||||
" elif e.error_code == mvsdk.CAMERA_STATUS_ACCESS_DENY:\n",
|
||||
" return False, f\"Access denied to camera '{DevInfo.GetFriendlyName()}'\", DevInfo\n",
|
||||
" else:\n",
|
||||
" return False, f\"Camera initialization failed: {e.message}\", DevInfo\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" return False, f\"Camera check failed: {str(e)}\", None\n",
|
||||
"\n",
|
||||
"# Test the simple function\n",
|
||||
"print(\"\\nTesting Simple Camera Ready Check:\")\n",
|
||||
"print(\"=\" * 40)\n",
|
||||
"\n",
|
||||
"ready, message, camera_info = is_camera_ready_for_capture()\n",
|
||||
"print(f\"Ready: {ready}\")\n",
|
||||
"print(f\"Message: {message}\")\n",
|
||||
"if camera_info:\n",
|
||||
" print(f\"Camera: {camera_info.GetFriendlyName()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "usage-example",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage Example\n",
|
||||
"\n",
|
||||
"Here's how you can integrate the camera status check into your capture script:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# Before attempting to capture images\n",
|
||||
"ready, message, camera_info = is_camera_ready_for_capture()\n",
|
||||
"\n",
|
||||
"if not ready:\n",
|
||||
" print(f\"Camera not ready: {message}\")\n",
|
||||
" # Handle the error appropriately\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
"print(f\"Camera ready: {message}\")\n",
|
||||
"# Proceed with normal capture logic\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Key Findings\n",
|
||||
"\n",
|
||||
"1. **`CameraIsOpened()`** - Checks if camera is opened by another process\n",
|
||||
"2. **`CameraInit()` error codes** - Provide specific failure reasons\n",
|
||||
"3. **Quick capture test** - Verifies camera is actually streaming\n",
|
||||
"4. **Timeout handling** - Detects if camera is busy/unresponsive\n",
|
||||
"\n",
|
||||
"The most reliable approach is to:\n",
|
||||
"1. Check if camera exists\n",
|
||||
"2. Check if it's already opened\n",
|
||||
"3. Try to initialize it\n",
|
||||
"4. Test actual image capture with short timeout\n",
|
||||
"5. Clean up properly"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "USDA-vision-cameras",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
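The readiness probe above returns immediately, so a capture script that starts while another process is still releasing the camera may see a transient failure. A minimal sketch of a polling wrapper, assuming the `is_camera_ready_for_capture()` function defined in the notebook above is in scope (`wait_for_camera` itself is a hypothetical helper):

```python
import time

def wait_for_camera(max_attempts=5, delay_s=2.0):
    """Poll is_camera_ready_for_capture() until the camera is free or attempts run out."""
    for attempt in range(1, max_attempts + 1):
        ready, message, camera_info = is_camera_ready_for_capture()
        if ready:
            return True, message, camera_info
        print(f"Attempt {attempt}/{max_attempts}: {message}")
        time.sleep(delay_s)
    return False, "Camera did not become ready in time", None
```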
@@ -0,0 +1,495 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# GigE Camera Test Setup\n",
|
||||
"\n",
|
||||
"This notebook helps you test and configure your GigE cameras for the USDA vision project.\n",
|
||||
"\n",
|
||||
"## Key Features:\n",
|
||||
"- Test camera connectivity\n",
|
||||
"- Display images inline (no GUI needed)\n",
|
||||
"- Save test images/videos to `/storage`\n",
|
||||
"- Configure camera parameters\n",
|
||||
"- Test recording functionality"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"✅ All imports successful!\n",
|
||||
"OpenCV version: 4.11.0\n",
|
||||
"NumPy version: 2.3.2\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import cv2\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import os\n",
|
||||
"from datetime import datetime\n",
|
||||
"import time\n",
|
||||
"from pathlib import Path\n",
|
||||
"import imageio\n",
|
||||
"from tqdm import tqdm\n",
|
||||
"\n",
|
||||
"# Configure matplotlib for inline display\n",
|
||||
"plt.rcParams['figure.figsize'] = (12, 8)\n",
|
||||
"plt.rcParams['image.cmap'] = 'gray'\n",
|
||||
"\n",
|
||||
"print(\"✅ All imports successful!\")\n",
|
||||
"print(f\"OpenCV version: {cv2.__version__}\")\n",
|
||||
"print(f\"NumPy version: {np.__version__}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Utility Functions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"✅ Utility functions loaded!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def display_image(image, title=\"Image\", figsize=(10, 8)):\n",
|
||||
" \"\"\"Display image inline in Jupyter notebook\"\"\"\n",
|
||||
" plt.figure(figsize=figsize)\n",
|
||||
" if len(image.shape) == 3:\n",
|
||||
" # Convert BGR to RGB for matplotlib\n",
|
||||
" image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
|
||||
" plt.imshow(image_rgb)\n",
|
||||
" else:\n",
|
||||
" plt.imshow(image, cmap='gray')\n",
|
||||
" plt.title(title)\n",
|
||||
" plt.axis('off')\n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
"def save_image_to_storage(image, filename_prefix=\"test_image\"):\n",
|
||||
" \"\"\"Save image to /storage with timestamp\"\"\"\n",
|
||||
" timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
|
||||
" filename = f\"{filename_prefix}_{timestamp}.jpg\"\n",
|
||||
" filepath = f\"/storage/{filename}\"\n",
|
||||
" \n",
|
||||
" success = cv2.imwrite(filepath, image)\n",
|
||||
" if success:\n",
|
||||
" print(f\"✅ Image saved: {filepath}\")\n",
|
||||
" return filepath\n",
|
||||
" else:\n",
|
||||
" print(f\"❌ Failed to save image: {filepath}\")\n",
|
||||
" return None\n",
|
||||
"\n",
|
||||
"def create_storage_subdir(subdir_name):\n",
|
||||
" \"\"\"Create subdirectory in /storage\"\"\"\n",
|
||||
" path = Path(f\"/storage/{subdir_name}\")\n",
|
||||
" path.mkdir(exist_ok=True)\n",
|
||||
" print(f\"📁 Directory ready: {path}\")\n",
|
||||
" return str(path)\n",
|
||||
"\n",
|
||||
"def list_available_cameras():\n",
|
||||
" \"\"\"List all available camera devices\"\"\"\n",
|
||||
" print(\"🔍 Scanning for available cameras...\")\n",
|
||||
" available_cameras = []\n",
|
||||
" \n",
|
||||
" # Test camera indices 0-10\n",
|
||||
" for i in range(11):\n",
|
||||
" cap = cv2.VideoCapture(i)\n",
|
||||
" if cap.isOpened():\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" available_cameras.append(i)\n",
|
||||
" print(f\"📷 Camera {i}: Available (Resolution: {frame.shape[1]}x{frame.shape[0]})\")\n",
|
||||
" cap.release()\n",
|
||||
" else:\n",
|
||||
" # Try with different backends for GigE cameras\n",
|
||||
" cap = cv2.VideoCapture(i, cv2.CAP_GSTREAMER)\n",
|
||||
" if cap.isOpened():\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" available_cameras.append(i)\n",
|
||||
" print(f\"📷 Camera {i}: Available via GStreamer (Resolution: {frame.shape[1]}x{frame.shape[0]})\")\n",
|
||||
" cap.release()\n",
|
||||
" \n",
|
||||
" if not available_cameras:\n",
|
||||
" print(\"❌ No cameras found\")\n",
|
||||
" \n",
|
||||
" return available_cameras\n",
|
||||
"\n",
|
||||
"print(\"✅ Utility functions loaded!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 1: Check Storage Directory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Storage directory exists: True\n",
|
||||
"Storage directory writable: True\n",
|
||||
"📁 Directory ready: /storage/test_images\n",
|
||||
"📁 Directory ready: /storage/test_videos\n",
|
||||
"📁 Directory ready: /storage/camera1\n",
|
||||
"📁 Directory ready: /storage/camera2\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Check storage directory\n",
|
||||
"storage_path = Path(\"/storage\")\n",
|
||||
"print(f\"Storage directory exists: {storage_path.exists()}\")\n",
|
||||
"print(f\"Storage directory writable: {os.access('/storage', os.W_OK)}\")\n",
|
||||
"\n",
|
||||
"# Create test subdirectories\n",
|
||||
"test_images_dir = create_storage_subdir(\"test_images\")\n",
|
||||
"test_videos_dir = create_storage_subdir(\"test_videos\")\n",
|
||||
"camera1_dir = create_storage_subdir(\"camera1\")\n",
|
||||
"camera2_dir = create_storage_subdir(\"camera2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 2: Scan for Available Cameras"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"🔍 Scanning for available cameras...\n",
|
||||
"❌ No cameras found\n",
|
||||
"\n",
|
||||
"📊 Summary: Found 0 camera(s): []\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[ WARN:0@9.977] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video0): can't open camera by index\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.977] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.977] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video1): can't open camera by index\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.977] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.977] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video2): can't open camera by index\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.977] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.977] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video3): can't open camera by index\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.977] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.977] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.977] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video4): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.978] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.978] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video5): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.978] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.978] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video6): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.978] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.978] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video7): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.978] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.978] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video8): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.978] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.978] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video9): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.978] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@9.978] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video10): can't open camera by index\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.978] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@9.979] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@9.979] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@9.979] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Scan for cameras\n",
|
||||
"cameras = list_available_cameras()\n",
|
||||
"print(f\"\\n📊 Summary: Found {len(cameras)} camera(s): {cameras}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 3: Test Individual Camera"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"🔧 Testing camera 0...\n",
|
||||
" Trying Default backend...\n",
|
||||
" ❌ Default backend failed to open\n",
|
||||
" Trying GStreamer backend...\n",
|
||||
" ❌ GStreamer backend failed to open\n",
|
||||
" Trying V4L2 backend...\n",
|
||||
" ❌ V4L2 backend failed to open\n",
|
||||
" Trying FFmpeg backend...\n",
|
||||
" ❌ FFmpeg backend failed to open\n",
|
||||
"❌ Camera 0 not accessible with any backend\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[ WARN:0@27.995] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video0): can't open camera by index\n",
|
||||
"[ WARN:0@27.995] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@27.995] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ WARN:0@27.995] global obsensor_stream_channel_v4l2.cpp:82 xioctl ioctl: fd=-1, req=-2140645888\n",
|
||||
"[ WARN:0@27.995] global obsensor_stream_channel_v4l2.cpp:138 queryUvcDeviceInfoList ioctl error return: 9\n",
|
||||
"[ERROR:0@27.995] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range\n",
|
||||
"[ WARN:0@27.996] global cap_v4l.cpp:913 open VIDEOIO(V4L2:/dev/video0): can't open camera by index\n",
|
||||
"[ WARN:0@27.996] global cap.cpp:478 open VIDEOIO(V4L2): backend is generally available but can't be used to capture by index\n",
|
||||
"[ WARN:0@27.996] global cap.cpp:478 open VIDEOIO(FFMPEG): backend is generally available but can't be used to capture by index\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Test a specific camera (change camera_id as needed)\n",
|
||||
"camera_id = 0 # Change this to test different cameras\n",
|
||||
"\n",
|
||||
"print(f\"🔧 Testing camera {camera_id}...\")\n",
|
||||
"\n",
|
||||
"# Try different backends for GigE cameras\n",
|
||||
"backends_to_try = [\n",
|
||||
" (cv2.CAP_ANY, \"Default\"),\n",
|
||||
" (cv2.CAP_GSTREAMER, \"GStreamer\"),\n",
|
||||
" (cv2.CAP_V4L2, \"V4L2\"),\n",
|
||||
" (cv2.CAP_FFMPEG, \"FFmpeg\")\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"successful_backend = None\n",
|
||||
"cap = None\n",
|
||||
"\n",
|
||||
"for backend, name in backends_to_try:\n",
|
||||
" print(f\" Trying {name} backend...\")\n",
|
||||
" cap = cv2.VideoCapture(camera_id, backend)\n",
|
||||
" if cap.isOpened():\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" print(f\" ✅ {name} backend works!\")\n",
|
||||
" successful_backend = (backend, name)\n",
|
||||
" break\n",
|
||||
" else:\n",
|
||||
" print(f\" ❌ {name} backend opened but can't read frames\")\n",
|
||||
" else:\n",
|
||||
" print(f\" ❌ {name} backend failed to open\")\n",
|
||||
" cap.release()\n",
|
||||
"\n",
|
||||
"if successful_backend:\n",
|
||||
" backend, backend_name = successful_backend\n",
|
||||
" cap = cv2.VideoCapture(camera_id, backend)\n",
|
||||
" \n",
|
||||
" # Get camera properties\n",
|
||||
" width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
|
||||
" height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
|
||||
" fps = cap.get(cv2.CAP_PROP_FPS)\n",
|
||||
" \n",
|
||||
" print(f\"\\n📷 Camera {camera_id} Properties ({backend_name}):\")\n",
|
||||
" print(f\" Resolution: {width}x{height}\")\n",
|
||||
" print(f\" FPS: {fps}\")\n",
|
||||
" \n",
|
||||
" # Capture a test frame\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" print(f\" Frame shape: {frame.shape}\")\n",
|
||||
" print(f\" Frame dtype: {frame.dtype}\")\n",
|
||||
" \n",
|
||||
" # Display the frame\n",
|
||||
" display_image(frame, f\"Camera {camera_id} Test Frame\")\n",
|
||||
" \n",
|
||||
" # Save test image\n",
|
||||
" save_image_to_storage(frame, f\"camera_{camera_id}_test\")\n",
|
||||
" else:\n",
|
||||
" print(\" ❌ Failed to capture frame\")\n",
|
||||
" \n",
|
||||
" cap.release()\n",
|
||||
"else:\n",
|
||||
" print(f\"❌ Camera {camera_id} not accessible with any backend\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 4: Test Video Recording"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test video recording\n",
|
||||
"def test_video_recording(camera_id, duration_seconds=5, fps=30):\n",
|
||||
" \"\"\"Test video recording from camera\"\"\"\n",
|
||||
" print(f\"🎥 Testing video recording from camera {camera_id} for {duration_seconds} seconds...\")\n",
|
||||
" \n",
|
||||
" # Open camera\n",
|
||||
" cap = cv2.VideoCapture(camera_id)\n",
|
||||
" if not cap.isOpened():\n",
|
||||
" print(f\"❌ Cannot open camera {camera_id}\")\n",
|
||||
" return None\n",
|
||||
" \n",
|
||||
" # Get camera properties\n",
|
||||
" width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
|
||||
" height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
|
||||
" \n",
|
||||
" # Create video writer\n",
|
||||
" timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
|
||||
" video_filename = f\"/storage/test_videos/camera_{camera_id}_test_{timestamp}.mp4\"\n",
|
||||
" \n",
|
||||
" fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n",
|
||||
" out = cv2.VideoWriter(video_filename, fourcc, fps, (width, height))\n",
|
||||
" \n",
|
||||
" if not out.isOpened():\n",
|
||||
" print(\"❌ Cannot create video writer\")\n",
|
||||
" cap.release()\n",
|
||||
" return None\n",
|
||||
" \n",
|
||||
" # Record video\n",
|
||||
" frames_to_capture = duration_seconds * fps\n",
|
||||
" frames_captured = 0\n",
|
||||
" \n",
|
||||
" print(f\"Recording {frames_to_capture} frames...\")\n",
|
||||
" \n",
|
||||
" with tqdm(total=frames_to_capture, desc=\"Recording\") as pbar:\n",
|
||||
" start_time = time.time()\n",
|
||||
" \n",
|
||||
" while frames_captured < frames_to_capture:\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" out.write(frame)\n",
|
||||
" frames_captured += 1\n",
|
||||
" pbar.update(1)\n",
|
||||
" \n",
|
||||
" # Display first frame\n",
|
||||
" if frames_captured == 1:\n",
|
||||
" display_image(frame, f\"First frame from camera {camera_id}\")\n",
|
||||
" else:\n",
|
||||
" print(f\"❌ Failed to read frame {frames_captured}\")\n",
|
||||
" break\n",
|
||||
" \n",
|
||||
" # Cleanup\n",
|
||||
" cap.release()\n",
|
||||
" out.release()\n",
|
||||
" \n",
|
||||
" elapsed_time = time.time() - start_time\n",
|
||||
" actual_fps = frames_captured / elapsed_time\n",
|
||||
" \n",
|
||||
" print(f\"✅ Video saved: {video_filename}\")\n",
|
||||
" print(f\"📊 Captured {frames_captured} frames in {elapsed_time:.2f}s\")\n",
|
||||
" print(f\"📊 Actual FPS: {actual_fps:.2f}\")\n",
|
||||
" \n",
|
||||
" return video_filename\n",
|
||||
"\n",
|
||||
"# Test recording (change camera_id as needed)\n",
|
||||
"if cameras: # Only test if cameras were found\n",
|
||||
" test_camera = cameras[0] # Use first available camera\n",
|
||||
" video_file = test_video_recording(test_camera, duration_seconds=3)\n",
|
||||
"else:\n",
|
||||
" print(\"⚠️ No cameras available for video test\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "USDA-vision-cameras",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
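The index scan above finds nothing because these GigE cameras are not exposed as V4L2 devices; they are discovered through the vendor SDK instead, as the other notebooks in this directory do. A minimal sketch, assuming the `mvsdk` module used elsewhere in this repository is importable in this environment:

```python
import mvsdk

def list_mvsdk_cameras():
    """Enumerate GigE cameras through the vendor SDK rather than V4L2 indices."""
    mvsdk.CameraSdkInit(1)
    devices = mvsdk.CameraEnumerateDevice()
    if not devices:
        print("No cameras found via mvsdk")
    for i, dev in enumerate(devices):
        print(f"{i}: {dev.GetFriendlyName()} ({dev.GetPortType()})")
    return devices
```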
@@ -0,0 +1,426 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "ba958c88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# coding=utf-8\n",
|
||||
"\"\"\"\n",
|
||||
"Test script to help find optimal exposure settings for your GigE camera.\n",
|
||||
"This script captures a single test image with different exposure settings.\n",
|
||||
"\"\"\"\n",
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"sys.path.append(\"./python demo\")\n",
|
||||
"import os\n",
|
||||
"import mvsdk\n",
|
||||
"import numpy as np\n",
|
||||
"import cv2\n",
|
||||
"import platform\n",
|
||||
"from datetime import datetime\n",
|
||||
"\n",
|
||||
"# Note: sys.path is extended above so that mvsdk from the SDK's python demo directory can be imported\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "23f1dc49",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def test_exposure_settings():\n",
|
||||
" \"\"\"\n",
|
||||
" Test different exposure settings to find optimal values\n",
|
||||
" \"\"\"\n",
|
||||
" # Initialize SDK\n",
|
||||
" try:\n",
|
||||
" mvsdk.CameraSdkInit(1)\n",
|
||||
" print(\"SDK initialized successfully\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"SDK initialization failed: {e}\")\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" # Enumerate cameras\n",
|
||||
" DevList = mvsdk.CameraEnumerateDevice()\n",
|
||||
" nDev = len(DevList)\n",
|
||||
"\n",
|
||||
" if nDev < 1:\n",
|
||||
" print(\"No camera was found!\")\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" print(f\"Found {nDev} camera(s):\")\n",
|
||||
" for i, DevInfo in enumerate(DevList):\n",
|
||||
" print(f\" {i}: {DevInfo.GetFriendlyName()} ({DevInfo.GetPortType()})\")\n",
|
||||
"\n",
|
||||
" # Use first camera\n",
|
||||
" DevInfo = DevList[0]\n",
|
||||
" print(f\"\\nSelected camera: {DevInfo.GetFriendlyName()}\")\n",
|
||||
"\n",
|
||||
" # Initialize camera\n",
|
||||
" try:\n",
|
||||
" hCamera = mvsdk.CameraInit(DevInfo, -1, -1)\n",
|
||||
" print(\"Camera initialized successfully\")\n",
|
||||
" except mvsdk.CameraException as e:\n",
|
||||
" print(f\"CameraInit Failed({e.error_code}): {e.message}\")\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" try:\n",
|
||||
" # Get camera capabilities\n",
|
||||
" cap = mvsdk.CameraGetCapability(hCamera)\n",
|
||||
" monoCamera = cap.sIspCapacity.bMonoSensor != 0\n",
|
||||
" print(f\"Camera type: {'Monochrome' if monoCamera else 'Color'}\")\n",
|
||||
"\n",
|
||||
" # Get camera ranges\n",
|
||||
" try:\n",
|
||||
" exp_min, exp_max, exp_step = mvsdk.CameraGetExposureTimeRange(hCamera)\n",
|
||||
" print(f\"Exposure time range: {exp_min:.1f} - {exp_max:.1f} μs\")\n",
|
||||
"\n",
|
||||
" gain_min, gain_max, gain_step = mvsdk.CameraGetAnalogGainXRange(hCamera)\n",
|
||||
" print(f\"Analog gain range: {gain_min:.2f} - {gain_max:.2f}x\")\n",
|
||||
"\n",
|
||||
"            print(\"Analog gain range (min, max, step):\", mvsdk.CameraGetAnalogGainXRange(hCamera))\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"Could not get camera ranges: {e}\")\n",
|
||||
" exp_min, exp_max = 100, 100000\n",
|
||||
" gain_min, gain_max = 1.0, 4.0\n",
|
||||
"\n",
|
||||
" # Set output format\n",
|
||||
" if monoCamera:\n",
|
||||
" mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)\n",
|
||||
" else:\n",
|
||||
" mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)\n",
|
||||
"\n",
|
||||
" # Set camera to continuous capture mode\n",
|
||||
" mvsdk.CameraSetTriggerMode(hCamera, 0)\n",
|
||||
" mvsdk.CameraSetAeState(hCamera, 0) # Disable auto exposure\n",
|
||||
"\n",
|
||||
" # Start camera\n",
|
||||
" mvsdk.CameraPlay(hCamera)\n",
|
||||
"\n",
|
||||
" # Allocate frame buffer\n",
|
||||
" FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)\n",
|
||||
" pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)\n",
|
||||
"\n",
|
||||
" # Create test directory\n",
|
||||
" if not os.path.exists(\"exposure_tests\"):\n",
|
||||
" os.makedirs(\"exposure_tests\")\n",
|
||||
"\n",
|
||||
" print(\"\\nTesting different exposure settings...\")\n",
|
||||
" print(\"=\" * 50)\n",
|
||||
"\n",
|
||||
" # Test different exposure times (in microseconds)\n",
|
||||
"        exposure_times = [100, 200, 500, 1000, 2000, 5000, 10000, 20000]  # 0.1 ms to 20 ms\n",
|
||||
"        analog_gains = [2.5, 5.0, 10.0, 16.0]  # from the camera's 2.5x minimum up to 16x\n",
|
||||
"\n",
|
||||
" test_count = 0\n",
|
||||
" for exp_time in exposure_times:\n",
|
||||
" for gain in analog_gains:\n",
|
||||
" # Clamp values to valid ranges\n",
|
||||
" exp_time = max(exp_min, min(exp_max, exp_time))\n",
|
||||
" gain = max(gain_min, min(gain_max, gain))\n",
|
||||
"\n",
|
||||
" print(f\"\\nTest {test_count + 1}: Exposure={exp_time/1000:.1f}ms, Gain={gain:.1f}x\")\n",
|
||||
"\n",
|
||||
" # Set camera parameters\n",
|
||||
" mvsdk.CameraSetExposureTime(hCamera, exp_time)\n",
|
||||
" try:\n",
|
||||
" mvsdk.CameraSetAnalogGainX(hCamera, gain)\n",
|
||||
" except:\n",
|
||||
" pass # Some cameras might not support this\n",
|
||||
"\n",
|
||||
" # Wait a moment for settings to take effect\n",
|
||||
" import time\n",
|
||||
"\n",
|
||||
" time.sleep(0.1)\n",
|
||||
"\n",
|
||||
" # Capture image\n",
|
||||
" try:\n",
|
||||
" pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 2000)\n",
|
||||
" mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)\n",
|
||||
" mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)\n",
|
||||
"\n",
|
||||
" # Handle Windows image flip\n",
|
||||
" if platform.system() == \"Windows\":\n",
|
||||
" mvsdk.CameraFlipFrameBuffer(pFrameBuffer, FrameHead, 1)\n",
|
||||
"\n",
|
||||
" # Convert to numpy array\n",
|
||||
" frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)\n",
|
||||
" frame = np.frombuffer(frame_data, dtype=np.uint8)\n",
|
||||
"\n",
|
||||
" if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8:\n",
|
||||
" frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth))\n",
|
||||
" else:\n",
|
||||
" frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth, 3))\n",
|
||||
"\n",
|
||||
" # Calculate image statistics\n",
|
||||
" mean_brightness = np.mean(frame)\n",
|
||||
" max_brightness = np.max(frame)\n",
|
||||
"\n",
|
||||
" # Save image\n",
|
||||
" filename = f\"exposure_tests/test_{test_count+1:02d}_exp{exp_time/1000:.1f}ms_gain{gain:.1f}x.jpg\"\n",
|
||||
" cv2.imwrite(filename, frame)\n",
|
||||
"\n",
|
||||
" # Provide feedback\n",
|
||||
" status = \"\"\n",
|
||||
" if mean_brightness < 50:\n",
|
||||
" status = \"TOO DARK\"\n",
|
||||
" elif mean_brightness > 200:\n",
|
||||
" status = \"TOO BRIGHT\"\n",
|
||||
" elif max_brightness >= 255:\n",
|
||||
" status = \"OVEREXPOSED\"\n",
|
||||
" else:\n",
|
||||
" status = \"GOOD\"\n",
|
||||
"\n",
|
||||
" print(f\" → Saved: {filename}\")\n",
|
||||
" print(f\" → Brightness: mean={mean_brightness:.1f}, max={max_brightness:.1f} [{status}]\")\n",
|
||||
"\n",
|
||||
" test_count += 1\n",
|
||||
"\n",
|
||||
" except mvsdk.CameraException as e:\n",
|
||||
" print(f\" → Failed to capture: {e.message}\")\n",
|
||||
"\n",
|
||||
" print(f\"\\nCompleted {test_count} test captures!\")\n",
|
||||
" print(\"Check the 'exposure_tests' directory to see the results.\")\n",
|
||||
" print(\"\\nRecommendations:\")\n",
|
||||
" print(\"- Look for images marked as 'GOOD' - these have optimal exposure\")\n",
|
||||
" print(\"- If all images are 'TOO BRIGHT', try lower exposure times or gains\")\n",
|
||||
" print(\"- If all images are 'TOO DARK', try higher exposure times or gains\")\n",
|
||||
" print(\"- Avoid 'OVEREXPOSED' images as they have clipped highlights\")\n",
|
||||
"\n",
|
||||
" # Cleanup\n",
|
||||
" mvsdk.CameraAlignFree(pFrameBuffer)\n",
|
||||
"\n",
|
||||
" finally:\n",
|
||||
" # Close camera\n",
|
||||
" mvsdk.CameraUnInit(hCamera)\n",
|
||||
" print(\"\\nCamera closed\")\n",
|
||||
"\n",
|
||||
" return True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "2891b5bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"GigE Camera Exposure Test Script\n",
|
||||
"========================================\n",
|
||||
"This script will test different exposure settings and save sample images.\n",
|
||||
"Use this to find the optimal settings for your lighting conditions.\n",
|
||||
"\n",
|
||||
"SDK initialized successfully\n",
|
||||
"Found 2 camera(s):\n",
|
||||
" 0: Blower-Yield-Cam (NET-100M-192.168.1.204)\n",
|
||||
" 1: Cracker-Cam (NET-1000M-192.168.1.246)\n",
|
||||
"\n",
|
||||
"Selected camera: Blower-Yield-Cam\n",
|
||||
"Camera initialized successfully\n",
|
||||
"Camera type: Color\n",
|
||||
"Exposure time range: 8.0 - 1048568.0 μs\n",
|
||||
"Analog gain range: 2.50 - 16.50x\n",
|
||||
"Analog gain range (min, max, step): (2.5, 16.5, 0.5)\n",
|
||||
"\n",
|
||||
"Testing different exposure settings...\n",
|
||||
"==================================================\n",
|
||||
"\n",
|
||||
"Test 1: Exposure=0.1ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_01_exp0.1ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=94.1, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 2: Exposure=0.1ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_02_exp0.1ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=13.7, max=173.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 3: Exposure=0.1ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_03_exp0.1ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=14.1, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 4: Exposure=0.1ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_04_exp0.1ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=18.2, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 5: Exposure=0.2ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_05_exp0.2ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=22.1, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 6: Exposure=0.2ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_06_exp0.2ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=19.5, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 7: Exposure=0.2ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_07_exp0.2ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=25.3, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 8: Exposure=0.2ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_08_exp0.2ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=36.6, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 9: Exposure=0.5ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_09_exp0.5ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=55.8, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 10: Exposure=0.5ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_10_exp0.5ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=38.5, max=255.0 [TOO DARK]\n",
|
||||
"\n",
|
||||
"Test 11: Exposure=0.5ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_11_exp0.5ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=60.2, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 12: Exposure=0.5ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_12_exp0.5ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=99.3, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 13: Exposure=1.0ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_13_exp1.0ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=121.1, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 14: Exposure=1.0ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_14_exp1.0ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=68.8, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 15: Exposure=1.0ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_15_exp1.0ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=109.6, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 16: Exposure=1.0ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_16_exp1.0ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=148.7, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 17: Exposure=2.0ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_17_exp2.0ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=171.9, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 18: Exposure=2.0ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_18_exp2.0ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=117.9, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 19: Exposure=2.0ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_19_exp2.0ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=159.0, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 20: Exposure=2.0ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_20_exp2.0ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=195.7, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 21: Exposure=5.0ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_21_exp5.0ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=214.6, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 22: Exposure=5.0ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_22_exp5.0ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=180.2, max=255.0 [OVEREXPOSED]\n",
|
||||
"\n",
|
||||
"Test 23: Exposure=5.0ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_23_exp5.0ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=214.6, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 24: Exposure=5.0ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_24_exp5.0ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=239.6, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 25: Exposure=10.0ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_25_exp10.0ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=247.5, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 26: Exposure=10.0ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_26_exp10.0ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=252.4, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 27: Exposure=10.0ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_27_exp10.0ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=218.9, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 28: Exposure=10.0ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_28_exp10.0ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=250.8, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 29: Exposure=20.0ms, Gain=2.5x\n",
|
||||
" → Saved: exposure_tests/test_29_exp20.0ms_gain2.5x.jpg\n",
|
||||
" → Brightness: mean=252.4, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 30: Exposure=20.0ms, Gain=5.0x\n",
|
||||
" → Saved: exposure_tests/test_30_exp20.0ms_gain5.0x.jpg\n",
|
||||
" → Brightness: mean=244.4, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 31: Exposure=20.0ms, Gain=10.0x\n",
|
||||
" → Saved: exposure_tests/test_31_exp20.0ms_gain10.0x.jpg\n",
|
||||
" → Brightness: mean=251.5, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Test 32: Exposure=20.0ms, Gain=16.0x\n",
|
||||
" → Saved: exposure_tests/test_32_exp20.0ms_gain16.0x.jpg\n",
|
||||
" → Brightness: mean=253.4, max=255.0 [TOO BRIGHT]\n",
|
||||
"\n",
|
||||
"Completed 32 test captures!\n",
|
||||
"Check the 'exposure_tests' directory to see the results.\n",
|
||||
"\n",
|
||||
"Recommendations:\n",
|
||||
"- Look for images marked as 'GOOD' - these have optimal exposure\n",
|
||||
"- If all images are 'TOO BRIGHT', try lower exposure times or gains\n",
|
||||
"- If all images are 'TOO DARK', try higher exposure times or gains\n",
|
||||
"- Avoid 'OVEREXPOSED' images as they have clipped highlights\n",
|
||||
"\n",
|
||||
"Camera closed\n",
|
||||
"\n",
|
||||
"Testing completed successfully!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" print(\"GigE Camera Exposure Test Script\")\n",
|
||||
" print(\"=\" * 40)\n",
|
||||
" print(\"This script will test different exposure settings and save sample images.\")\n",
|
||||
" print(\"Use this to find the optimal settings for your lighting conditions.\")\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" success = test_exposure_settings()\n",
|
||||
"\n",
|
||||
" if success:\n",
|
||||
" print(\"\\nTesting completed successfully!\")\n",
|
||||
" else:\n",
|
||||
" print(\"\\nTesting failed!\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ead8d889",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "cc_pecan",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
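The sweep above brute-forces a grid of exposure/gain pairs; once a rough bracket is known, a bisection on the same mean-brightness heuristic (roughly 50-200 treated as acceptable) converges in a handful of captures, assuming brightness rises monotonically with exposure at fixed gain. A minimal sketch; `capture_mean(exp_us)` is a hypothetical helper that would set the exposure through mvsdk, grab one frame as in the notebook above, and return its mean pixel value:

```python
def find_exposure(capture_mean, lo_us=100, hi_us=20000,
                  target_lo=50, target_hi=200, max_iter=10):
    """Bisect the exposure time until mean brightness lands in [target_lo, target_hi]."""
    mid = lo_us
    for _ in range(max_iter):
        mid = (lo_us + hi_us) // 2
        mean = capture_mean(mid)   # hypothetical: capture one frame at `mid` microseconds
        if mean < target_lo:
            lo_us = mid            # too dark -> expose longer
        elif mean > target_hi:
            hi_us = mid            # too bright -> expose shorter
        else:
            break                  # inside the acceptable band
    return mid
```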
@@ -0,0 +1,385 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Advanced GigE Camera Configuration\n",
|
||||
"\n",
|
||||
"This notebook provides advanced testing and configuration for GigE cameras.\n",
|
||||
"\n",
|
||||
"## Features:\n",
|
||||
"- Network interface detection\n",
|
||||
"- GigE camera discovery\n",
|
||||
"- Camera parameter configuration\n",
|
||||
"- Performance testing\n",
|
||||
"- Dual camera synchronization testing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import cv2\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import subprocess\n",
|
||||
"import socket\n",
|
||||
"import threading\n",
|
||||
"import time\n",
|
||||
"from datetime import datetime\n",
|
||||
"import os\n",
|
||||
"from pathlib import Path\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"print(\"✅ Imports successful!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Network Interface Detection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_network_interfaces():\n",
|
||||
" \"\"\"Get network interface information\"\"\"\n",
|
||||
" try:\n",
|
||||
" result = subprocess.run(['ip', 'addr', 'show'], capture_output=True, text=True)\n",
|
||||
" print(\"🌐 Network Interfaces:\")\n",
|
||||
" print(result.stdout)\n",
|
||||
" \n",
|
||||
" # Also check for GigE specific interfaces\n",
|
||||
" result2 = subprocess.run(['ifconfig'], capture_output=True, text=True)\n",
|
||||
" if result2.returncode == 0:\n",
|
||||
" print(\"\\n📡 Interface Configuration:\")\n",
|
||||
" print(result2.stdout)\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"❌ Error getting network info: {e}\")\n",
|
||||
"\n",
|
||||
"get_network_interfaces()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## GigE Camera Discovery"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def discover_gige_cameras():\n",
|
||||
" \"\"\"Attempt to discover GigE cameras on the network\"\"\"\n",
|
||||
" print(\"🔍 Discovering GigE cameras...\")\n",
|
||||
" \n",
|
||||
" # Try different methods to find GigE cameras\n",
|
||||
" methods = [\n",
|
||||
" \"OpenCV with different backends\",\n",
|
||||
" \"Network scanning\",\n",
|
||||
" \"GStreamer pipeline testing\"\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
" print(\"\\n1. Testing OpenCV backends:\")\n",
|
||||
" backends = [\n",
|
||||
" (cv2.CAP_GSTREAMER, \"GStreamer\"),\n",
|
||||
" (cv2.CAP_V4L2, \"V4L2\"),\n",
|
||||
" (cv2.CAP_FFMPEG, \"FFmpeg\"),\n",
|
||||
" (cv2.CAP_ANY, \"Default\")\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
" for backend_id, backend_name in backends:\n",
|
||||
" print(f\" Testing {backend_name}...\")\n",
|
||||
" for cam_id in range(5):\n",
|
||||
" try:\n",
|
||||
" cap = cv2.VideoCapture(cam_id, backend_id)\n",
|
||||
" if cap.isOpened():\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" print(f\" ✅ Camera {cam_id} accessible via {backend_name}\")\n",
|
||||
" print(f\" Resolution: {frame.shape[1]}x{frame.shape[0]}\")\n",
|
||||
" cap.release()\n",
|
||||
" except Exception as e:\n",
|
||||
" pass\n",
|
||||
" \n",
|
||||
" print(\"\\n2. Testing GStreamer pipelines:\")\n",
|
||||
" # Common GigE camera GStreamer pipelines\n",
|
||||
" gstreamer_pipelines = [\n",
|
||||
" \"v4l2src device=/dev/video0 ! videoconvert ! appsink\",\n",
|
||||
" \"v4l2src device=/dev/video1 ! videoconvert ! appsink\",\n",
|
||||
" \"tcambin ! videoconvert ! appsink\", # For TIS cameras\n",
|
||||
" \"aravis ! videoconvert ! appsink\", # For Aravis-supported cameras\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
" for pipeline in gstreamer_pipelines:\n",
|
||||
" try:\n",
|
||||
" print(f\" Testing: {pipeline}\")\n",
|
||||
" cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)\n",
|
||||
" if cap.isOpened():\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" print(f\" ✅ Pipeline works! Frame shape: {frame.shape}\")\n",
|
||||
" else:\n",
|
||||
" print(f\" ⚠️ Pipeline opened but no frames\")\n",
|
||||
" else:\n",
|
||||
" print(f\" ❌ Pipeline failed\")\n",
|
||||
" cap.release()\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\" ❌ Error: {e}\")\n",
|
||||
"\n",
|
||||
"discover_gige_cameras()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Camera Parameter Configuration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def configure_camera_parameters(camera_id, backend=cv2.CAP_ANY):\n",
|
||||
" \"\"\"Configure and test camera parameters\"\"\"\n",
|
||||
" print(f\"⚙️ Configuring camera {camera_id}...\")\n",
|
||||
" \n",
|
||||
" cap = cv2.VideoCapture(camera_id, backend)\n",
|
||||
" if not cap.isOpened():\n",
|
||||
" print(f\"❌ Cannot open camera {camera_id}\")\n",
|
||||
" return None\n",
|
||||
" \n",
|
||||
" # Get current parameters\n",
|
||||
" current_params = {\n",
|
||||
" 'width': cap.get(cv2.CAP_PROP_FRAME_WIDTH),\n",
|
||||
" 'height': cap.get(cv2.CAP_PROP_FRAME_HEIGHT),\n",
|
||||
" 'fps': cap.get(cv2.CAP_PROP_FPS),\n",
|
||||
" 'brightness': cap.get(cv2.CAP_PROP_BRIGHTNESS),\n",
|
||||
" 'contrast': cap.get(cv2.CAP_PROP_CONTRAST),\n",
|
||||
" 'saturation': cap.get(cv2.CAP_PROP_SATURATION),\n",
|
||||
" 'hue': cap.get(cv2.CAP_PROP_HUE),\n",
|
||||
" 'gain': cap.get(cv2.CAP_PROP_GAIN),\n",
|
||||
" 'exposure': cap.get(cv2.CAP_PROP_EXPOSURE),\n",
|
||||
" 'auto_exposure': cap.get(cv2.CAP_PROP_AUTO_EXPOSURE),\n",
|
||||
" 'white_balance': cap.get(cv2.CAP_PROP_WHITE_BALANCE_BLUE_U),\n",
|
||||
" }\n",
|
||||
" \n",
|
||||
" print(\"📊 Current Camera Parameters:\")\n",
|
||||
" for param, value in current_params.items():\n",
|
||||
" print(f\" {param}: {value}\")\n",
|
||||
" \n",
|
||||
" # Test setting some parameters\n",
|
||||
" print(\"\\n🔧 Testing parameter changes:\")\n",
|
||||
" \n",
|
||||
" # Try to set resolution (common GigE resolutions)\n",
|
||||
" test_resolutions = [(1920, 1080), (1280, 720), (640, 480)]\n",
|
||||
" for width, height in test_resolutions:\n",
|
||||
" if cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) and cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height):\n",
|
||||
" actual_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n",
|
||||
" actual_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n",
|
||||
" print(f\" Resolution {width}x{height}: Set to {actual_width}x{actual_height}\")\n",
|
||||
" break\n",
|
||||
" \n",
|
||||
" # Test FPS settings\n",
|
||||
" for fps in [30, 60, 120]:\n",
|
||||
" if cap.set(cv2.CAP_PROP_FPS, fps):\n",
|
||||
" actual_fps = cap.get(cv2.CAP_PROP_FPS)\n",
|
||||
" print(f\" FPS {fps}: Set to {actual_fps}\")\n",
|
||||
" break\n",
|
||||
" \n",
|
||||
" # Capture test frame with new settings\n",
|
||||
" ret, frame = cap.read()\n",
|
||||
" if ret:\n",
|
||||
" print(f\"\\n✅ Test frame captured: {frame.shape}\")\n",
|
||||
" \n",
|
||||
" # Display frame\n",
|
||||
" plt.figure(figsize=(10, 6))\n",
|
||||
" if len(frame.shape) == 3:\n",
|
||||
" plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n",
|
||||
" else:\n",
|
||||
" plt.imshow(frame, cmap='gray')\n",
|
||||
" plt.title(f\"Camera {camera_id} - Configured\")\n",
|
||||
" plt.axis('off')\n",
|
||||
" plt.show()\n",
|
||||
" \n",
|
||||
" # Save configuration and test image\n",
|
||||
" timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
|
||||
" \n",
|
||||
" # Save image\n",
|
||||
" img_path = f\"/storage/camera{camera_id}/configured_test_{timestamp}.jpg\"\n",
|
||||
" cv2.imwrite(img_path, frame)\n",
|
||||
" print(f\"💾 Test image saved: {img_path}\")\n",
|
||||
" \n",
|
||||
" # Save configuration\n",
|
||||
" config_path = f\"/storage/camera{camera_id}/config_{timestamp}.json\"\n",
|
||||
" with open(config_path, 'w') as f:\n",
|
||||
" json.dump(current_params, f, indent=2)\n",
|
||||
" print(f\"💾 Configuration saved: {config_path}\")\n",
|
||||
" \n",
|
||||
" cap.release()\n",
|
||||
" return current_params\n",
|
||||
"\n",
|
||||
"# Test configuration (change camera_id as needed)\n",
|
||||
"camera_to_configure = 0\n",
|
||||
"config = configure_camera_parameters(camera_to_configure)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dual Camera Testing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def test_dual_cameras(camera1_id=0, camera2_id=1, duration=5):\n",
|
||||
" \"\"\"Test simultaneous capture from two cameras\"\"\"\n",
|
||||
" print(f\"📷📷 Testing dual camera capture (cameras {camera1_id} and {camera2_id})...\")\n",
|
||||
" \n",
|
||||
" # Open both cameras\n",
|
||||
" cap1 = cv2.VideoCapture(camera1_id)\n",
|
||||
" cap2 = cv2.VideoCapture(camera2_id)\n",
|
||||
" \n",
|
||||
" if not cap1.isOpened():\n",
|
||||
" print(f\"❌ Cannot open camera {camera1_id}\")\n",
|
||||
" return\n",
|
||||
" \n",
|
||||
" if not cap2.isOpened():\n",
|
||||
" print(f\"❌ Cannot open camera {camera2_id}\")\n",
|
||||
" cap1.release()\n",
|
||||
" return\n",
|
||||
" \n",
|
||||
" print(\"✅ Both cameras opened successfully\")\n",
|
||||
" \n",
|
||||
" # Capture test frames\n",
|
||||
" ret1, frame1 = cap1.read()\n",
|
||||
" ret2, frame2 = cap2.read()\n",
|
||||
" \n",
|
||||
" if ret1 and ret2:\n",
|
||||
" print(f\"📊 Camera {camera1_id}: {frame1.shape}\")\n",
|
||||
" print(f\"📊 Camera {camera2_id}: {frame2.shape}\")\n",
|
||||
" \n",
|
||||
" # Display both frames side by side\n",
|
||||
" fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))\n",
|
||||
" \n",
|
||||
" if len(frame1.shape) == 3:\n",
|
||||
" ax1.imshow(cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB))\n",
|
||||
" else:\n",
|
||||
" ax1.imshow(frame1, cmap='gray')\n",
|
||||
" ax1.set_title(f\"Camera {camera1_id}\")\n",
|
||||
" ax1.axis('off')\n",
|
||||
" \n",
|
||||
" if len(frame2.shape) == 3:\n",
|
||||
" ax2.imshow(cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB))\n",
|
||||
" else:\n",
|
||||
" ax2.imshow(frame2, cmap='gray')\n",
|
||||
" ax2.set_title(f\"Camera {camera2_id}\")\n",
|
||||
" ax2.axis('off')\n",
|
||||
" \n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.show()\n",
|
||||
" \n",
|
||||
" # Save test images\n",
|
||||
" timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
|
||||
" cv2.imwrite(f\"/storage/camera1/dual_test_{timestamp}.jpg\", frame1)\n",
|
||||
" cv2.imwrite(f\"/storage/camera2/dual_test_{timestamp}.jpg\", frame2)\n",
|
||||
" print(f\"💾 Dual camera test images saved with timestamp {timestamp}\")\n",
|
||||
" \n",
|
||||
" else:\n",
|
||||
" print(\"❌ Failed to capture from one or both cameras\")\n",
|
||||
" \n",
|
||||
" # Test synchronized recording\n",
|
||||
" print(f\"\\n🎥 Testing synchronized recording for {duration} seconds...\")\n",
|
||||
" \n",
|
||||
" # Setup video writers\n",
|
||||
" timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
|
||||
" \n",
|
||||
" fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n",
|
||||
" fps = 30\n",
|
||||
" \n",
|
||||
" if ret1:\n",
|
||||
" h1, w1 = frame1.shape[:2]\n",
|
||||
" out1 = cv2.VideoWriter(f\"/storage/camera1/sync_test_{timestamp}.mp4\", fourcc, fps, (w1, h1))\n",
|
||||
" \n",
|
||||
" if ret2:\n",
|
||||
" h2, w2 = frame2.shape[:2]\n",
|
||||
" out2 = cv2.VideoWriter(f\"/storage/camera2/sync_test_{timestamp}.mp4\", fourcc, fps, (w2, h2))\n",
|
||||
" \n",
|
||||
" # Record synchronized video\n",
|
||||
" start_time = time.time()\n",
|
||||
" frame_count = 0\n",
|
||||
" \n",
|
||||
" while time.time() - start_time < duration:\n",
|
||||
" ret1, frame1 = cap1.read()\n",
|
||||
" ret2, frame2 = cap2.read()\n",
|
||||
" \n",
|
||||
" if ret1 and ret2:\n",
|
||||
" out1.write(frame1)\n",
|
||||
" out2.write(frame2)\n",
|
||||
" frame_count += 1\n",
|
||||
" else:\n",
|
||||
" print(f\"⚠️ Frame drop at frame {frame_count}\")\n",
|
||||
" \n",
|
||||
" # Cleanup\n",
|
||||
" cap1.release()\n",
|
||||
" cap2.release()\n",
|
||||
" if 'out1' in locals():\n",
|
||||
" out1.release()\n",
|
||||
" if 'out2' in locals():\n",
|
||||
" out2.release()\n",
|
||||
" \n",
|
||||
" elapsed = time.time() - start_time\n",
|
||||
" actual_fps = frame_count / elapsed\n",
|
||||
" \n",
|
||||
" print(f\"✅ Synchronized recording complete\")\n",
|
||||
" print(f\"📊 Recorded {frame_count} frames in {elapsed:.2f}s\")\n",
|
||||
" print(f\"📊 Actual FPS: {actual_fps:.2f}\")\n",
|
||||
" print(f\"💾 Videos saved with timestamp {timestamp}\")\n",
|
||||
"\n",
|
||||
"# Test dual cameras (adjust camera IDs as needed)\n",
|
||||
"test_dual_cameras(0, 1, duration=3)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "usda-vision-cameras",
|
||||
"language": "python",
|
||||
"name": "usda-vision-cameras"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
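In the dual-camera loop above the two `cap.read()` calls run back to back, so the second frame always trails the first by one read time. A minimal sketch of overlapping the grabs with threads, assuming plain `cv2.VideoCapture` sources as in the notebook (`grab_pair` is a hypothetical helper):

```python
import threading
import cv2

def grab_pair(cap1, cap2):
    """Read one frame from each capture in parallel; returns two (ret, frame) tuples."""
    results = [None, None]

    def worker(idx, cap):
        results[idx] = cap.read()

    threads = [threading.Thread(target=worker, args=(0, cap1)),
               threading.Thread(target=worker, args=(1, cap2))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results[0], results[1]

# Usage inside the recording loop:
#   (ret1, frame1), (ret2, frame2) = grab_pair(cap1, cap2)
```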
@@ -0,0 +1,146 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "3b92c632",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import paho.mqtt.client as mqtt\n",
|
||||
"import time\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a6753fb1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/tmp/ipykernel_2342/243927247.py:34: DeprecationWarning: Callback API version 1 is deprecated, update to latest version\n",
|
||||
" client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION1) # Use VERSION1 for broader compatibility\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connecting to MQTT broker at 192.168.1.110:1883...\n",
|
||||
"Successfully connected to MQTT Broker!\n",
|
||||
"Subscribed to topic: 'vision/vibratory_conveyor/state'\n",
|
||||
"Listening for messages... (Press Ctrl+C to stop)\n",
|
||||
"\n",
|
||||
"--- MQTT MESSAGE RECEIVED! ---\n",
|
||||
" Topic: vision/vibratory_conveyor/state\n",
|
||||
" Payload: on\n",
|
||||
" Time: 2025-07-25 21:03:21\n",
|
||||
"------------------------------\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--- MQTT MESSAGE RECEIVED! ---\n",
|
||||
" Topic: vision/vibratory_conveyor/state\n",
|
||||
" Payload: off\n",
|
||||
" Time: 2025-07-25 21:05:26\n",
|
||||
"------------------------------\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Stopping MQTT listener.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"# --- MQTT Broker Configuration ---\n",
|
||||
"# Your Home Assistant's IP address (where your MQTT broker is running)\n",
|
||||
"MQTT_BROKER_HOST = \"192.168.1.110\"\n",
|
||||
"MQTT_BROKER_PORT = 1883\n",
|
||||
"# IMPORTANT: Replace with your actual MQTT broker username and password if you have one set up\n",
|
||||
"# (These are NOT your Home Assistant login credentials, but for the Mosquitto add-on, if used)\n",
|
||||
"# MQTT_BROKER_USERNAME = \"pecan\" # e.g., \"homeassistant_mqtt_user\"\n",
|
||||
"# MQTT_BROKER_PASSWORD = \"whatever\" # e.g., \"SuperSecurePassword123!\"\n",
|
||||
"\n",
|
||||
"# --- Topic to Subscribe To ---\n",
|
||||
"# This MUST exactly match the topic you set in your Home Assistant automation\n",
|
||||
"MQTT_TOPIC = \"vision/vibratory_conveyor/state\"  # topic set in the Home Assistant automation (the captured run below used this one)\n",
|
||||
"MQTT_TOPIC = \"vision/blower_separator/state\"  # overrides the line above; comment out to keep listening to the vibratory conveyor\n",
|
||||
"\n",
|
||||
"# The callback for when the client receives a CONNACK response from the server.\n",
|
||||
"def on_connect(client, userdata, flags, rc):\n",
|
||||
" if rc == 0:\n",
|
||||
" print(\"Successfully connected to MQTT Broker!\")\n",
|
||||
" client.subscribe(MQTT_TOPIC)\n",
|
||||
" print(f\"Subscribed to topic: '{MQTT_TOPIC}'\")\n",
|
||||
" print(\"Listening for messages... (Press Ctrl+C to stop)\")\n",
|
||||
" else:\n",
|
||||
" print(f\"Failed to connect, return code {rc}\\n\")\n",
|
||||
"\n",
|
||||
"# The callback for when a PUBLISH message is received from the server.\n",
|
||||
"def on_message(client, userdata, msg):\n",
|
||||
" received_payload = msg.payload.decode()\n",
|
||||
" print(f\"\\n--- MQTT MESSAGE RECEIVED! ---\")\n",
|
||||
" print(f\" Topic: {msg.topic}\")\n",
|
||||
" print(f\" Payload: {received_payload}\")\n",
|
||||
" print(f\" Time: {time.strftime('%Y-%m-%d %H:%M:%S')}\")\n",
|
||||
" print(f\"------------------------------\\n\")\n",
|
||||
"\n",
|
||||
"# Create an MQTT client instance\n",
|
||||
"client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION1) # Use VERSION1 for broader compatibility\n",
|
||||
"\n",
|
||||
"# Set callback functions\n",
|
||||
"client.on_connect = on_connect\n",
|
||||
"client.on_message = on_message\n",
|
||||
"\n",
|
||||
"# Set username and password if required\n",
|
||||
"# (Only uncomment and fill these if your MQTT broker requires authentication)\n",
|
||||
"# client.username_pw_set(MQTT_BROKER_USERNAME, MQTT_BROKER_PASSWORD)\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" # Attempt to connect to the MQTT broker\n",
|
||||
" print(f\"Connecting to MQTT broker at {MQTT_BROKER_HOST}:{MQTT_BROKER_PORT}...\")\n",
|
||||
" client.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT, 60)\n",
|
||||
"\n",
|
||||
" # Start the MQTT loop. This runs in the background and processes messages.\n",
|
||||
" client.loop_forever()\n",
|
||||
"\n",
|
||||
"except KeyboardInterrupt:\n",
|
||||
" print(\"\\nStopping MQTT listener.\")\n",
|
||||
" client.disconnect() # Disconnect gracefully\n",
|
||||
"except Exception as e:\n",
|
||||
" print(f\"An unexpected error occurred: {e}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "56531671",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "USDA-vision-cameras",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
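The listener above only prints payloads; the same callback shape can route the "on"/"off" states into recording hooks. A minimal sketch; `start_fn` and `stop_fn` are hypothetical callables supplied by the caller (for example, functions that begin and end a camera recording):

```python
def make_on_message(start_fn, stop_fn):
    """Build a paho-mqtt on_message callback that maps 'on'/'off' payloads to hooks."""
    def on_message(client, userdata, msg):
        payload = msg.payload.decode().strip().lower()
        if payload == "on":
            start_fn(msg.topic)    # hypothetical hook: begin recording for this machine
        elif payload == "off":
            stop_fn(msg.topic)     # hypothetical hook: stop recording for this machine
        else:
            print(f"Unhandled payload on {msg.topic}: {payload}")
    return on_message

# Usage with the client from the notebook above:
#   client.on_message = make_on_message(start_recording, stop_recording)
```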