diff --git a/DeviceActivate/bin/init b/DeviceActivate/bin/init index 9876def..a8ef9bb 100755 Binary files a/DeviceActivate/bin/init and b/DeviceActivate/bin/init differ diff --git a/DeviceActivate/src/makefile b/DeviceActivate/src/makefile index edc7d72..56cda4a 100644 --- a/DeviceActivate/src/makefile +++ b/DeviceActivate/src/makefile @@ -1,7 +1,7 @@ all:init init:main.cpp - g++ -g -o init main.cpp -lpaho-mqttpp3 -lpaho-mqtt3a -lpthread /home/orangepi/RKApp/DeviceActivate/NetraLib/src/Netra.cpp -I/home/orangepi/RKApp/DeviceActivate/NetraLib/include + g++ -g -o init main.cpp -lpaho-mqttpp3 -lpaho-mqtt3a -lpthread /home/orangepi/RKApp/DeviceActivate/NetraLib/src/Netra.cpp -I/home/orangepi/RKApp/DeviceActivate/NetraLib/include mv init ../bin/ clean: diff --git a/VideoProsessing/bin/video b/VideoProsessing/bin/video index 038ab9c..e605686 100755 Binary files a/VideoProsessing/bin/video and b/VideoProsessing/bin/video differ diff --git a/VideoProsessing/src/main.cpp b/VideoProsessing/src/main.cpp index a7ca26c..6b730da 100644 --- a/VideoProsessing/src/main.cpp +++ b/VideoProsessing/src/main.cpp @@ -1,6 +1,6 @@ /* 本程序用于视频分流 -1.推流摄像头画面至RTSP服务器,交由YOLO模型进行处理 +1.推流摄像头画面,使用UDP原生协议进行推流,交由YOLO模型进行处理 2.接收YOLO传来的坐标,深度,警报等级等等数据 3.根据获取到的数据绘制边框和相应数据 4.将绘制完毕的视频流继续推流至RTSP服务器用于输出 @@ -17,7 +17,7 @@ using namespace chrono_literals; // 全局变量 VideoCapture cap(0); -Mat handleFrame; +Mat handleFrame; // 存放处理后的帧 const string mqtt_url = "tcp://192.168.12.1:1883"; const string clientId = "video_subData"; const string Topic = "/video/PersonData"; @@ -57,7 +57,7 @@ int main() return -1; } - // 初始化mqtt + // 初始化mqtt:订阅,回调函数 MqttInit(); // 主处理循环 @@ -93,12 +93,22 @@ void MqttInit() void getMsgCallback(mqtt::const_message_ptr msg) { string payload = msg->to_string(); - cout << "Recv payload: " << payload << endl; - thread([payload]() { try { // TODO 处理接收到到的位置数据 + auto json = nlohmann::json::parse(payload); + //绘制标况 + for(auto & ii :json) + { + int x = static_cast<int>(ii["x"]); + int y = 
static_cast<int>(ii["y"]); + int w = static_cast<int>(ii["w"]); + int h = static_cast<int>(ii["h"]); + double distance = static_cast<double>(ii["distance"]); + drawRect(x,y,w,h,distance); + } + } catch (const nlohmann::json::parse_error &e) { cerr << "JSON 解析错误: " << e.what() << "\n原始 payload: " << payload << "\n"; @@ -171,20 +181,6 @@ bool processFrame(VideoCapture &cap, FILE *pipe, Mat &frame, int64 &count, chron // 拷贝视频帧 handleFrame = frame; - auto now_ms = chrono::system_clock::now(); - auto timestamp = chrono::duration_cast<chrono::milliseconds>(now_ms.time_since_epoch()).count(); - auto now_time = chrono::system_clock::to_time_t(now_ms); - auto ms = timestamp % 1000; - - char timeStr[100]; - struct tm *tm_info = localtime(&now_time); - sprintf(timeStr, "%02d:%02d:%02d.%03ld", - tm_info->tm_hour, tm_info->tm_min, tm_info->tm_sec, ms); - - // 添加提示文本 - putText(frame, timeStr, Point(0, 20), - FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 0), 2, LINE_8); - - // FPS计算与显示 ++count; auto now = chrono::steady_clock::now(); @@ -199,7 +195,7 @@ bool processFrame(VideoCapture &cap, FILE *pipe, Mat &frame, int64 &count, chron fwrite(frame.data, 1, frame.total() * frame.elemSize(), pipe); fflush(pipe); // 可选:显示窗口 - imshow("测试画面", frame); + // imshow("测试画面", frame); return true; } @@ -212,14 +208,14 @@ void mainLoop(VideoCapture &cap, FILE *pipe) Mat frame; cout << "开始视频处理循环..." << endl; - // // 创建全屏窗口 - // namedWindow("处理后的画面", WINDOW_NORMAL); - // setWindowProperty("处理后的画面", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN); + // 创建全屏窗口 + namedWindow("处理后的画面", WINDOW_NORMAL); + setWindowProperty("处理后的画面", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN); - // // 获取屏幕尺寸(通过获取全屏窗口的实际大小) - // cv::Rect windowRect = getWindowImageRect("处理后的画面"); - // int screenWidth = windowRect.width > 0 ? windowRect.width : 1920; - // int screenHeight = windowRect.height > 0 ? windowRect.height : 1080; + // 获取屏幕尺寸(通过获取全屏窗口的实际大小) + cv::Rect windowRect = getWindowImageRect("处理后的画面"); + int screenWidth = windowRect.width > 0 ? 
windowRect.width : 1920; + int screenHeight = windowRect.height > 0 ? windowRect.height : 1080; Mat displayFrame; // 用于存储缩放后的画面 @@ -230,9 +226,10 @@ void mainLoop(VideoCapture &cap, FILE *pipe) break; } - // // 将 handleFrame 缩放到全屏尺寸 - // resize(handleFrame, displayFrame, Size(screenWidth, screenHeight)); - // imshow("处理后的画面", displayFrame); + // 将 handleFrame 缩放到全屏尺寸 + resize(handleFrame, displayFrame, Size(screenWidth, screenHeight)); + imshow("处理后的画面", displayFrame); + // 检测退出键 if (cv::waitKey(1) == 'q') {