将检测任务迁移python

This commit is contained in:
2025-09-30 14:23:33 +08:00
parent 3fe5f8083d
commit 39d39a7a24
69 changed files with 7921 additions and 1836 deletions

54
.dockerignore Normal file
View File

@@ -0,0 +1,54 @@
# Git
.git
.gitignore
.gitattributes
# CI/CD
.github
.gitlab-ci.yml
# IDE
.idea
.vscode
*.iml
*.ipr
*.iws
# Maven
**/target/
.mvn/
mvnw
mvnw.cmd
# Node
**/node_modules/
**/dist/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Docker
Dockerfile
docker-compose.yml
.dockerignore
# Documentation
*.md
README*
LICENSE
docs/
# Scripts
bin/
*.bat
*.sh
# Logs
*.log
**/logs/
# Temporary files
*.tmp
*.temp
.DS_Store
Thumbs.db

28
.env Normal file
View File

@@ -0,0 +1,28 @@
# MySQL数据库配置
MYSQL_ROOT_PASSWORD=ruoyi123
MYSQL_DATABASE=ry-vue
MYSQL_USER=ruoyi
MYSQL_PASSWORD=ruoyi123
MYSQL_HOST=rtsp-mysql
MYSQL_PORT=3306
# Redis配置
REDIS_HOST=rtsp-redis
REDIS_PORT=6379
REDIS_PASSWORD=
# 后端服务配置
BACKEND_HOST=rtsp-backend
BACKEND_PORT=8080
# 前端服务配置
FRONTEND_PORT=10080
# Python推理服务配置
PYTHON_SERVICE_HOST=rtsp-python-service
PYTHON_SERVICE_PORT=8000
# 时区配置
TZ=Asia/Shanghai
# 备注:MinIO使用外部已部署的服务,配置在application.yml中

View File

@@ -1 +0,0 @@
EWEPEPEOGMGTELIZJUGECKIUJDBCJTCNISGPBNHLJTJUBHEWGNAKGEGAIOHJDQAJGNCFDRFZJEDMJTIBJBDZGZJSFPAUBTBXFSIUFTCMBHGCBKAGHFAMFSGYEIJPGPGXGJEREEBYAJFHFGESCXBJIGGHDBISEQAMAGGWGCADDVENHCIYITAVBUDYDWJGIRIRIKFHBAABGUHEDHFODQGUATGIGSEPBFBMBEDNCUGSEBCGCMCJGTEMFTCGBQBEBCFOCGDOIGEGFDAIEYEUEXGMDWJAFHCEHBGLJHAIIGGQANAKFBCOBGIUHYEWCSGNANCICCCOGNIVITFDHPHQCBANJIBTAQEMJFJLAJFGELHOHPGPAIGCEJDVHOIZHXECIIAMEPDJGBCXGSHCHHCVGVIJCKHKBXCBHKFRISAIDTFWGOGRBHBMFAHYACAQFRHAGCGVBWFWFGBCIGEHEHJBHZHABMGCHJEFCUEDFIBSABDTDYGXBMFRIZAHINEUBJFHICIJHRANJCBXBBBMFEJDJPDQCLIOFLDKDEGTICCMCWIUDWBYGFAPJOAUCCFUEMBEBOGDIFJMHJDQGWBFACBKEREXGPBXDJBPEZCUDTCRFVBWHACNFACDFDFEBIJKJNGWAPAMGQFDDKIJBCJNHLBBDOFMEBGUHDDYISIBEAJMEAIQEDCJFQBEIUHFJTCAIVBZGMCAFBDTHFDCARBRFWBFGRCJDIHGIZABDJFQGLCTHCJUJCJGGNHXHOBHHRIIBQIGJFACDRDBAHFMEMFQAZFXBAIJBCGCHZFRHNBFEPCFBIHFDDAWGPFBECITGIASAIDXHMAIHVHKINFVDHDDGFJHHWECASFHITFQIJACDACVAIDAFVISGAALHFCQACEUHOBOGFDEBHFSBUEQEXESEWIOJUGXHHIZDTEZJGDFIOAJGTDCBUBABPEVBTJJGNGGHOFCCHIWGFCPBDCEANIIACBFBEDSCMIWISCXAZFUBFCSGSENHOHKGLGDGEBTAIBNIOIRGMFBIMHVEXFGFICECPAQCZEFFZERETCTBEHLEEJPIFDVHHGNIZAOINGOCKBPDRCIBQJUBYDCEGFWBECSDDJHGCIUCUCCGKIHDRIRIPJBENGAAPJSDIBECTHAAXIVHZFFDCHDHWJTIQAIIPBYGMEMCYCPIRGOJHINFZASGVDVBWHZCNEFJI

567
COMPLETE-SUMMARY.md Normal file
View File

@@ -0,0 +1,567 @@
# 🎉 RTSP视频分析系统 - 完整更新总结
## 📋 项目概述
本文档总结了RTSP视频分析系统的所有Docker部署和巡检任务记录功能的实现。
## ✅ 完成的工作
### 一、Docker部署方案
#### 1.1 核心配置文件
-`.env` - 环境变量配置前端端口10080
-`docker-compose.yml` - 5个服务编排无MinIO使用外部
-`.dockerignore` - Docker构建优化
#### 1.2 Dockerfile文件
-`ruoyi-admin/Dockerfile` - Java后端多阶段构建
-`rtsp-vue/Dockerfile` - Vue前端多阶段构建
-`rtsp-vue/nginx.conf` - Nginx配置容器名代理
-`python-inference-service/Dockerfile` - Python服务CPU模式
#### 1.3 YOLOv8支持
-`python-inference-service/requirements.txt` - ultralytics>=8.0.0
-`python-inference-service/models/yolov8_model.py` - YOLOv8包装类
-`python-inference-service/models/models.json` - yolov8_detector配置
#### 1.4 部署脚本
-`deploy.bat` - Windows一键部署
-`deploy.sh` - Linux/Mac一键部署
#### 1.5 部署文档
-`README-DOCKER.md` - 完整部署文档
-`DOCKER-QUICK-START.md` - 快速开始指南
-`YOLOV8-SETUP.md` - YOLOv8配置指南
-`DEPLOYMENT-NOTES.md` - 部署配置说明
-`FINAL-SUMMARY.md` - 最终配置总结
### 二、巡检任务记录功能
#### 2.1 新增Service层
-`IInspectionTaskRecordService.java` - 服务接口
-`IInspectionTaskRecordServiceImpl.java` - 服务实现
#### 2.2 修改核心服务
-`InspectionTaskServiceImpl.java`
- 添加依赖注入InspectionTaskRecordMapper等
- 修改`executeInspectionTask()` - 创建记录
- 新增`performVideoAnalysisWithRecord()` - 录制+分析
- 新增`analyzeVideoAndUpdateRecord()` - 调用分析
- 新增`updateRecordFailed()` - 失败处理
-`VideoAnalysisService.java`
- 更新Python服务URL为容器名
- 更新模型名称为yolov8_detector
- 新增`analyzeVideoWithRecord()` - 带记录的分析
- 新增`processVideoWithRecord()` - 处理并记录
- 新增`createAlarmRecordForRecord()` - 去重告警
- 新增`uploadProcessedVideoForRecord()` - 上传视频
#### 2.3 功能文档
-`INSPECTION-WORKFLOW.md` - 详细工作流程
-`INSPECTION-FEATURE-SUMMARY.md` - 功能总结
## 🎯 关键特性
### Docker部署特性
| 特性 | 说明 |
|------|------|
| YOLOv8 | CPU模式无需GPU/CUDA |
| MinIO | 使用外部服务49.232.154.205:10900 |
| 端口 | 只暴露前端10080端口 |
| 网络 | 容器间使用容器名通信 |
| 服务 | MySQL, Redis, Python(CPU), Backend, Frontend |
### 巡检任务特性
| 特性 | 说明 |
|------|------|
| 自动记录 | 每次执行自动创建InspectionTaskRecord |
| 视频保存 | 自动录制并上传到MinIO |
| AI识别 | 调用Python服务(YOLOv8)识别 |
| 结果更新 | 自动更新record.accessory和result |
| 告警去重 | 相同对象只创建一次告警 |
## 🔄 完整工作流程
```
用户启动巡检任务
创建InspectionTaskRecord
├── recordId: [auto]
├── taskId: 1001
├── executeTime: now
└── status: 1 (执行中)
录制RTSP视频流
├── 抓取视频流
├── 录制30秒
└── 保存临时文件
上传原始视频
├── 上传到MinIO
├── 获取URL
└── 更新record.accessory = "video1.mp4"
AI识别处理
├── 逐帧分析
├── 每10帧调用Python API (YOLOv8)
├── 检测结果去重
└── 创建AlarmRecord
├── 提取检测区域图片
├── 上传告警图片
└── 保存告警记录
生成处理后视频
├── 绘制检测框
├── 上传到MinIO
└── 更新record.accessory += ";video2.mp4"
更新记录
├── record.result = "检测结果摘要"
├── record.duration = 32秒
└── record.status = 0 (成功)
```
## 📊 数据表关系
```
InspectionTask (1)
InspectionTaskRecord (N)
├── accessory: 原始视频URL;处理后视频URL
├── result: 共检测到X个问题详情...
└── status: 0=成功, 1=失败, 2=部分成功
AlarmRecord (N)
├── 告警类型、内容、置信度
├── 告警图片URL
├── 视频帧位置
└── 自动去重(相同对象只记录一次)
```
## 🚀 快速开始
### 1. Docker部署
```bash
# 1. 准备YOLOv8模型
# 将best.pt放到: python-inference-service/models/best.pt
# 2. 一键部署
deploy.bat # Windows
# 3. 访问系统
# http://localhost:10080
```
### 2. 创建巡检任务
```java
// 创建任务
InspectionTask task = new InspectionTask();
task.setDeviceId(deviceId);
task.setDuration(30); // 录制30秒
task.setStatus(0); // 待执行
inspectionTaskService.insertInspectionTask(task);
// 启动任务
inspectionTaskService.executeInspectionTask(task.getTaskId());
```
### 3. 查看结果
```sql
-- 查看执行记录
SELECT * FROM v_inspection_task_record
WHERE task_id = ?
ORDER BY execute_time DESC;
-- 查看告警
SELECT * FROM v_alarm_record
WHERE task_id = ?
ORDER BY create_time DESC;
```
## 📦 文件清单
### Docker部署文件15个
```
.env # 环境变量
docker-compose.yml # 服务编排
.dockerignore # 构建优化
ruoyi-admin/Dockerfile # 后端镜像
rtsp-vue/Dockerfile # 前端镜像
rtsp-vue/nginx.conf # Nginx配置
rtsp-vue/.dockerignore # 前端构建优化
python-inference-service/Dockerfile # Python镜像
python-inference-service/.dockerignore # Python构建优化
deploy.bat # Windows部署脚本
deploy.sh # Linux部署脚本
README-DOCKER.md # 完整文档
DOCKER-QUICK-START.md # 快速开始
DEPLOYMENT-NOTES.md # 配置说明
FINAL-SUMMARY.md # 配置总结
```
### YOLOv8文件4个
```
python-inference-service/requirements.txt # ultralytics依赖
python-inference-service/models/yolov8_model.py # YOLOv8包装类
python-inference-service/models/models.json # 模型配置
YOLOV8-SETUP.md # 配置指南
```
### 巡检任务文件5个
```
ruoyi-video/src/main/java/com/ruoyi/video/service/
├── IInspectionTaskRecordService.java # 记录服务接口
└── impl/
└── IInspectionTaskRecordServiceImpl.java # 记录服务实现
ruoyi-video/src/main/java/com/ruoyi/video/service/impl/
├── InspectionTaskServiceImpl.java (修改) # 添加记录创建
└── VideoAnalysisService.java (修改) # 添加记录更新
INSPECTION-WORKFLOW.md # 工作流程文档
INSPECTION-FEATURE-SUMMARY.md # 功能总结
```
### 后端配置修改2个
```
ruoyi-admin/pom.xml # 添加actuator依赖
ruoyi-admin/src/main/resources/application.yml # 添加actuator配置
```
## 🎯 配置要点
### 1. 环境变量(.env
```bash
FRONTEND_PORT=10080 # 前端对外端口
MYSQL_HOST=rtsp-mysql # 数据库容器名
REDIS_HOST=rtsp-redis # Redis容器名
BACKEND_HOST=rtsp-backend # 后端容器名
PYTHON_SERVICE_HOST=rtsp-python-service # Python服务容器名
# MinIO配置在application.yml中
```
### 2. MinIO配置application.yml
```yaml
minio:
endpoint: http://49.232.154.205:10900
access-key: 4EsLD9g9OM09DT0HaBKj
secret-key: 05SFC5fleqTnaLRYBrxHiphMFYbGX5nYicj0WCHA
bucket: rtsp
```
### 3. Python服务URL
```java
// VideoAnalysisService.java
private static final String PYTHON_API_URL = "http://rtsp-python-service:8000/api/detect/file";
private static final String MODEL_NAME = "yolov8_detector";
```
## 📖 文档导航
### 快速开始
1. **FINAL-SUMMARY.md** - Docker配置总结
2. **DOCKER-QUICK-START.md** - 快速启动指南
### 部署相关
3. **README-DOCKER.md** - 完整部署文档
4. **DEPLOYMENT-NOTES.md** - 详细配置说明
5. **YOLOV8-SETUP.md** - YOLOv8模型配置
### 功能相关
6. **INSPECTION-FEATURE-SUMMARY.md** - 巡检功能总结
7. **INSPECTION-WORKFLOW.md** - 详细工作流程
### 更新记录
8. **UPDATE-SUMMARY.md** - 更新变更记录
9. **DEPLOYMENT-FILES.md** - 文件清单
10. **COMPLETE-SUMMARY.md** - 本文档
## 🔍 验证清单
### Docker部署验证
- [ ] 所有容器运行正常
```bash
docker-compose ps
```
- [ ] 前端可访问
```
http://localhost:10080
```
- [ ] Python服务健康
```bash
docker-compose logs python-service | grep "YOLOv8模型加载完成"
```
- [ ] 后端连接正常
```bash
docker-compose logs backend | grep "Started RuoYiApplication"
```
### 巡检功能验证
- [ ] 创建测试任务
- [ ] 执行任务
- [ ] 检查Record创建
```sql
SELECT * FROM v_inspection_task_record ORDER BY create_time DESC LIMIT 1;
```
- [ ] 检查视频保存
```sql
SELECT accessory FROM v_inspection_task_record WHERE record_id = ?;
```
- [ ] 检查识别结果
```sql
SELECT result FROM v_inspection_task_record WHERE record_id = ?;
```
- [ ] 检查告警创建
```sql
SELECT * FROM v_alarm_record WHERE task_id = ? ORDER BY create_time DESC;
```
## ⚙️ 系统架构
```
┌──────────────┐
│ 浏览器 │
└──────┬───────┘
│ :10080 ← 唯一对外端口
┌──────▼───────┐
│ Frontend │
│ (Nginx) │
└──────┬───────┘
┌───┴──────────────┐
│ │
┌──▼────┐ ┌──────▼──────┐
│Backend│ │ Python │
│ :8080 │──────│ Service │
│ │ │ :8000(CPU) │
└─┬──┬──┘ └─────────────┘
│ │
│ │ ┌──────────────┐
│ └──│ MinIO(外部) │
│ │ 49.232... │
│ └──────────────┘
┌─▼────────┐
│ MySQL │
│ Redis │
└──────────┘
```
## 🎯 工作流程
### 巡检任务执行流程
```
1. 启动任务
└── 创建InspectionTaskRecord (status=1执行中)
2. 录制视频
├── 从RTSP流录制30秒
├── 上传到MinIO
└── 更新record.accessory = "原始视频URL"
3. AI识别
├── 逐帧分析视频
├── 调用Python服务(YOLOv8)
├── 检测结果去重
└── 创建AlarmRecord不重复
4. 保存结果
├── 绘制检测框
├── 上传处理后视频
├── 更新record.accessory += ";处理后URL"
└── 更新record.result = "检测结果摘要"
5. 完成
├── record.status = 0 (成功)
├── record.duration = 实际时长
└── task.status = 2 (已完成)
```
## 📝 配置摘要
### Docker服务配置
| 服务 | 容器名 | 端口 | 暴露 |
|------|--------|------|------|
| MySQL | rtsp-mysql | 3306 | ❌ |
| Redis | rtsp-redis | 6379 | ❌ |
| Python | rtsp-python-service | 8000 | ❌ |
| Backend | rtsp-backend | 8080 | ❌ |
| Frontend | rtsp-frontend | 80→10080 | ✅ |
| MinIO | 外部服务 | 10900 | - |
### 巡检任务配置
| 配置项 | 值 | 位置 |
|--------|-----|------|
| Python服务URL | http://rtsp-python-service:8000 | VideoAnalysisService.java |
| 模型名称 | yolov8_detector | VideoAnalysisService.java |
| 检测频率 | 每10帧 | processVideoWithRecord() |
| 去重容差 | 10像素 | generateDetectionKey() |
| 去重时间窗口 | 60秒 | processVideoWithRecord() |
## 💡 使用建议
### 性能优化CPU模式
1. **使用最小模型**
- yolov8n.pt推荐
- 而不是yolov8l.pt或yolov8x.pt
2. **降低检测频率**
```java
if (frameCount % 30 == 0) { // 从10改为30
```
3. **缩短录制时长**
```java
task.setDuration(15); // 从30秒改为15秒
```
### MinIO Bucket准备
需要在外部MinIO服务中创建以下bucket
- `inspection-videos` - 巡检视频
- `alarm-images` - 告警图片
### 数据库表确认
确保以下表存在:
- `v_inspection_task` - 巡检任务
- `v_inspection_task_record` - 巡检记录
- `v_alarm_record` - 告警记录
- `v_minio_object` - MinIO对象
- `v_device` - 设备信息
## 🐛 常见问题
### Q1: 视频未保存
**检查**
```bash
docker-compose logs backend | grep "MinIO"
curl http://49.232.154.205:10900/minio/health/live
```
### Q2: Python识别失败
**检查**
```bash
docker-compose logs python-service
docker exec -it rtsp-python-service ls -lh /app/models/best.pt
```
### Q3: 告警重复
**调整**
```java
// generateDetectionKey中增大容差
int x = rect.x() / 20 * 20; // 从10改为20
```
### Q4: 执行时间过长
**优化**
- 降低检测频率每30帧而不是10帧
- 缩短录制时长
- 使用更小的YOLOv8模型
## 📞 获取帮助
### 查看日志
```bash
# 所有服务
docker-compose logs -f
# 后端
docker-compose logs -f backend
# Python服务
docker-compose logs -f python-service
```
### 查看数据
```sql
-- 最新的执行记录
SELECT * FROM v_inspection_task_record ORDER BY create_time DESC LIMIT 10;
-- 最新的告警
SELECT * FROM v_alarm_record ORDER BY create_time DESC LIMIT 10;
-- 统计信息
SELECT
t.task_id,
COUNT(DISTINCT r.record_id) as execution_count,
COUNT(DISTINCT a.alarm_id) as alarm_count
FROM v_inspection_task t
LEFT JOIN v_inspection_task_record r ON t.task_id = r.task_id
LEFT JOIN v_alarm_record a ON t.task_id = a.task_id
GROUP BY t.task_id;
```
## 🎓 学习资源
- [Ultralytics YOLOv8文档](https://docs.ultralytics.com/)
- [Docker Compose文档](https://docs.docker.com/compose/)
- [FFmpeg JavaCV文档](https://github.com/bytedeco/javacv)
- [MinIO文档](https://min.io/docs/minio/linux/index.html)
## ✨ 总结
### 部署方面
✅ 完整的Docker部署方案
✅ YOLOv8 CPU模式支持
✅ 外部MinIO集成
✅ 容器间使用容器名通信
✅ 只暴露前端端口10080
✅ 完整的健康检查和依赖管理
### 功能方面
✅ 自动创建巡检记录
✅ 自动录制和保存视频
✅ 调用Python服务(YOLOv8)识别
✅ 自动更新识别结果
✅ 创建不重复的告警记录
✅ 完整的异常处理和日志记录
### 文档方面
✅ 10个详细的文档文件
✅ 工作流程图示
✅ 配置说明
✅ 故障排查指南
✅ SQL查询示例
---
**项目状态**: ✅ 完成
**部署状态**: 待部署
**测试状态**: 待测试
**文档版本**: 1.0
**最后更新**: 2025-09-30
🎊 **所有功能已完整实现,可以开始部署和测试!**

1
DEPLOYMENT-FILES.md Normal file
View File

@@ -0,0 +1 @@

329
DEPLOYMENT-NOTES.md Normal file
View File

@@ -0,0 +1,329 @@
# 📌 部署配置说明
## 重要配置
### 1. MinIO配置使用外部服务
本系统使用**外部已部署的MinIO服务**不在Docker中部署MinIO。
**配置位置**: `ruoyi-admin/src/main/resources/application.yml`
```yaml
minio:
enabled: true
endpoint: http://49.232.154.205:10900
access-key: 4EsLD9g9OM09DT0HaBKj
secret-key: 05SFC5fleqTnaLRYBrxHiphMFYbGX5nYicj0WCHA
bucket: rtsp
```
**如需更换MinIO服务器**
1. 编辑上述配置文件
2. 重新构建后端镜像:`docker-compose build backend`
3. 重启服务:`docker-compose up -d`
### 2. Python推理服务CPU模式
本系统**不使用GPU**Python推理服务运行在CPU模式。
**配置特点**
- ✅ 无需NVIDIA Docker Runtime
- ✅ 无需GPU驱动
- ✅ 适合CPU服务器部署
- ⚠️ 推理速度比GPU慢
**如需提升性能**
1. 使用更小的YOLOv8模型如yolov8n.pt
2. 减小输入图像尺寸
3. 调整置信度阈值
### 3. 服务端口配置
**对外暴露端口**
- 前端10080可在.env中修改`FRONTEND_PORT`
**内部端口**(不对外暴露):
- 后端8080
- Python服务8000
- MySQL3306
- Redis6379
## Docker Compose配置要点
### 服务列表
```yaml
services:
mysql # MySQL数据库
redis # Redis缓存
python-service # Python推理服务CPU
backend # Java后端
frontend # Vue前端唯一对外暴露
```
**注意**没有MinIO服务使用外部MinIO。
### 服务依赖关系
```
frontend 依赖 backend (健康检查)
backend 依赖 mysql, redis (健康检查) + python-service (启动)
```
### 环境变量(.env
```bash
# 数据库
MYSQL_HOST=rtsp-mysql
MYSQL_PORT=3306
MYSQL_PASSWORD=ruoyi123
# Redis
REDIS_HOST=rtsp-redis
REDIS_PORT=6379
# 后端
BACKEND_HOST=rtsp-backend
BACKEND_PORT=8080
# 前端(对外端口)
FRONTEND_PORT=10080
# Python服务
PYTHON_SERVICE_HOST=rtsp-python-service
PYTHON_SERVICE_PORT=8000
```
**不包含MinIO配置**在application.yml中
## 网络架构
### 内部网络rtsp-network
所有服务运行在同一个Docker网络中
- 容器间通过容器名通信
- 外部只能访问前端10080端口
- 安全性高
### 对外访问
```
浏览器 → 前端:10080 → Nginx → 后端:8080
→ Python服务:8000
后端 → MySQL:3306
→ Redis:6379
→ MinIO:49.232.154.205:10900 (外部)
```
## YOLOv8模型配置
### 模型要求
- **模型格式**: PyTorch (.pt)
- **训练框架**: Ultralytics YOLOv8
- **模型位置**: `python-inference-service/models/best.pt`
- **类别文件**: `python-inference-service/models/classes.txt`(可选)
### 模型配置
**models.json**:
```json
[
{
"name": "yolov8_detector",
"path": "models/yolov8_model.py",
"size": [640, 640]
}
]
```
### 性能调优CPU模式
编辑 `yolov8_model.py`
```python
# 降低置信度阈值(检测更多目标)
self.conf_threshold = 0.25
# 使用更小的输入尺寸(提升速度)
self.img_size = 320 # 或 480
# 建议使用 yolov8n.pt最快的模型
```
## 数据持久化
### Docker卷
```yaml
volumes:
mysql-data # MySQL数据
redis-data # Redis数据
backend-logs # 后端日志
backend-upload # 上传文件
```
**注意**MinIO数据在外部服务器不在本地。
### 数据备份
```bash
# 备份MySQL
docker exec rtsp-mysql mysqldump -u root -pruoyi123 ry-vue > backup.sql
# 备份Redis
docker exec rtsp-redis redis-cli SAVE
docker cp rtsp-redis:/data/dump.rdb ./redis-backup.rdb
```
## 部署检查清单
### 部署前检查
- [ ] YOLOv8模型文件best.pt已放置
- [ ] 类别文件classes.txt已创建可选
- [ ] .env配置已检查
- [ ] MinIO外部服务可访问
- [ ] Docker和Docker Compose已安装
- [ ] 端口10080未被占用
### 部署后检查
- [ ] 所有容器运行正常:`docker-compose ps`
- [ ] 前端可访问http://localhost:10080
- [ ] 后端健康检查通过
- [ ] Python服务加载模型成功
- [ ] MySQL连接正常
- [ ] Redis连接正常
- [ ] MinIO外部服务连接正常
## 常见问题
### Q1: MinIO连接失败
**检查**
1. 外部MinIO服务是否可访问
2. application.yml中的配置是否正确
3. 网络是否畅通
```bash
# 测试连接
curl http://49.232.154.205:10900/minio/health/live
```
### Q2: Python推理慢
**原因**CPU模式比GPU慢
**优化**
1. 使用yolov8n.pt最小模型
2. 减小输入尺寸320或480
3. 提高置信度阈值(减少检测框)
### Q3: 容器启动失败
**排查步骤**
```bash
# 查看日志
docker-compose logs [服务名]
# 检查端口占用
netstat -an | grep 10080
# 检查资源
docker stats
```
### Q4: 模型加载失败
**检查**
1. best.pt文件是否存在
2. 文件是否是YOLOv8格式
3. Python依赖是否安装完整
```bash
# 进入容器检查
docker exec -it rtsp-python-service bash
ls -lh /app/models/
```
## 安全建议
1. **修改默认密码**
- MySQL: ruoyi123
- MinIO密钥在application.yml中
2. **网络隔离**
- 只暴露必要端口10080
- 使用防火墙限制访问
3. **MinIO安全**
- 使用HTTPS连接
- 定期更新密钥
- 限制bucket访问权限
4. **定期备份**
- MySQL数据
- Redis数据
- 应用日志
## 维护操作
### 更新服务
```bash
# 停止服务
docker-compose down
# 更新代码
git pull
# 重新构建
docker-compose build
# 启动服务
docker-compose up -d
```
### 查看日志
```bash
# 所有服务
docker-compose logs -f
# 特定服务
docker-compose logs -f backend
docker-compose logs -f python-service
```
### 清理资源
```bash
# 清理未使用的镜像
docker image prune -a
# 清理未使用的卷
docker volume prune
# 完全清理(包括数据)
docker-compose down -v
```
## 性能监控
```bash
# 查看资源使用
docker stats
# 查看容器状态
docker-compose ps
# 查看网络连接
docker network inspect rtsp-network
```
---
**最后更新**: 2025-09-30
**配置版本**: Docker Compose 3.8, YOLOv8 CPU模式

202
DOCKER-QUICK-START.md Normal file
View File

@@ -0,0 +1,202 @@
# 🚀 Docker快速启动指南
## 一键部署
### Windows
```bash
deploy.bat
```
### Linux/Mac
```bash
chmod +x deploy.sh
./deploy.sh
```
## 手动部署
```bash
# 1. 启动所有服务
docker-compose up -d
# 2. 查看服务状态
docker-compose ps
# 3. 查看日志
docker-compose logs -f
```
## 常用命令
### 服务管理
```bash
# 启动服务
docker-compose up -d
# 停止服务
docker-compose stop
# 重启服务
docker-compose restart
# 停止并删除容器
docker-compose down
# 停止并删除容器和数据卷
docker-compose down -v
```
### 日志查看
```bash
# 查看所有日志
docker-compose logs
# 查看特定服务日志
docker-compose logs backend
docker-compose logs frontend
docker-compose logs python-service
# 实时查看日志
docker-compose logs -f
# 查看最近100行日志
docker-compose logs --tail=100
```
### 服务重建
```bash
# 重新构建所有镜像
docker-compose build
# 重新构建特定服务
docker-compose build backend
# 强制重建并启动
docker-compose up -d --build --force-recreate
```
### 进入容器
```bash
# 进入后端容器
docker exec -it rtsp-backend sh
# 进入前端容器
docker exec -it rtsp-frontend sh
# 进入Python服务容器
docker exec -it rtsp-python-service bash
# 进入MySQL容器
docker exec -it rtsp-mysql bash
# 进入Redis容器
docker exec -it rtsp-redis sh
```
## 端口配置
| 服务 | 默认端口 | 对外暴露 | 说明 |
|------|---------|---------|------|
| 前端 | 10080 | ✅ | 唯一对外暴露的端口 |
| 后端 | 8080 | ❌ | 仅容器内部访问 |
| Python服务 | 8000 | ❌ | 仅容器内部访问CPU模式 |
| MySQL | 3306 | ❌ | 仅容器内部访问 |
| Redis | 6379 | ❌ | 仅容器内部访问 |
| MinIO | 外部服务 | - | 使用已部署的外部服务 |
要修改前端端口,编辑 `.env` 文件中的 `FRONTEND_PORT` 变量。
## 环境变量配置
所有配置都在 `.env` 文件中:
```bash
# 修改前端端口
FRONTEND_PORT=8080
# 修改数据库密码
MYSQL_ROOT_PASSWORD=your_secure_password
MYSQL_PASSWORD=your_secure_password
# 注意MinIO为外部服务其密钥不在.env中
# 如需修改MinIO密钥请编辑 ruoyi-admin/src/main/resources/application.yml 并重新构建后端
```
修改后需要重新启动服务:
```bash
docker-compose down
docker-compose up -d
```
## 数据备份
### 备份MySQL数据
```bash
docker exec rtsp-mysql mysqldump -u root -pruoyi123 ry-vue > backup.sql
```
### 备份MinIO数据
本系统使用外部MinIO服务49.232.154.205:10900本地没有 rtsp-minio 容器。
MinIO数据请在外部MinIO服务器上备份例如使用 `mc mirror` 将bucket同步到备份目录
### 恢复MySQL数据
```bash
docker exec -i rtsp-mysql mysql -u root -pruoyi123 ry-vue < backup.sql
```
## 故障排查
### 查看服务健康状态
```bash
docker-compose ps
```
### 服务启动失败
```bash
# 查看详细日志
docker-compose logs [服务名]
# 常见问题:
# 1. 端口被占用 -> 修改.env中的端口配置
# 2. 数据库连接失败 -> 等待MySQL完全启动约30秒
# 3. 内存不足 -> 增加Docker内存限制或减少服务
```
### 清理并重新开始
```bash
# 完全清理(会删除所有数据)
docker-compose down -v
docker system prune -a
# 重新部署
docker-compose up -d
```
## 访问地址
部署成功后:
- **前端界面**: http://localhost:10080
- **后端API**: 通过前端代理 `/prod-api/`
- **Python API**: 通过前端代理 `/python-api/`
- **API文档**: http://localhost:10080/prod-api/swagger-ui.html
## 首次使用
1. **准备YOLOv8模型**:将训练好的`best.pt`文件放到`python-inference-service/models/`目录
2. 访问 http://localhost:10080
3. 默认账号密码请参考主文档
4. 首次启动可能需要等待1-2分钟让所有服务完全启动
## YOLOv8模型说明
本系统使用**YOLOv8**Ultralytics进行目标检测
- 将训练好的YOLOv8模型`best.pt`)放到`python-inference-service/models/`目录
- (可选)创建`classes.txt`文件,每行一个类别名称
- 模型会自动加载并提供推理服务
## 更多帮助
详细文档请查看:`README-DOCKER.md`

279
FINAL-SUMMARY.md Normal file
View File

@@ -0,0 +1,279 @@
# ✅ 最终配置总结
## 🎯 您的部署配置
根据您的需求,已完成以下配置:
### 1. ✅ YOLOv8 (CPU模式)
- **框架**: Ultralytics YOLOv8
- **运行模式**: CPU无需GPU
- **模型文件**: `python-inference-service/models/best.pt`
### 2. ✅ MinIO (外部服务)
- **地址**: http://49.232.154.205:10900
- **配置位置**: `ruoyi-admin/src/main/resources/application.yml`
- **不在Docker中部署**
### 3. ✅ 前端端口
- **对外端口**: 10080
- **访问地址**: http://localhost:10080
## 📦 Docker服务列表
| 服务 | 说明 | 备注 |
|------|------|------|
| MySQL | 数据库 | 内部部署 |
| Redis | 缓存 | 内部部署 |
| Python服务 | YOLOv8推理 | CPU模式不需要GPU |
| Java后端 | 业务逻辑 | 连接外部MinIO |
| Vue前端 | 用户界面 | 唯一对外暴露 :10080 |
| MinIO | 对象存储 | **使用外部服务** |
## 🚀 快速部署
### 1. 准备模型文件
```bash
# 将YOLOv8训练的模型放到这里
python-inference-service/models/best.pt
```
### 2. 启动服务
```bash
# Windows
deploy.bat
# Linux/Mac
chmod +x deploy.sh
./deploy.sh
# 或手动启动
docker-compose up -d
```
### 3. 访问系统
```
前端: http://localhost:10080
```
## 📋 关键配置文件
### docker-compose.yml
**包含的服务**
```yaml
services:
mysql # ✅ 内部部署
redis # ✅ 内部部署
python-service # ✅ CPU模式无GPU配置
backend # ✅ 连接外部MinIO
frontend # ✅ 对外端口10080
# minio # ❌ 不部署,使用外部
```
### .env
**环境变量**
```bash
MYSQL_HOST=rtsp-mysql
REDIS_HOST=rtsp-redis
BACKEND_HOST=rtsp-backend
FRONTEND_PORT=10080 # 前端对外端口
PYTHON_SERVICE_HOST=rtsp-python-service
# MinIO配置在application.yml中不在.env
```
### application.yml
**MinIO外部服务配置**
```yaml
minio:
enabled: true
endpoint: http://49.232.154.205:10900
access-key: 4EsLD9g9OM09DT0HaBKj
secret-key: 05SFC5fleqTnaLRYBrxHiphMFYbGX5nYicj0WCHA
bucket: rtsp
```
## 🎯 系统架构
```
┌────────────────┐
│ 浏览器 │
│ │
└───────┬────────┘
│ :10080 ← 唯一对外端口
┌───────▼────────┐
│ Frontend │
│ (Nginx) │
└───────┬────────┘
┌───┴───────────────┐
│ │
┌───▼─────┐ ┌──────▼──────┐
│ Backend │ │ Python │
│ :8080 │ │ Service │
│ │ │ :8000(CPU) │
└─┬──┬──┬─┘ └─────────────┘
│ │ │
│ │ └────────────┐
│ │ │
│ │ ┌────▼─────┐
│ │ │ MinIO │
│ │ │ 外部服务 │
│ │ │ 49.232.. │
│ │ └──────────┘
│ │
┌─▼──▼───┐
│ MySQL │
│ Redis │
└────────┘
```
## ⚙️ 配置特点
### ✅ 优点
1. **无需GPU**
- 不需要NVIDIA驱动
- 不需要NVIDIA Docker Runtime
- 适合普通CPU服务器
2. **使用外部MinIO**
- 不占用本地存储
- 配置灵活
- 易于扩展
3. **网络安全**
- 只暴露前端端口10080
- 其他服务内部通信
- 提高安全性
### ⚠️ 注意事项
1. **推理速度**
- CPU模式比GPU慢
- 建议使用yolov8n.pt最小模型
- 可适当降低图像尺寸
2. **MinIO依赖**
- 需要外部MinIO服务可用
- 修改配置需重新构建后端
3. **首次启动**
- 等待1-2分钟让服务完全启动
- Python服务需要加载模型
## 📖 重要文档
| 文档 | 说明 |
|------|------|
| `DEPLOYMENT-NOTES.md` | **最重要** - 部署配置说明 |
| `DOCKER-QUICK-START.md` | 快速启动和常用命令 |
| `YOLOV8-SETUP.md` | YOLOv8模型配置 |
| `README-DOCKER.md` | 完整部署文档 |
| `UPDATE-SUMMARY.md` | 更新变更记录 |
## 🔍 部署检查清单
### 部署前
- [ ] Docker和Docker Compose已安装
- [ ] YOLOv8模型文件best.pt已准备
- [ ] 外部MinIO服务可访问http://49.232.154.205:10900
- [ ] 端口10080未被占用
- [ ] 服务器有足够CPU资源
### 部署后
- [ ] 所有容器正常运行:`docker-compose ps`
- [ ] 前端可访问http://localhost:10080
- [ ] Python服务加载模型成功
- [ ] 后端连接MySQL成功
- [ ] 后端连接Redis成功
- [ ] 后端连接外部MinIO成功
## 🛠️ 常用命令
```bash
# 启动服务
docker-compose up -d
# 查看状态
docker-compose ps
# 查看日志
docker-compose logs -f
# 查看特定服务日志
docker-compose logs -f python-service
docker-compose logs -f backend
# 重启服务
docker-compose restart
# 停止服务
docker-compose down
# 完全清理(包括数据)
docker-compose down -v
```
## 🔧 性能优化建议
### CPU模式优化
1. **使用最小模型**
```bash
# 使用yolov8n.pt而不是yolov8l.pt或yolov8x.pt
```
2. **调整输入尺寸**
```python
# 在yolov8_model.py中
self.img_size = 320 # 从640改为320
```
3. **提高置信度阈值**
```python
# 减少检测框数量
self.conf_threshold = 0.4 # 从0.25提高到0.4
```
## 📞 获取帮助
1. **查看日志**
```bash
docker-compose logs [服务名]
```
2. **检查配置**
- `.env` - 环境变量
- `docker-compose.yml` - 服务配置
- `application.yml` - MinIO配置
3. **阅读文档**
- `DEPLOYMENT-NOTES.md` - 详细配置说明
- `DOCKER-QUICK-START.md` - 快速参考
## ✨ 部署完成
所有配置已完成,您可以:
1. 运行 `deploy.bat` 或 `./deploy.sh` 启动系统
2. 访问 http://localhost:10080 使用系统
3. 查看日志确认服务正常运行
---
**配置日期**: 2025-09-30
**配置特点**:
- ✅ YOLOv8 (CPU模式)
- ✅ 外部MinIO (49.232.154.205:10900)
- ✅ 前端端口 10080
- ✅ 无GPU依赖
🎉 **祝部署顺利!**

View File

@@ -0,0 +1,525 @@
# ✅ 巡检任务功能更新总结
## 🎯 功能实现
根据需求已实现完整的巡检任务记录和AI识别流程。
## 📋 新增功能
### 1. ✅ 自动创建巡检记录
**何时创建**:巡检任务启动时自动创建
**InspectionTaskRecord字段**
- `recordId`: 自动生成的记录ID
- `taskId`: 关联的巡检任务ID
- `executeTime`: 执行开始时间
- `duration`: 执行时长(秒)
- `accessory`: 视频URL原始;处理后)
- `result`: AI识别结果摘要
- `status`: 0=成功, 1=失败, 2=部分成功
### 2. ✅ 自动保存视频
**视频保存流程**
1. 从RTSP流录制视频按task.duration时长
2. 上传原始视频到MinIO
3. 保存URL到`record.accessory`
4. 继续分析处理
### 3. ✅ 调用Python服务识别
**识别流程**
1. 创建`HttpYoloDetector`连接Python服务
2. 逐帧提取并调用YOLOv8检测
3. 每10帧检测一次可调整
4. 绘制检测框到视频上
5. 生成带标注的处理后视频
### 4. ✅ 更新识别结果
**result字段内容**
```
共检测到 5 个问题,详情:垃圾(3) 烟雾(2)
```
### 5. ✅ 创建不重复告警
**去重机制**
- 使用位置+类别生成唯一键
- 相同对象只创建一次告警
- 允许10像素的位置波动
- 60秒未检测到自动清除
**AlarmRecord包含**
- 告警类型、内容、置信度
- 关联的任务ID和设备ID
- 告警图片MinIO存储
- 视频帧位置
- 未处理状态
## 🔄 完整执行流程
```
1. 用户启动巡检任务
2. InspectionTaskServiceImpl.executeInspectionTask()
├── 创建InspectionTaskRecord (status=1执行中)
├── 更新InspectionTask (status=1执行中)
└── 调用performVideoAnalysisWithRecord()
3. performVideoAnalysisWithRecord()
├── 录制RTSP视频流30秒
├── 上传原始视频到MinIO
├── 更新record.accessory = "原始视频URL"
└── 调用analyzeVideoAndUpdateRecord()
4. VideoAnalysisService.analyzeVideoWithRecord()
├── 逐帧分析视频
├── 每10帧调用Python API检测
├── 发现新对象 → createAlarmRecordForRecord()
│ ├── 提取检测区域图片
│ ├── 上传告警图片到MinIO
│ └── 创建AlarmRecord去重
├── 绘制检测框
├── 保存处理后视频
├── 上传处理后视频到MinIO
├── 更新record.accessory += ";处理后视频URL"
└── 更新record.result = "检测结果摘要"
5. 完成
├── 更新record.status = 0 (成功)
├── 更新record.duration = 实际执行时长
└── 更新task.status = 2 (已完成)
```
## 📦 新增/修改的文件
### 新增文件
1. **IInspectionTaskRecordService.java**
- 巡检记录服务接口
2. **IInspectionTaskRecordServiceImpl.java**
- 巡检记录服务实现
3. **INSPECTION-WORKFLOW.md**
- 详细工作流程文档
4. **INSPECTION-FEATURE-SUMMARY.md**
- 本文档
### 修改文件
1. **InspectionTaskServiceImpl.java**
- 添加依赖注入InspectionTaskRecordMapper, MinioService, VMinioObjectService
- 修改`executeInspectionTask()` - 创建记录
- 新增`performVideoAnalysisWithRecord()` - 录制视频并分析
- 新增`analyzeVideoAndUpdateRecord()` - 调用分析服务
- 新增`updateRecordFailed()` - 更新失败状态
2. **VideoAnalysisService.java**
- 添加InspectionTaskRecordMapper依赖
- 更新Python服务URL为容器名
- 更新模型名称为yolov8_detector
- 新增`analyzeVideoWithRecord()` - 带记录的视频分析
- 新增`processVideoWithRecord()` - 处理视频并记录结果
- 新增`createAlarmRecordForRecord()` - 创建去重告警
- 新增`uploadProcessedVideoForRecord()` - 上传处理后视频
## 🎯 数据流转
### InspectionTaskRecord字段变化
```
创建时:
recordId: [auto]
taskId: 1001
executeTime: 2025-09-30 14:30:00
status: 1 (执行中)
accessory: null
result: null
duration: null
录制视频后:
accessory: "http://.../inspection_1001_xxx.mp4"
分析完成后:
accessory: "http://.../inspection_1001_xxx.mp4;http://.../processed_xxx.mp4"
result: "共检测到 3 个问题,详情:垃圾(2) 烟雾(1)"
duration: 32
status: 0 (成功)
```
### AlarmRecord创建条件
仅当满足以下条件时创建告警:
1. ✅ 检测到新对象(不在缓存中)
2. ✅ 位置和类别不重复
3. ✅ 置信度超过阈值Python服务的conf参数
## 💾 数据库查询示例
### 查看任务执行历史
```sql
-- 查看任务的所有执行记录
SELECT
r.record_id,
r.execute_time,
r.duration,
r.status,
r.result,
(SELECT COUNT(*) FROM v_alarm_record a WHERE a.task_id = r.task_id
AND a.create_time >= r.execute_time) as alarm_count
FROM v_inspection_task_record r
WHERE r.task_id = 1001
ORDER BY r.execute_time DESC;
```
### 查看记录详情
```sql
-- 查看单条记录的完整信息
SELECT
r.*,
t.device_id,
d.ip as device_ip
FROM v_inspection_task_record r
JOIN v_inspection_task t ON r.task_id = t.task_id
JOIN v_device d ON t.device_id = d.device_id
WHERE r.record_id = 2001;
```
### 查看记录的所有告警
```sql
-- 查看某次执行产生的告警
SELECT
a.alarm_id,
a.alarm_type,
a.alarm_content,
a.confidence,
a.frame_position,
m.object_url as alarm_image_url
FROM v_alarm_record a
LEFT JOIN v_minio_object m ON a.image_oss_id = m.object_id
WHERE a.task_id = 1001
AND a.create_time >= (SELECT execute_time FROM v_inspection_task_record WHERE record_id = 2001)
AND a.create_time <= DATE_ADD((SELECT execute_time FROM v_inspection_task_record WHERE record_id = 2001),
INTERVAL (SELECT duration FROM v_inspection_task_record WHERE record_id = 2001) SECOND)
ORDER BY a.create_time;
```
## 🔧 配置参数
### 关键配置位置
**VideoAnalysisService.java**:
```java
// Python服务地址使用容器名
private static final String PYTHON_API_URL = "http://rtsp-python-service:8000/api/detect/file";
// 模型名称
private static final String MODEL_NAME = "yolov8_detector";
// 检测频率每N帧
if (frameCount % 10 == 0) { ... }
// 去重时间窗口60秒
(currentId - entry.getValue()) > grabber.getFrameRate() * 60
```
### 可调整参数
| 参数 | 位置 | 默认值 | 说明 |
|------|------|--------|------|
| 检测频率 | processVideoWithRecord | 10帧 | 降低可提升性能 |
| 去重容差 | generateDetectionKey | 10像素 | 提高容差减少告警 |
| 去重时间窗口 | processVideoWithRecord | 60秒 | 缩短窗口增加告警 |
| 模型名称 | MODEL_NAME | yolov8_detector | 与Python配置对应 |
## 🚀 API接口
### Python服务接口
**请求**
```http
POST http://rtsp-python-service:8000/api/detect/file
Content-Type: multipart/form-data
model_name: yolov8_detector
file: [图像文件]
```
**响应**
```json
{
"model_name": "yolov8_detector",
"detections": [
{
"label": "[yolov8_detector] 垃圾",
"confidence": 0.95,
"x": 100,
"y": 200,
"width": 150,
"height": 180,
"color": 65280
}
],
"inference_time": 45.6
}
```
## 🎬 使用示例
### 示例1: 创建并执行巡检任务
```java
// 1. 创建任务
InspectionTask task = new InspectionTask();
task.setDeviceId(5001L);
task.setDuration(30); // 30秒
task.setStatus(0); // 待执行
inspectionTaskService.insertInspectionTask(task);
// 2. 启动任务(异步)
inspectionTaskService.executeInspectionTask(task.getTaskId());
// 3. 查询执行记录
List<InspectionTaskRecord> records =
inspectionTaskRecordService.selectInspectionTaskRecordList(
new InspectionTaskRecord().setTaskId(task.getTaskId())
);
```
### 示例2: 查询告警
```java
// 查询某任务的所有告警
AlarmRecord query = new AlarmRecord();
query.setTaskId(1001L);
query.setStatus(0); // 未处理
List<AlarmRecord> alarms = alarmRecordService.selectAlarmRecordList(query);
```
## ⚠️ 注意事项
### 1. 执行时间
- 录制视频需要时间与duration设置一致
- AI分析需要额外时间取决于视频长度和CPU性能
- 总执行时间 ≈ duration + 分析时间
### 2. 存储空间
每次执行会产生:
- 原始视频(~10-50MB30秒
- 处理后视频(~10-50MB
- 告警图片(每个~100-500KB
建议定期清理历史数据。
### 3. Python服务调用
- 使用HTTP调用Python服务
- 每帧调用可能较慢已优化为每10帧
- CPU模式下建议降低检测频率
### 4. MinIO存储
- 确保bucket已创建
- `inspection-videos`(巡检视频)
- `alarm-images`(告警图片)
- 确保外部MinIO服务可访问
## 🔍 测试清单
### 部署后测试
- [ ] Python服务可访问
```bash
curl http://rtsp-python-service:8000/health
curl http://rtsp-python-service:8000/api/models
```
- [ ] MinIO bucket已创建
```bash
# 登录MinIO管理界面创建bucket
# 或使用mc命令创建
```
- [ ] 创建测试任务
```sql
INSERT INTO v_inspection_task (device_id, duration, status) VALUES (1, 30, 0);
```
- [ ] 执行任务并查看记录
```sql
SELECT * FROM v_inspection_task_record ORDER BY create_time DESC LIMIT 1;
```
- [ ] 查看生成的告警
```sql
SELECT * FROM v_alarm_record ORDER BY create_time DESC LIMIT 10;
```
- [ ] 验证视频URL可访问
```
访问record.accessory中的URL
```
## 📊 预期结果
### 成功执行的记录
```json
{
"recordId": 2001,
"taskId": 1001,
"executeTime": "2025-09-30 14:30:00",
"duration": 32,
"accessory": "http://49.232.154.205:10900/inspection-videos/inspection_1001_1696056600000.mp4;http://49.232.154.205:10900/inspection-videos/processed_1696056632000.mp4",
"result": "共检测到 5 个问题,详情:垃圾(3) 烟雾(2)",
"status": 0
}
```
### 生成的告警
```json
{
"alarmId": 3001,
"deviceId": 5001,
"taskId": 1001,
"alarmType": "detection",
"alarmContent": "垃圾 - 置信度: 0.95",
"imageOssId": 4001,
"framePosition": 150,
"confidence": 0.95,
"status": 0
}
```
## 🛠️ 维护和优化
### 1. 清理历史数据
```sql
-- 删除30天前的记录
DELETE FROM v_inspection_task_record
WHERE execute_time < DATE_SUB(NOW(), INTERVAL 30 DAY);
-- 删除已处理的告警
DELETE FROM v_alarm_record
WHERE status = 1 AND create_time < DATE_SUB(NOW(), INTERVAL 7 DAY);
```
### 2. 性能监控
```sql
-- 查看最近的执行统计
SELECT
DATE(execute_time) as date,
COUNT(*) as total_executions,
SUM(CASE WHEN status = 0 THEN 1 ELSE 0 END) as success_count,
AVG(duration) as avg_duration
FROM v_inspection_task_record
WHERE execute_time >= DATE_SUB(NOW(), INTERVAL 7 DAY)
GROUP BY DATE(execute_time)
ORDER BY date DESC;
```
### 3. 告警统计
```sql
-- 查看最近的告警统计
SELECT
DATE(create_time) as date,
alarm_type,
COUNT(*) as count,
AVG(confidence) as avg_confidence
FROM v_alarm_record
WHERE create_time >= DATE_SUB(NOW(), INTERVAL 7 DAY)
GROUP BY DATE(create_time), alarm_type
ORDER BY date DESC, count DESC;
```
## 📞 故障排查
### 问题1: Record未创建
**症状**:执行任务但`v_inspection_task_record`表无数据
**检查**
```sql
SELECT * FROM v_inspection_task WHERE task_id = ?;
```
**解决**
- 确认任务状态为0待执行
- 查看后端日志
- 检查Mapper XML配置
### 问题2: Accessory为空
**症状**Record创建了但accessory字段为空
**检查**
```bash
# 查看MinIO上传日志
docker-compose logs backend | grep "MinIO"
# 测试MinIO连接
curl http://49.232.154.205:10900/minio/health/live
```
**解决**
- 确认MinIO服务可访问
- 检查application.yml中的MinIO配置
- 确认bucket已创建
### 问题3: Result为空
**症状**视频已保存但result字段为空
**检查**
```bash
# 查看Python服务日志
docker-compose logs python-service
# 测试Python服务
curl http://rtsp-python-service:8000/api/models
```
**解决**
- 确认Python服务运行正常
- 确认best.pt模型文件存在
- 检查容器间网络通信
### 问题4: 告警重复
**症状**:相同对象产生多个告警
**调整去重参数**
```java
// 在generateDetectionKey中增大容差
int x = rect.x() / 20 * 20; // 从10改为20
int y = rect.y() / 20 * 20;
```
## 📖 相关文档
- `INSPECTION-WORKFLOW.md` - 详细工作流程
- `YOLOV8-SETUP.md` - YOLOv8模型配置
- `DEPLOYMENT-NOTES.md` - 部署配置说明
- `DOCKER-QUICK-START.md` - Docker快速开始
---
**功能状态**: ✅ 已实现
**测试状态**: 待测试
**文档版本**: 1.0
**最后更新**: 2025-09-30
🎉 **巡检任务记录功能已完整实现!**

544
INSPECTION-WORKFLOW.md Normal file
View File

@@ -0,0 +1,544 @@
# 巡检任务工作流程说明
## 📋 功能概述
本文档说明巡检任务的完整执行流程包括视频录制、保存、AI识别和告警创建。
## 🔄 完整工作流程
### 1. 任务启动
当巡检任务启动时:
```
InspectionTaskServiceImpl.executeInspectionTask(taskId)
├── 创建 InspectionTaskRecord记录ID
├── 更新任务状态为"执行中"
└── 调用 performVideoAnalysisWithRecord()
```
### 2. 视频录制和保存
```
performVideoAnalysisWithRecord()
├── 从RTSP流抓取视频
├── 录制指定时长的视频
├── 保存为临时文件
├── 上传视频到MinIO
├── 更新InspectionTaskRecord.accessory视频URL
└── 调用Python服务进行分析
```
### 3. AI识别处理
```
VideoAnalysisService.analyzeVideoWithRecord()
├── 创建HttpYoloDetector连接Python服务
├── 逐帧分析视频
├── 每10帧调用一次Python API检测
├── 绘制检测框
├── 去重检测结果(避免重复告警)
├── 创建告警记录
├── 上传处理后的视频
├── 生成检测结果摘要
└── 更新InspectionTaskRecord.result识别结果
```
### 4. 告警创建(去重)
```
createAlarmRecordForRecord()
├── 提取检测区域图像
├── 上传告警图片到MinIO
├── 创建AlarmRecord
│ ├── 设备ID
│ ├── 告警类型
│ ├── 告警内容(检测类别+置信度)
│ ├── 关联的任务ID
│ ├── 图片URL
│ └── 帧位置
└── 保存到数据库(仅新检测的对象)
```
## 📊 数据表关系
```
InspectionTask (巡检任务)
↓ 1:N
InspectionTaskRecord (巡检记录)
├── accessory: 原始视频URL + 处理后视频URL
├── result: AI识别结果摘要
├── duration: 执行时长
└── status: 0=成功, 1=失败, 2=部分成功
InspectionTaskRecord → AlarmRecord (1:N)
├── 同一个record可以有多个告警
└── 告警自动去重(相同位置的相同对象只记录一次)
```
## 🎯 关键字段说明
### InspectionTaskRecord
| 字段 | 说明 | 示例 |
|------|------|------|
| recordId | 记录ID | 自增主键 |
| taskId | 关联的任务ID | 1001 |
| executeTime | 执行时间 | 2025-09-30 14:30:00 |
| duration | 执行时长(秒) | 30 |
| accessory | 附件URL | video1.mp4;video2.mp4 |
| result | 识别结果 | 共检测到3个问题详情垃圾(2) 烟雾(1) |
| status | 执行状态 | 0=成功, 1=失败, 2=部分成功 |
### AlarmRecord
| 字段 | 说明 | 示例 |
|------|------|------|
| alarmId | 告警ID | 自增主键 |
| deviceId | 设备ID | 1001 |
| taskId | 任务ID | 1001 |
| alarmType | 告警类型 | detection |
| alarmContent | 告警内容 | 垃圾 - 置信度: 0.95 |
| imageOssId | 告警图片ID | MinIO对象ID |
| framePosition | 视频帧位置 | 150 |
| confidence | 置信度 | 0.95 |
| status | 处理状态 | 0=未处理, 1=已处理 |
## 🔧 关键实现细节
### 1. 去重机制
使用`generateDetectionKey`生成唯一键:
```java
private String generateDetectionKey(Detection detection) {
Rect rect = detection.getRect();
// 取10的倍数允许小范围波动
int x = rect.x() / 10 * 10;
int y = rect.y() / 10 * 10;
int w = rect.width() / 10 * 10;
int h = rect.height() / 10 * 10;
return String.format("%s_%d_%d_%d_%d", detection.getLabel(), x, y, w, h);
}
```
**原理**
- 相同类别 + 相似位置 → 认为是同一个对象
- 允许10像素的波动
- 超过60秒未检测到自动清除
### 2. Python服务调用
使用容器名调用:
```java
private static final String PYTHON_API_URL = "http://rtsp-python-service:8000/api/detect/file";
private static final String MODEL_NAME = "yolov8_detector";
```
### 3. 视频处理流程
```
RTSP流 → FFmpegFrameGrabber → 录制 → 临时文件
→ 上传MinIO → 保存URL到record.accessory
→ 逐帧分析 → 调用Python API → 绘制检测框
→ 保存处理后视频 → 追加URL到record.accessory
→ 更新record.result
```
### 4. 附件字段格式
`accessory`字段使用分号分隔多个URL
```
原始视频URL;处理后视频URL
```
示例:
```
http://minio.com/inspection-videos/inspection_1001_1234567890.mp4;http://minio.com/inspection-videos/processed_1234567891.mp4
```
## 🚀 使用方法
### 1. 创建巡检任务
```java
InspectionTask task = new InspectionTask();
task.setDeviceId(deviceId);
task.setDuration(30); // 录制30秒
task.setStatus(0); // 待执行
inspectionTaskService.insertInspectionTask(task);
```
### 2. 启动任务
```java
// 异步执行
inspectionTaskService.executeInspectionTask(taskId);
```
### 3. 查看执行记录
```sql
-- 查询某任务的所有执行记录
SELECT * FROM v_inspection_task_record WHERE task_id = 1001 ORDER BY execute_time DESC;
-- 查询成功的记录
SELECT * FROM v_inspection_task_record WHERE status = 0;
-- 查询某记录的所有告警
SELECT * FROM v_alarm_record WHERE task_id = 1001;
```
### 4. 查看告警
```sql
-- 查询某任务的所有告警
SELECT * FROM v_alarm_record WHERE task_id = 1001 ORDER BY create_time DESC;
-- 查询未处理的告警
SELECT * FROM v_alarm_record WHERE status = 0;
```
## 📝 执行示例
### 执行流程
1. **任务创建**
```
Task ID: 1001
Device ID: 5001
Duration: 30秒
```
2. **记录创建**
```
Record ID: 2001
Task ID: 1001
Execute Time: 2025-09-30 14:30:00
Status: 1 (执行中)
```
3. **视频录制**
```
录制30秒视频
保存到MinIO: inspection_1001_1234567890.mp4
更新Record.accessory: http://minio.com/.../inspection_1001_1234567890.mp4
```
4. **AI识别**
```
调用Python服务
检测到: 垃圾(2个), 烟雾(1个)
```
5. **告警创建**(去重)
```
Alarm 1: 垃圾 - 位置(100,200) - 置信度0.95
Alarm 2: 垃圾 - 位置(300,400) - 置信度0.87
Alarm 3: 烟雾 - 位置(500,100) - 置信度0.92
```
6. **处理后视频**
```
带检测框的视频上传
保存到MinIO: processed_1234567891.mp4
更新Record.accessory: 原始URL;处理后URL
```
7. **更新记录**
```
Record.result: "共检测到 3 个问题,详情:垃圾(2) 烟雾(1)"
Record.status: 0 (成功)
Record.duration: 32秒
```
## ⚙️ 配置说明
### Python服务配置
在Docker环境中Python服务地址为
```
http://rtsp-python-service:8000
```
### 模型配置
确保Python服务使用正确的模型名称
```json
{
"name": "yolov8_detector",
"path": "models/yolov8_model.py",
"size": [640, 640]
}
```
### 检测参数
在`VideoAnalysisService`中可调整:
```java
// 检测频率每N帧检测一次
if (frameCount % 10 == 0) { ... }
// 去重时间窗口(秒)
detectedGarbageCache.entrySet().removeIf(entry ->
(currentId - entry.getValue()) > grabber.getFrameRate() * 60);
```
## 🐛 故障排查
### 问题1: 视频未保存
**检查**
```sql
-- 查看record的accessory字段
SELECT record_id, accessory FROM v_inspection_task_record WHERE task_id = ?;
-- 查看MinIO对象
SELECT * FROM v_minio_object WHERE bucket_name = 'inspection-videos' ORDER BY create_time DESC;
```
**解决**
- 检查MinIO服务是否可用
- 检查网络连接
- 查看后端日志
### 问题2: Python识别未执行
**检查**
```bash
# 查看Python服务日志
docker-compose logs python-service
# 测试Python服务
curl http://rtsp-python-service:8000/health
curl http://rtsp-python-service:8000/api/models
```
**解决**
- 确认Python服务运行正常
- 确认模型文件存在
- 检查网络连通性
### 问题3: 告警未创建
**检查**
```sql
-- 查看告警记录
SELECT * FROM v_alarm_record WHERE task_id = ? ORDER BY create_time DESC;
-- 查看检测结果
SELECT result FROM v_inspection_task_record WHERE record_id = ?;
```
**解决**
- 检查检测置信度阈值
- 查看视频内容是否有检测对象
- 检查Python服务返回结果
### 问题4: 重复告警
**检查**
- 去重机制是否正常工作
- `generateDetectionKey`逻辑是否合理
**调整**
```java
// 调整去重的位置容差
int x = rect.x() / 20 * 20; // 从10改为20更宽松的去重
```
## 📊 性能优化
### 1. 检测频率
```java
// 降低检测频率以提升性能CPU模式
if (frameCount % 30 == 0) { // 从10改为30
// 每30帧检测一次
}
```
### 2. 视频质量
```java
// 降低视频比特率节省存储
recorder.setVideoBitrate(500000); // 降低比特率
```
### 3. 去重时间窗口
```java
// 缩短去重时间窗口
(currentId - entry.getValue()) > grabber.getFrameRate() * 30 // 从60秒改为30秒
```
## 🔍 调试方法
### 查看执行日志
```bash
# 查看后端日志
docker-compose logs -f backend | grep "inspection"
# 查看Python服务日志
docker-compose logs -f python-service
# 查看特定记录的处理过程
docker-compose logs backend | grep "recordId=2001"
```
### 数据库查询
```sql
-- 查看最新的执行记录
SELECT
r.record_id,
r.task_id,
r.execute_time,
r.duration,
r.status,
r.result,
LENGTH(r.accessory) as accessory_length
FROM v_inspection_task_record r
ORDER BY r.create_time DESC
LIMIT 10;
-- 查看记录对应的告警
SELECT
a.alarm_id,
a.alarm_content,
a.confidence,
a.frame_position,
a.create_time
FROM v_alarm_record a
WHERE a.task_id = ?
ORDER BY a.create_time DESC;
-- 统计告警数量
SELECT
r.record_id,
r.execute_time,
COUNT(a.alarm_id) as alarm_count
FROM v_inspection_task_record r
LEFT JOIN v_alarm_record a ON r.task_id = a.task_id
AND a.create_time >= r.execute_time
AND a.create_time <= DATE_ADD(r.execute_time, INTERVAL r.duration SECOND)
GROUP BY r.record_id
ORDER BY r.create_time DESC;
```
## 💡 扩展建议
### 1. 添加检测类型过滤
在`createAlarmRecordForRecord`中:
```java
// 只对特定类型创建告警
List<String> alarmTypes = Arrays.asList("垃圾", "烟雾", "火焰");
if (!alarmTypes.contains(detection.getLabel())) {
return; // 忽略其他类型
}
```
### 2. 添加置信度阈值
```java
// 只对高置信度的检测创建告警
if (detection.getConfidence() < 0.7) {
return; // 忽略低置信度检测
}
```
### 3. 添加区域过滤
```java
// 只对特定区域的检测创建告警
Rect rect = detection.getRect();
if (!isInMonitorArea(rect, task)) {
return; // 忽略监控区域外的检测
}
```
### 4. 添加告警级别
```java
// 根据检测类型设置告警级别
String alarmLevel = "medium";
if (detection.getLabel().contains("火焰")) {
alarmLevel = "high";
} else if (detection.getLabel().contains("垃圾")) {
alarmLevel = "low";
}
alarmRecord.setAlarmLevel(alarmLevel);
```
## 🔒 安全考虑
### 1. 异常处理
所有方法都包含完整的异常处理:
- 视频录制失败 → 更新record状态为失败
- Python服务调用失败 → 记录错误但不影响整体流程
- MinIO上传失败 → 记录错误并回滚
### 2. 资源清理
使用try-finally确保资源释放
- FFmpegFrameGrabber自动关闭
- FFmpegFrameRecorder自动关闭
- 临时文件自动删除
### 3. 并发控制
使用`@Async`异步执行,避免阻塞:
- 任务执行不阻塞API响应
- 多个任务可并发执行
- 通过runningTasks避免重复执行
## 📈 监控指标
### 建议监控的指标
1. **执行成功率**
```sql
SELECT
COUNT(CASE WHEN status = 0 THEN 1 END) * 100.0 / COUNT(*) as success_rate
FROM v_inspection_task_record
WHERE execute_time >= DATE_SUB(NOW(), INTERVAL 1 DAY);
```
2. **平均执行时长**
```sql
SELECT AVG(duration) as avg_duration
FROM v_inspection_task_record
WHERE status = 0 AND execute_time >= DATE_SUB(NOW(), INTERVAL 1 DAY);
```
3. **告警统计**
```sql
SELECT
alarm_type,
COUNT(*) as count
FROM v_alarm_record
WHERE create_time >= DATE_SUB(NOW(), INTERVAL 1 DAY)
GROUP BY alarm_type;
```
## 📞 技术支持
如有问题,请查看:
1. 后端日志:`docker-compose logs backend`
2. Python服务日志`docker-compose logs python-service`
3. 数据库记录:查询`v_inspection_task_record`和`v_alarm_record`表
4. MinIO对象查询`v_minio_object`表
---
**文档版本**: 1.0
**最后更新**: 2025-09-30
**适用版本**: YOLOv8, Docker Compose部署

304
README-DOCKER.md Normal file
View File

@@ -0,0 +1,304 @@
# RTSP视频分析系统 Docker部署方案
## 📋 目录结构
```
rtsp-video-analysis-system/
├── .env # 环境变量配置文件
├── docker-compose.yml # Docker Compose编排文件
├── ruoyi-admin/
│ └── Dockerfile # Java后端Dockerfile
├── rtsp-vue/
│ ├── Dockerfile # 前端Dockerfile
│ └── nginx.conf # Nginx配置文件
├── python-inference-service/
│ └── Dockerfile # Python推理服务Dockerfile
└── sql/ # 数据库初始化脚本
```
## 🚀 快速开始
### 1. 前置要求
- Docker 20.10+
- Docker Compose 2.0+
- 可选NVIDIA Docker Runtime如需GPU支持
### 2. 准备工作
#### 2.1 配置环境变量
编辑 `.env` 文件,根据需要调整配置:
```bash
# MySQL数据库配置
MYSQL_ROOT_PASSWORD=ruoyi123
MYSQL_DATABASE=ry-vue
MYSQL_USER=ruoyi
MYSQL_PASSWORD=ruoyi123
# MinIO使用外部已部署的服务配置在 ruoyi-admin/src/main/resources/application.yml 中,无需在 .env 中配置
# 其他配置...
```
#### 2.2 准备数据库初始化脚本
将数据库SQL脚本放到 `sql/` 目录:
```bash
# 示例
cp sql/fad_watch.sql sql/
cp sql/ry_face.sql sql/
```
### 3. 启动服务
#### 3.1 构建并启动所有服务
```bash
# 启动所有服务
docker-compose up -d
# 查看服务状态
docker-compose ps
# 查看日志
docker-compose logs -f
```
#### 3.2 查看特定服务日志
```bash
# 查看后端日志
docker-compose logs -f backend
# 查看前端日志
docker-compose logs -f frontend
# 查看Python服务日志
docker-compose logs -f python-service
```
### 4. 访问服务
- **前端界面**: http://localhost:10080 或配置的FRONTEND_PORT
- **后端API**: 通过前端的 `/prod-api/` 路径访问
- **Python推理API**: 通过前端的 `/python-api/` 路径访问
- **MinIO控制台**: 仅容器内部访问如需外部访问需修改docker-compose.yml
> 注意:除前端外,其他服务端口均不对外暴露,仅容器间通信使用。
## 🔧 服务说明
### 服务列表
| 服务名称 | 容器名称 | 端口(内部) | 端口(外部) | 说明 |
|---------|---------|------------|------------|------|
| mysql | rtsp-mysql | 3306 | - | MySQL数据库 |
| redis | rtsp-redis | 6379 | - | Redis缓存 |
| python-service | rtsp-python-service | 8000 | - | Python推理服务CPU |
| backend | rtsp-backend | 8080 | - | Java后端服务 |
| frontend | rtsp-frontend | 80 | 10080 | Vue前端服务 |
**注意**
- MinIO使用外部已部署的服务http://49.232.154.205:10900
- Python推理服务使用CPU模式不需要GPU
### 服务依赖关系
```
frontend
└── backend
├── mysql
├── redis
├── python-service
└── minio (外部服务)
```
## ⚙️ 高级配置
### 1. 修改前端暴露端口
编辑 `.env` 文件:
```bash
FRONTEND_PORT=8080 # 修改为你想要的端口
```
### 2. 如需外部访问其他服务
编辑 `docker-compose.yml`,在对应服务下添加 `ports` 配置:
```yaml
mysql:
# ... 其他配置
ports:
- "3306:3306" # 添加端口映射
```
### 3. 调整Java后端内存
编辑 `ruoyi-admin/Dockerfile`
```dockerfile
ENV JAVA_OPTS="-Xms1g -Xmx2g -Djava.security.egd=file:/dev/./urandom"
```
或在 `docker-compose.yml` 中添加环境变量:
```yaml
backend:
environment:
JAVA_OPTS: "-Xms1g -Xmx2g"
```
### 4. MinIO配置
本系统使用**外部已部署的MinIO服务**,配置在 `ruoyi-admin/src/main/resources/application.yml`
```yaml
minio:
enabled: true
endpoint: http://49.232.154.205:10900
access-key: 4EsLD9g9OM09DT0HaBKj
secret-key: 05SFC5fleqTnaLRYBrxHiphMFYbGX5nYicj0WCHA
bucket: rtsp
```
如需修改MinIO配置请编辑上述文件。
## 🔍 故障排查
### 1. 查看服务健康状态
```bash
docker-compose ps
```
健康的服务会显示 `healthy` 状态。
### 2. 查看容器日志
```bash
# 查看所有服务日志
docker-compose logs
# 查看特定服务的最新日志
docker-compose logs --tail=100 -f backend
```
### 3. 进入容器调试
```bash
# 进入后端容器
docker exec -it rtsp-backend sh
# 进入前端容器
docker exec -it rtsp-frontend sh
# 进入Python服务容器
docker exec -it rtsp-python-service bash
```
### 4. 重启服务
```bash
# 重启单个服务
docker-compose restart backend
# 重启所有服务
docker-compose restart
```
### 5. 完全重建服务
```bash
# 停止并删除所有容器
docker-compose down
# 重新构建并启动
docker-compose up -d --build
```
### 6. 清理数据重新开始
```bash
# 停止并删除所有容器和数据卷
docker-compose down -v
# 重新启动
docker-compose up -d
```
## 📦 数据持久化
以下数据会持久化存储:
- `mysql-data`: MySQL数据库数据
- `redis-data`: Redis持久化数据
- `backend-logs`: 后端服务日志
- `backend-upload`: 后端上传文件
**注意**MinIO数据存储在外部服务器不在本地
## 🔒 安全建议
1. **修改默认密码**:部署到生产环境前,请修改 `.env` 中的所有默认密码
2. **限制网络访问**:使用防火墙规则限制对前端端口的访问
3. **HTTPS配置**在生产环境中建议配置HTTPS可使用Nginx反向代理+Let's Encrypt
4. **定期备份**:定期备份数据卷内容
## 🔄 更新部署
```bash
# 1. 拉取最新代码
git pull
# 2. 重新构建镜像
docker-compose build
# 3. 重启服务
docker-compose up -d
```
## 📝 常见问题
### Q1: 后端启动失败,提示连接不到数据库
**A**: 检查MySQL是否已完全启动。使用 `docker-compose logs mysql` 查看MySQL日志。
### Q2: 前端无法连接后端API
**A**: 确认nginx配置中的后端地址是否正确应该使用容器名 `rtsp-backend` 而不是 `localhost`
### Q3: Python推理服务启动慢
**A**:
1. 首次启动需要下载Ultralytics YOLOv8依赖这可能需要一些时间
2. 确保`python-inference-service/models/best.pt`文件存在
3. 使用 `docker-compose logs python-service` 查看进度
### Q4: 如何更换MinIO服务
**A**: 修改 `ruoyi-admin/src/main/resources/application.yml` 中的MinIO配置
```yaml
minio:
endpoint: http://your-minio-server:port
access-key: your-access-key
secret-key: your-secret-key
bucket: your-bucket
```
## 📞 支持
如有问题,请查看:
- 项目GitHub Issues
- 查看服务日志进行调试
- 检查环境变量配置是否正确
## 📄 许可证
[根据项目实际许可证填写]

135
START-HERE.md Normal file
View File

@@ -0,0 +1,135 @@
# 🚀 快速开始 - 从这里开始!
## 📌 您需要的一切
### 第一步:部署系统
```bash
# 1. 准备YOLOv8模型文件
# 将训练好的 best.pt 放到:
python-inference-service/models/best.pt
# 2. 运行部署脚本
deploy.bat # Windows用户
# 3. 访问系统
# 打开浏览器http://localhost:10080
```
**就这么简单!** 🎊
---
## 📖 详细文档
### 🐳 Docker部署
| 文档 | 用途 | 优先级 |
|------|------|--------|
| `FINAL-SUMMARY.md` | Docker配置总结 | ⭐⭐⭐ |
| `DOCKER-QUICK-START.md` | 常用命令 | ⭐⭐⭐ |
| `README-DOCKER.md` | 完整部署文档 | ⭐⭐ |
| `DEPLOYMENT-NOTES.md` | 配置细节 | ⭐ |
### 🤖 YOLOv8模型
| 文档 | 用途 | 优先级 |
|------|------|--------|
| `YOLOV8-SETUP.md` | 模型配置指南 | ⭐⭐⭐ |
| `python-inference-service/README.md` | Python服务文档 | ⭐⭐ |
### 📹 巡检任务
| 文档 | 用途 | 优先级 |
|------|------|--------|
| `INSPECTION-FEATURE-SUMMARY.md` | 功能总结 | ⭐⭐⭐ |
| `INSPECTION-WORKFLOW.md` | 详细流程 | ⭐⭐ |
### 📝 更新记录
| 文档 | 用途 |
|------|------|
| `COMPLETE-SUMMARY.md` | 完整更新总结 |
| `UPDATE-SUMMARY.md` | 变更记录 |
---
## 🎯 核心配置
### 系统架构
```
浏览器 :10080 (唯一对外端口)
前端 (Nginx)
后端 + Python服务 (内部)
MySQL + Redis + MinIO(外部)
```
### 关键特性
-**YOLOv8**: CPU模式无需GPU
-**MinIO**: 使用外部服务
-**端口**: 只暴露10080
-**记录**: 自动创建巡检记录
-**视频**: 自动保存到MinIO
-**识别**: 调用YOLOv8分析
-**告警**: 自动去重
---
## 🔧 常用命令
```bash
# 启动服务
docker-compose up -d
# 查看状态
docker-compose ps
# 查看日志
docker-compose logs -f backend
docker-compose logs -f python-service
# 重启服务
docker-compose restart
# 停止服务
docker-compose down
```
---
## 📞 遇到问题?
### 部署问题
→ 查看 `DOCKER-QUICK-START.md`
### 模型问题
→ 查看 `YOLOV8-SETUP.md`
### 功能问题
→ 查看 `INSPECTION-FEATURE-SUMMARY.md`
### 其他问题
→ 查看日志:`docker-compose logs [服务名]`
---
## ✨ 下一步
1. ✅ 部署系统 - `deploy.bat`
2. ✅ 访问前端 - http://localhost:10080
3. ✅ 创建巡检任务
4. ✅ 查看执行记录和告警
---
**快速帮助**
- 📘 完整功能:`COMPLETE-SUMMARY.md`
- 🚀 快速参考:`DOCKER-QUICK-START.md`
- 🤖 模型配置:`YOLOV8-SETUP.md`
**祝使用愉快!** 🎉

295
UPDATE-SUMMARY.md Normal file
View File

@@ -0,0 +1,295 @@
# 🎉 Docker部署方案更新总结
根据您的需求,已完成以下更新:
## ✅ 主要变更
### 1. YOLOv5 → YOLOv8 (Ultralytics)
| 变更项 | 之前 | 现在 |
|-------|------|------|
| Python包 | `yolov5>=7.0.0` | `ultralytics>=8.0.0` |
| 模型文件 | `garbage_model.py` | `yolov8_model.py` |
| 模型名称 | `garbage_detector` | `yolov8_detector` |
| API框架 | yolov5 | Ultralytics YOLO |
**优势:**
- ✨ 更高的检测精度
- ⚡ 更快的推理速度
- 🎯 更简单的API接口
- 📚 更好的官方文档支持
### 2. 前端端口调整
| 变更项 | 之前 | 现在 |
|-------|------|------|
| 对外端口 | 80 | 10080 |
| 容器内部端口 | 80 | 80 |
| 访问地址 | http://localhost | http://localhost:10080 |
**修改文件:**
- `.env` - `FRONTEND_PORT=10080`
- 所有文档中的访问地址
## 📦 新增文件
### 1. YOLOv8模型支持
- `python-inference-service/models/yolov8_model.py` - YOLOv8模型包装类
- `YOLOV8-SETUP.md` - YOLOv8配置和使用指南
- 更新 `python-inference-service/README.md` - 详细的YOLOv8文档
### 2. 配置文件更新
- 更新 `python-inference-service/requirements.txt` - ultralytics依赖
- 更新 `python-inference-service/models/models.json` - 指向yolov8_model
### 3. 文档更新
- `README-DOCKER.md` - 更新端口和YOLOv8说明
- `DOCKER-QUICK-START.md` - 更新访问地址和YOLOv8说明
- `deploy.bat` / `deploy.sh` - 更新访问地址
- `UPDATE-SUMMARY.md` - 本文档
## 🚀 使用指南
### 准备工作
1. **准备YOLOv8模型文件**
```bash
# 将训练好的模型放到指定位置
python-inference-service/models/best.pt
```
2. **创建类别文件**(可选)
```bash
# 创建 classes.txt每行一个类别名
python-inference-service/models/classes.txt
```
3. **检查环境变量**
```bash
# 查看 .env 文件,确认端口配置
FRONTEND_PORT=10080
```
### 快速部署
#### Windows
```batch
deploy.bat
```
#### Linux/Mac
```bash
chmod +x deploy.sh
./deploy.sh
```
#### 手动部署
```bash
# 1. 启动所有服务
docker-compose up -d
# 2. 查看服务状态
docker-compose ps
# 3. 查看日志
docker-compose logs -f python-service
```
### 访问系统
部署成功后访问:
- **前端界面**: http://localhost:10080
- **后端API**: http://localhost:10080/prod-api/
- **Python API**: http://localhost:10080/python-api/
- **API文档**: http://localhost:10080/prod-api/swagger-ui.html
## 📋 YOLOv8模型要求
### 模型训练
如果需要训练新模型:
```python
from ultralytics import YOLO
# 加载预训练模型
model = YOLO('yolov8n.pt') # n/s/m/l/x
# 训练
results = model.train(
data='data.yaml',
epochs=100,
imgsz=640,
batch=16
)
# 模型保存在 runs/detect/train/weights/best.pt
```
### 模型部署
```bash
# 1. 复制模型到项目
cp runs/detect/train/weights/best.pt python-inference-service/models/
# 2. 创建类别文件
cat > python-inference-service/models/classes.txt << EOF
class1
class2
class3
EOF
# 3. 重启服务
docker-compose restart python-service
```
## 🔧 配置说明
### 环境变量(.env
所有配置集中在一个文件:
```bash
# 前端端口
FRONTEND_PORT=10080
# MySQL
MYSQL_HOST=rtsp-mysql
MYSQL_PORT=3306
MYSQL_PASSWORD=ruoyi123
# Redis
REDIS_HOST=rtsp-redis
# MinIO使用外部已部署的服务配置在 application.yml 中,无需在 .env 中配置
# Python服务
PYTHON_SERVICE_HOST=rtsp-python-service
```
### 模型配置models.json
```json
[
{
"name": "yolov8_detector",
"path": "models/yolov8_model.py",
"size": [640, 640]
}
]
```
### 性能调优
编辑 `yolov8_model.py`
```python
# 置信度阈值
self.conf_threshold = 0.25 # 默认0.25
# 输入尺寸
self.img_size = 640 # 320/640/1280
```
## 🎯 服务架构
```
┌─────────────┐
│ Browser │
│ :10080 │ ← 唯一对外暴露
└──────┬──────┘
┌──────▼──────┐
│ Frontend │
│ Nginx │
└──────┬──────┘
┌───┴────────────────┐
│ │
┌──▼───────┐ ┌──────▼──────┐
│ Backend │ │ Python │
│ :8080 │ │ Service │
│ │ │ :8000(CPU) │
└───┬──┬───┘ └─────────────┘
│ │
┌───▼──▼───┐ ┌─────────────┐
│ MySQL │ │ MinIO(外部) │
│ Redis │ │ 49.232... │
└──────────┘ └─────────────┘
```
## ⚠️ 重要提示
### 必须操作
1. ✅ 将 `best.pt` 文件放到 `python-inference-service/models/` 目录
2. ✅ 确保模型是 YOLOv8 训练的不支持YOLOv5直接使用
3. ✅ 首次启动等待1-2分钟
### 可选操作
1. 📝 创建 `classes.txt` 类别文件
2. 🎨 调整置信度阈值
3. 🔧 修改MinIO配置如需更换服务器
### 常见问题
#### Q1: 模型加载失败
```bash
# 检查模型文件
ls -lh python-inference-service/models/best.pt
# 查看日志
docker-compose logs python-service
```
#### Q2: 端口被占用
```bash
# 修改 .env 文件
FRONTEND_PORT=8080 # 改为其他端口
# 重启服务
docker-compose down
docker-compose up -d
```
#### Q3: 如何更换MinIO服务
```bash
# 编辑 ruoyi-admin/src/main/resources/application.yml
minio:
endpoint: http://your-server:port
access-key: your-key
secret-key: your-secret
bucket: your-bucket
```
## 📖 文档导航
- 📘 **完整部署文档**: `README-DOCKER.md`
- 🚀 **快速开始**: `DOCKER-QUICK-START.md`
- 🤖 **YOLOv8配置**: `YOLOV8-SETUP.md`
- 📋 **文件清单**: `DEPLOYMENT-FILES.md`
- 🐍 **Python服务**: `python-inference-service/README.md`
## 🎓 学习资源
- [Ultralytics YOLOv8官方文档](https://docs.ultralytics.com/)
- [YOLOv8训练教程](https://docs.ultralytics.com/modes/train/)
- [Docker部署最佳实践](https://docs.docker.com/develop/dev-best-practices/)
## 💡 技术支持
遇到问题?
1. 查看日志:`docker-compose logs [服务名]`
2. 查看服务状态:`docker-compose ps`
3. 查看相关文档
4. 检查模型文件和配置
---
**更新日期**: 2025-09-30
**适用版本**: Docker Compose 3.8+, YOLOv8 8.0+
✅ 所有更新已完成,可以直接使用!

314
YOLOV8-SETUP.md Normal file
View File

@@ -0,0 +1,314 @@
# YOLOv8模型配置指南
本系统使用**YOLOv8**Ultralytics进行目标检测推理。
## 快速开始
### 1. 准备模型文件
将YOLOv8训练好的模型放到指定位置
```bash
# 模型文件路径
python-inference-service/models/best.pt
```
### 2. 准备类别文件(可选)
创建 `classes.txt` 文件,每行一个类别名称:
```bash
# 文件路径
python-inference-service/models/classes.txt
# 内容示例
person
car
truck
bicycle
```
### 3. 启动服务
```bash
# 使用部署脚本
deploy.bat # Windows
./deploy.sh # Linux/Mac
# 或手动启动
docker-compose up -d
```
## 训练YOLOv8模型
如果还没有训练好的模型,可以按以下步骤训练:
### 1. 安装Ultralytics
```bash
pip install ultralytics
```
### 2. 准备数据集
创建数据集配置文件 `data.yaml`
```yaml
# 数据集路径
path: /path/to/dataset
train: images/train
val: images/val
# 类别
nc: 4 # 类别数量
names: ['person', 'car', 'truck', 'bicycle'] # 类别名称
```
### 3. 训练模型
```python
from ultralytics import YOLO
# 加载预训练模型
model = YOLO('yolov8n.pt') # n, s, m, l, x 可选
# 训练模型
results = model.train(
data='data.yaml',
epochs=100,
imgsz=640,
batch=16,
device=0 # GPU设备IDCPU使用'cpu'
)
# 训练完成后,最佳模型保存在 runs/detect/train/weights/best.pt
```
### 4. 导出模型
训练完成后,将最佳模型复制到项目中:
```bash
# 复制模型文件
cp runs/detect/train/weights/best.pt python-inference-service/models/best.pt
# 创建类别文件
echo "person
car
truck
bicycle" > python-inference-service/models/classes.txt
```
## 模型配置
### 修改模型参数
编辑 `python-inference-service/models/yolov8_model.py`
```python
# 置信度阈值
self.conf_threshold = 0.25 # 默认0.25,降低可检测更多目标
# 输入图像尺寸
self.img_size = 640 # 默认640可改为320、1280等
```
### 配置文件
`python-inference-service/models/models.json`
```json
[
{
"name": "yolov8_detector",
"path": "models/yolov8_model.py",
"size": [640, 640],
"comment": "YOLOv8检测模型"
}
]
```
参数说明:
- `name`: 模型名称API调用时使用
- `path`: 模型包装类路径
- `size`: 输入图像尺寸 [宽度, 高度]
## 多模型配置
如果有多个模型,可以配置多个:
```json
[
{
"name": "person_detector",
"path": "models/person_model.py",
"size": [640, 640]
},
{
"name": "vehicle_detector",
"path": "models/vehicle_model.py",
"size": [640, 640]
}
]
```
然后为每个模型创建对应的模型文件,参考 `yolov8_model.py`
## 测试模型
### 本地测试
```python
from ultralytics import YOLO
# 加载模型
model = YOLO('python-inference-service/models/best.pt')
# 测试图像
results = model('test.jpg')
# 显示结果
results[0].show()
# 打印检测结果
for r in results:
print(r.boxes)
```
### API测试
启动服务后使用curl测试
```bash
# 1. 检查服务健康
curl http://localhost:10080/python-api/health
# 2. 查看可用模型
curl http://localhost:10080/python-api/api/models
# 3. 测试检测(文件上传)
curl -X POST "http://localhost:10080/python-api/api/detect/file" \
-F "model_name=yolov8_detector" \
-F "file=@test.jpg"
```
## 性能优化
### 1. 使用GPU
确保Docker配置了GPU支持
```yaml
# docker-compose.yml
python-service:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
```
### 2. 选择合适的模型大小
YOLOv8提供多种尺寸
| 模型 | 参数量 | 速度 | 精度 |
|------|--------|------|------|
| YOLOv8n | 3.2M | 最快 | 较低 |
| YOLOv8s | 11.2M | 快 | 中等 |
| YOLOv8m | 25.9M | 中等 | 较高 |
| YOLOv8l | 43.7M | 慢 | 高 |
| YOLOv8x | 68.2M | 最慢 | 最高 |
根据需求选择:
- 实时处理 → 使用 `yolov8n.pt``yolov8s.pt`
- 高精度 → 使用 `yolov8l.pt``yolov8x.pt`
### 3. 调整图像尺寸
```python
# 在 yolov8_model.py 中
self.img_size = 320 # 更快但精度降低
# 或
self.img_size = 1280 # 更慢但精度提高
```
## 常见问题
### Q1: 模型加载失败
```
错误FileNotFoundError: best.pt not found
解决:确保模型文件在 python-inference-service/models/best.pt
```
### Q2: GPU不可用
```
错误CUDA not available
解决:
1. 检查NVIDIA驱动安装
2. 检查Docker GPU支持
3. 使用 docker run --gpus all 测试GPU
```
### Q3: 检测结果为空
```
原因:
1. 置信度阈值太高
2. 模型未正确训练
3. 输入图像与训练数据差异大
解决:
1. 降低 conf_threshold
2. 检查模型训练情况
3. 检查输入图像质量
```
### Q4: 推理速度慢
```
解决:
1. 使用GPU加速
2. 使用更小的模型如yolov8n
3. 减小输入图像尺寸
4. 批量处理多张图像
```
## 模型版本差异
### YOLOv5 vs YOLOv8
本系统已从YOLOv5升级到YOLOv8
| 特性 | YOLOv5 | YOLOv8 |
|------|--------|--------|
| 精度 | 较高 | 更高 |
| 速度 | 快 | 更快 |
| API | yolov5 | ultralytics |
| 训练 | 复杂 | 简单 |
### 迁移说明
如果之前使用YOLOv5模型
1. 使用YOLOv8重新训练推荐
2. 或使用 `garbage_model.py` 作为模板支持YOLOv5
## 参考资料
- [Ultralytics YOLOv8文档](https://docs.ultralytics.com/)
- [YOLOv8 GitHub](https://github.com/ultralytics/ultralytics)
- [模型训练教程](https://docs.ultralytics.com/modes/train/)
- [推理示例](https://docs.ultralytics.com/modes/predict/)
## 技术支持
如遇问题:
1. 查看服务日志:`docker-compose logs python-service`
2. 查看模型加载日志
3. 测试模型是否可以本地加载
4. 检查环境配置是否正确

54
deploy.bat Normal file
View File

@@ -0,0 +1,54 @@
@echo off
REM 一键部署脚本依次执行环境检查 -> 停止旧容器 -> 构建镜像 -> 启动服务 -> 展示状态
REM 修复:构建或启动失败时立即报错退出,不再误报"部署完成"
chcp 65001 >nul
echo ============================================
echo    RTSP视频分析系统 Docker部署
echo ============================================
echo.
REM 检查Docker是否运行
docker info >nul 2>&1
if %errorlevel% neq 0 (
    echo [错误] Docker未运行或未安装
    echo 请先启动Docker Desktop
    pause
    exit /b 1
)
echo [1/5] 检查环境文件...
if not exist .env (
    echo [错误] .env文件不存在
    echo 请先配置.env文件
    pause
    exit /b 1
)
echo [2/5] 停止现有容器...
docker-compose down
echo [3/5] 构建Docker镜像...
docker-compose build
if %errorlevel% neq 0 (
    echo [错误] 镜像构建失败请检查Dockerfile和网络连接
    pause
    exit /b 1
)
echo [4/5] 启动所有服务...
docker-compose up -d
if %errorlevel% neq 0 (
    echo [错误] 服务启动失败,请运行 docker-compose logs 查看日志
    pause
    exit /b 1
)
echo [5/5] 等待服务启动...
timeout /t 10 /nobreak >nul
echo.
echo ============================================
echo    部署完成!
echo ============================================
echo.
echo 查看服务状态: docker-compose ps
echo 查看日志: docker-compose logs -f
echo.
echo 访问地址:
echo   前端界面: http://localhost:10080
echo.
echo 按任意键查看服务状态...
pause >nul
docker-compose ps
echo.
pause

46
deploy.sh Normal file
View File

@@ -0,0 +1,46 @@
#!/bin/bash
# 一键部署脚本依次执行环境检查 -> 停止旧容器 -> 构建镜像 -> 启动服务 -> 展示状态
# 修复:构建或启动失败时立即报错退出,不再误报"部署完成"

echo "============================================"
echo "   RTSP视频分析系统 Docker部署"
echo "============================================"
echo ""

# 检查Docker守护进程是否可用
if ! docker info > /dev/null 2>&1; then
    echo "[错误] Docker未运行或未安装"
    echo "请先安装并启动Docker"
    exit 1
fi

echo "[1/5] 检查环境文件..."
if [ ! -f .env ]; then
    echo "[错误] .env文件不存在"
    echo "请先配置.env文件"
    exit 1
fi

echo "[2/5] 停止现有容器..."
docker-compose down

echo "[3/5] 构建Docker镜像..."
if ! docker-compose build; then
    echo "[错误] 镜像构建失败请检查Dockerfile和网络连接"
    exit 1
fi

echo "[4/5] 启动所有服务..."
if ! docker-compose up -d; then
    echo "[错误] 服务启动失败,请运行 docker-compose logs 查看日志"
    exit 1
fi

echo "[5/5] 等待服务启动..."
sleep 10

echo ""
echo "============================================"
echo "   部署完成!"
echo "============================================"
echo ""
echo "查看服务状态: docker-compose ps"
echo "查看日志: docker-compose logs -f"
echo ""
echo "访问地址:"
echo "  前端界面: http://localhost:10080"
echo ""
echo "服务状态:"
docker-compose ps

125
docker-compose.yml Normal file
View File

@@ -0,0 +1,125 @@
version: '3.8'

# RTSP视频分析系统服务编排
# 对外仅暴露前端端口(${FRONTEND_PORT})其余服务通过内部网络 rtsp-network 互通。
# MinIO 使用外部已部署的服务,不在本编排中定义。
services:

  # MySQL 8.0 数据库sql/ 目录下的脚本在首次初始化时自动执行
  mysql:
    image: mysql:8.0
    container_name: ${MYSQL_HOST}
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: ${MYSQL_DATABASE}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
      TZ: ${TZ}
    volumes:
      # 数据持久化
      - mysql-data:/var/lib/mysql
      # 初始化SQL脚本仅首次建库时执行
      - ./sql:/docker-entrypoint-initdb.d
    # utf8mb4 字符集 + 兼容旧客户端的认证插件
    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --default-authentication-plugin=mysql_native_password
    networks:
      - rtsp-network
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis 缓存开启AOF持久化REDIS_PASSWORD 为空时不加密码
  redis:
    image: redis:7-alpine
    container_name: ${REDIS_HOST}
    restart: always
    command: redis-server --appendonly yes ${REDIS_PASSWORD:+--requirepass ${REDIS_PASSWORD}}
    volumes:
      - redis-data:/data
    networks:
      - rtsp-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Python推理服务FastAPI + YOLOv8模型文件通过卷挂载便于替换
  python-service:
    build:
      context: ./python-inference-service
      dockerfile: Dockerfile
    container_name: ${PYTHON_SERVICE_HOST}
    restart: always
    environment:
      TZ: ${TZ}
      MODEL_DIR: /app/models
    volumes:
      # 模型目录挂载(含 best.pt、classes.txt、models.json
      - ./python-inference-service/models:/app/models
    networks:
      - rtsp-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Java后端RuoYi依赖 MySQL/Redis 健康后再启动
  backend:
    build:
      context: .
      dockerfile: ruoyi-admin/Dockerfile
    container_name: ${BACKEND_HOST}
    restart: always
    depends_on:
      mysql:
        condition: service_healthy
      redis:
        condition: service_healthy
      python-service:
        condition: service_started
    environment:
      TZ: ${TZ}
      SPRING_DATASOURCE_URL: jdbc:mysql://${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DATABASE}?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai
      SPRING_DATASOURCE_USERNAME: ${MYSQL_USER}
      SPRING_DATASOURCE_PASSWORD: ${MYSQL_PASSWORD}
      SPRING_DATA_REDIS_HOST: ${REDIS_HOST}
      SPRING_DATA_REDIS_PORT: ${REDIS_PORT}
      SPRING_DATA_REDIS_PASSWORD: ${REDIS_PASSWORD}
      # 通过容器名访问Python推理服务
      PYTHON_API_URL: http://${PYTHON_SERVICE_HOST}:${PYTHON_SERVICE_PORT}
    volumes:
      - backend-logs:/app/logs
      - backend-upload:/app/upload
    networks:
      - rtsp-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/actuator/health"]
      interval: 30s
      timeout: 10s
      retries: 5
      # Spring Boot启动较慢给60秒宽限期
      start_period: 60s

  # Vue前端Nginx唯一对外暴露端口的服务
  frontend:
    build:
      context: ./rtsp-vue
      dockerfile: Dockerfile
      args:
        BACKEND_API_URL: http://${BACKEND_HOST}:${BACKEND_PORT}
    container_name: rtsp-frontend
    restart: always
    ports:
      - "${FRONTEND_PORT}:80"
    depends_on:
      backend:
        condition: service_healthy
    environment:
      TZ: ${TZ}
    networks:
      - rtsp-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80"]
      interval: 30s
      timeout: 10s
      retries: 3

# 内部桥接网络,所有服务通过容器名互相访问
networks:
  rtsp-network:
    driver: bridge

# 持久化数据卷
volumes:
  mysql-data:
  redis-data:
  backend-logs:
  backend-upload:

View File

@@ -0,0 +1,47 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
venv/
ENV/
*.egg-info/
dist/
build/
# IDE
.vscode
.idea
*.swp
*.swo
# OS
.DS_Store
Thumbs.db
# Git
.git
.gitignore
# CI/CD
.github
.gitlab-ci.yml
# Documentation
*.md
README*
# Scripts
*.bat
*.sh
# Large model files (will be mounted as volume)
models/*.pt
models/*.onnx
models/*.pth
# Logs
*.log
logs/

View File

@@ -0,0 +1,45 @@
# Base image: PyTorch 2.0.1 with CUDA 11.7 runtime.
# NOTE(review): the deployment docs state the inference service runs in CPU-only
# mode without GPU; this CUDA runtime image still works on CPU-only hosts, but a
# plain CPU PyTorch image would be considerably smaller — confirm which is intended.
FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
# Working directory for the application
WORKDIR /app
# Use the Tsinghua PyPI mirror to speed up pip installs (mainland China networks)
RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
# System libraries required by OpenCV, plus curl for the container healthcheck
RUN apt-get update && apt-get install -y \
libglib2.0-0 \
libsm6 \
libxext6 \
libxrender-dev \
libgomp1 \
curl \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements first so the dependency layer is cached across code changes
COPY requirements.txt .
# Install Python dependencies (ultralytics, fastapi, uvicorn, opencv-python, ...)
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY app/ /app/app/
# Model directory; model weights are mounted as a volume at runtime (see docker-compose.yml)
RUN mkdir -p /app/models
# Runtime environment: module path, model location, unbuffered stdout for live logs
ENV PYTHONPATH=/app
ENV MODEL_DIR=/app/models
ENV PYTHONUNBUFFERED=1
# Internal-only port (not published to the host)
EXPOSE 8000
# Healthcheck against the FastAPI /health endpoint
HEALTHCHECK --interval=30s --timeout=10s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1
# Start the FastAPI app with a single uvicorn worker
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "1"]

View File

@@ -0,0 +1,252 @@
# Python推理服务
基于FastAPI的YOLOv8目标检测推理服务。
## 功能特性
- 支持YOLOv8模型推理
- RESTful API接口
- 支持Base64图像和文件上传
- 支持GPU加速可选
- Docker部署支持
## 模型要求
本服务使用**YOLOv8**Ultralytics进行目标检测。
### 模型文件准备
1. **模型文件**: 将YOLOv8训练好的模型文件命名为`best.pt`,放在`models/`目录下
2. **类别文件**: (可选)创建`classes.txt`文件,每行一个类别名称
3. **配置文件**: `models.json`配置模型参数
### 目录结构
```
python-inference-service/
├── app/
│ ├── __init__.py
│ ├── main.py # FastAPI应用
│ ├── detector.py # 检测器封装
│ └── models.py # 数据模型
├── models/
│ ├── best.pt # YOLOv8模型文件必需
│ ├── classes.txt # 类别名称(可选)
│ ├── yolov8_model.py # YOLOv8模型包装类
│ └── models.json # 模型配置
├── requirements.txt
└── Dockerfile
```
## 安装依赖
```bash
pip install -r requirements.txt
```
主要依赖:
- `ultralytics>=8.0.0` - YOLOv8框架
- `fastapi` - Web框架
- `uvicorn` - ASGI服务器
- `opencv-python` - 图像处理
- `torch` - PyTorch
## 配置模型
编辑`models/models.json`
```json
[
{
"name": "yolov8_detector",
"path": "models/yolov8_model.py",
"size": [640, 640],
"comment": "YOLOv8检测模型"
}
]
```
参数说明:
- `name`: 模型名称API调用时使用
- `path`: 模型包装类的路径
- `size`: 输入图像尺寸 [宽度, 高度]
## 启动服务
### 本地启动
```bash
# 启动服务默认端口8000
uvicorn app.main:app --host 0.0.0.0 --port 8000
# 或使用启动脚本
python -m uvicorn app.main:app --host 0.0.0.0 --port 8000
```
### Docker启动
```bash
# 构建镜像
docker build -t python-inference-service .
# 运行容器
docker run -p 8000:8000 \
-v $(pwd)/models:/app/models \
python-inference-service
```
### 使用GPU
```bash
# 确保安装了NVIDIA Docker Runtime
docker run --gpus all -p 8000:8000 \
-v $(pwd)/models:/app/models \
python-inference-service
```
## API接口
服务启动后访问http://localhost:8000/docs 查看API文档
### 1. 健康检查
```bash
GET /health
```
### 2. 获取可用模型列表
```bash
GET /api/models
```
### 3. Base64图像检测
```bash
POST /api/detect
Content-Type: application/json
{
"model_name": "yolov8_detector",
"image_data": "base64_encoded_image_string"
}
```
### 4. 文件上传检测
```bash
POST /api/detect/file
Content-Type: multipart/form-data
model_name: yolov8_detector
file: <image_file>
```
## 响应格式
```json
{
"model_name": "yolov8_detector",
"detections": [
{
"label": "[yolov8_detector] 类别名",
"confidence": 0.95,
"x": 100,
"y": 150,
"width": 200,
"height": 180,
"color": 65280
}
],
"inference_time": 45.6
}
```
## 自定义模型
要使用自己训练的YOLOv8模型
1. **训练模型**使用Ultralytics YOLOv8训练您的模型
```python
from ultralytics import YOLO
model = YOLO('yolov8n.yaml')
model.train(data='your_data.yaml', epochs=100)
```
2. **导出模型**:训练完成后会生成`best.pt`文件
3. **准备类别文件**:创建`classes.txt`
```
class1
class2
class3
```
4. **放置文件**:将`best.pt`和`classes.txt`放到`models/`目录
5. **更新配置**:确保`models.json`配置正确
6. **重启服务**
## 环境变量
- `MODEL_DIR`: 模型目录路径(默认:`/app/models`
- `MODELS_JSON`: 模型配置文件路径(默认:`models/models.json`
## 性能优化
### GPU加速
服务会自动检测GPU并使用。如果有多张GPU可以指定
```bash
CUDA_VISIBLE_DEVICES=0 uvicorn app.main:app --host 0.0.0.0 --port 8000
```
### 置信度阈值
在`yolov8_model.py`中调整:
```python
self.conf_threshold = 0.25 # 降低阈值检测更多目标
```
## 故障排查
### 模型加载失败
```
错误:找不到 best.pt
解决:确保模型文件在 models/ 目录下
```
### GPU不可用
```
错误CUDA not available
解决:
1. 检查NVIDIA驱动
2. 检查PyTorch GPU版本
3. 检查CUDA版本兼容性
```
### 推理速度慢
```
解决:
1. 使用GPU加速
2. 使用更小的模型如yolov8n.pt
3. 减小输入图像尺寸
```
## 开发者
如需修改或扩展功能,请参考:
- `app/main.py` - API路由定义
- `app/detector.py` - 检测器基类
- `models/yolov8_model.py` - YOLOv8模型包装类
## 许可证
[根据项目实际许可证填写]

View File

@@ -0,0 +1 @@
# Python Inference Service package

View File

@@ -0,0 +1,311 @@
import os
import cv2
import numpy as np
import time
from typing import List, Dict, Tuple, Optional
import importlib.util
import sys
from app.models import Detection
class PythonModelDetector:
    """Object detector using native Python models

    Dynamically imports a user-supplied ``Model`` class from a ``.py`` file and
    adapts its ``predict()`` output (normalized bboxes) to :class:`Detection`
    objects in pixel coordinates, optionally applying class-agnostic NMS.
    """
    def __init__(self, model_name: str, model_path: str, input_width: int, input_height: int, color: int = 0x00FF00):
        """
        Initialize detector with Python model
        Args:
            model_name: Name of the model
            model_path: Path to the Python model file (.py)
            input_width: Input width for the model
            input_height: Input height for the model
            color: RGB color for detection boxes (default: green)
        """
        self.model_name = model_name
        self.input_width = input_width
        self.input_height = input_height
        self.color = color
        # Convert color from RGB to BGR (OpenCV uses BGR):
        # 0xRRGGBB -> 0xBBGGRR by swapping the top and bottom bytes.
        # NOTE(review): color_bgr is never read inside this class — confirm a
        # caller uses it, otherwise it can be removed.
        self.color_bgr = ((color & 0xFF) << 16) | (color & 0xFF00) | ((color >> 16) & 0xFF)
        # Default confidence thresholds
        self.conf_threshold = 0.25
        self.nms_threshold = 0.45
        # Load the Python model dynamically
        self._load_python_model(model_path)
        # Load class names if available (one name per line, blanks skipped)
        self.classes = []
        model_dir = os.path.dirname(model_path)
        classes_path = os.path.join(model_dir, "classes.txt")
        if os.path.exists(classes_path):
            with open(classes_path, 'r') as f:
                self.classes = [line.strip() for line in f.readlines() if line.strip()]

    def _load_python_model(self, model_path: str):
        """Load Python model dynamically.

        Imports ``model_path`` as a module and instantiates its ``Model`` class.
        Raises FileNotFoundError / ImportError / AttributeError when the file,
        module spec, ``Model`` class or ``predict`` method is missing.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model file not found: {model_path}")
        # Get model directory and file name
        model_dir = os.path.dirname(model_path)
        model_file = os.path.basename(model_path)
        model_name = os.path.splitext(model_file)[0]
        # Add model directory to system path so the model file can import
        # siblings; note this mutates process-wide sys.path.
        if model_dir not in sys.path:
            sys.path.append(model_dir)
        # Import the model module
        spec = importlib.util.spec_from_file_location(model_name, model_path)
        if spec is None:
            raise ImportError(f"Failed to load model specification: {model_path}")
        model_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(model_module)
        # Check if the module has the required interface
        if not hasattr(model_module, "Model"):
            raise AttributeError(f"Model module must define a 'Model' class: {model_path}")
        # Create model instance
        self.model = model_module.Model()
        # Check if model has the required methods
        if not hasattr(self.model, "predict"):
            raise AttributeError(f"Model must implement 'predict' method: {model_path}")

    def preprocess(self, img: np.ndarray) -> np.ndarray:
        """Preprocess image for model input.

        Normalizes channel count to 3 (BGR), resizes to the configured input
        size, then delegates to the model's own ``preprocess`` if it has one;
        otherwise scales pixel values to [0, 1].
        """
        # Ensure BGR image
        if len(img.shape) == 2:  # Grayscale
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[2] == 4:  # BGRA
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        # Resize to model input size
        resized = cv2.resize(img, (self.input_width, self.input_height))
        # Use model's preprocess method if available
        if hasattr(self.model, "preprocess"):
            return self.model.preprocess(resized)
        # Default preprocessing: normalize to [0, 1]
        return resized / 255.0

    def detect(self, img: np.ndarray) -> Tuple[List[Detection], float]:
        """
        Detect objects in an image
        Args:
            img: Input image in BGR format (OpenCV)
        Returns:
            List of Detection objects and inference time in milliseconds
        """
        if img is None or img.size == 0:
            return [], 0.0
        # Original image dimensions (used to denormalize bboxes)
        img_height, img_width = img.shape[:2]
        # Preprocess image
        processed_img = self.preprocess(img)
        # Measure inference time
        start_time = time.time()
        try:
            # Run inference using model's predict method
            # Expected return format from model's predict:
            # List of dicts with keys: 'bbox', 'class_id', 'confidence'
            # bbox: (x, y, w, h) normalized [0-1]
            model_results = self.model.predict(processed_img)
            # Calculate inference time in milliseconds
            inference_time = (time.time() - start_time) * 1000
            # Convert model results to Detection objects
            detections = []
            for result in model_results:
                # Skip low confidence detections
                confidence = result.get('confidence', 0)
                if confidence < self.conf_threshold:
                    continue
                # Get bounding box (normalized coordinates)
                bbox = result.get('bbox', [0, 0, 0, 0])
                # Denormalize bbox to image coordinates
                x = int(bbox[0] * img_width)
                y = int(bbox[1] * img_height)
                w = int(bbox[2] * img_width)
                h = int(bbox[3] * img_height)
                # Skip invalid boxes
                if w <= 0 or h <= 0:
                    continue
                # Get class ID and name (falls back to "clsN" when no names file)
                class_id = result.get('class_id', 0)
                class_name = f"cls{class_id}"
                if 0 <= class_id < len(self.classes):
                    class_name = self.classes[class_id]
                # Create Detection object
                label = f"[{self.model_name}] {class_name}"
                detection = Detection(
                    label=label,
                    confidence=confidence,
                    x=x,
                    y=y,
                    width=w,
                    height=h,
                    color=self.color
                )
                detections.append(detection)
            # Apply NMS if model doesn't do it internally.
            # NOTE(review): this NMS is class-agnostic — boxes of different
            # classes suppress each other; confirm that is intended.
            if hasattr(self.model, "applies_nms") and self.model.applies_nms:
                return detections, inference_time
            else:
                # Convert detections to boxes and scores
                boxes = [(d.x, d.y, d.width, d.height) for d in detections]
                scores = [d.confidence for d in detections]
                if boxes:
                    # Apply NMS
                    indices = self._non_max_suppression(boxes, scores, self.nms_threshold)
                    detections = [detections[i] for i in indices]
                return detections, inference_time
        except Exception as e:
            # Best-effort: a failing model yields an empty result instead of
            # crashing the service.
            print(f"Error during detection: {str(e)}")
            return [], (time.time() - start_time) * 1000

    def _non_max_suppression(self, boxes: List[Tuple], scores: List[float], threshold: float) -> List[int]:
        """Apply Non-Maximum Suppression to remove overlapping boxes.

        Args:
            boxes: (x, y, w, h) tuples in pixel coordinates.
            scores: Confidence per box, parallel to ``boxes``.
            threshold: IoU above which the lower-scoring box is discarded.
        Returns:
            Indices into ``boxes`` of the kept detections, highest score first.
        """
        # Sort by score in descending order
        indices = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
        keep = []
        while indices:
            # Get index with highest score
            current = indices.pop(0)
            keep.append(current)
            # No more indices to process
            if not indices:
                break
            # Get current box
            x1, y1, w1, h1 = boxes[current]
            x2_1 = x1 + w1
            y2_1 = y1 + h1
            area1 = w1 * h1
            # Check remaining boxes
            i = 0
            while i < len(indices):
                # Get box to compare
                idx = indices[i]
                x2, y2, w2, h2 = boxes[idx]
                x2_2 = x2 + w2
                y2_2 = y2 + h2
                area2 = w2 * h2
                # Calculate intersection
                xx1 = max(x1, x2)
                yy1 = max(y1, y2)
                xx2 = min(x2_1, x2_2)
                yy2 = min(y2_1, y2_2)
                # Calculate intersection area
                w = max(0, xx2 - xx1)
                h = max(0, yy2 - yy1)
                intersection = w * h
                # Calculate IoU
                union = area1 + area2 - intersection + 1e-9  # Avoid division by zero
                iou = intersection / union
                # Remove box if IoU is above threshold
                if iou > threshold:
                    indices.pop(i)
                else:
                    i += 1
        return keep

    def close(self):
        """Close the model resources (delegates to the model's own close)."""
        if hasattr(self.model, "close"):
            self.model.close()
        self.model = None
class ModelManager:
    """Registry that owns one PythonModelDetector per configured model."""

    def __init__(self):
        # Mapping of model name -> PythonModelDetector
        self.models = {}

    def load(self, models_config: List[Dict]):
        """
        Load models from configuration.

        Args:
            models_config: List of model configurations; each entry is a dict
                with "name", "path" and optional "size" ([width, height],
                defaulting to 640x640) keys. Entries with a missing name/path
                or a non-existent path are skipped with a message; a detector
                that fails to construct is also skipped (best-effort loading).
        """
        # Basic color palette so each model draws boxes in a distinct color
        palette = [0x00FF00, 0xFF8000, 0x00A0FF, 0xFF00FF, 0x00FFFF, 0xFF0000, 0x80FF00]
        for i, model_config in enumerate(models_config):
            name = model_config.get("name")
            path = model_config.get("path")
            size = model_config.get("size", [640, 640])
            if not name or not path or not os.path.exists(path):
                print(f"Skipping model: {name} - Invalid configuration")
                continue
            try:
                # Use color from palette (wraps around when > 7 models)
                color = palette[i % len(palette)]
                # Create detector for Python model
                detector = PythonModelDetector(
                    model_name=name,
                    model_path=path,
                    input_width=size[0],
                    input_height=size[1],
                    color=color
                )
                self.models[name] = detector
                print(f"Model loaded: {name} ({path})")
            except Exception as e:
                print(f"Failed to load model {name}: {str(e)}")

    def get(self, name: str) -> Optional["PythonModelDetector"]:
        """Get detector by name, or None if unknown."""
        return self.models.get(name)

    def all(self) -> List["PythonModelDetector"]:
        """Get all loaded detectors."""
        return list(self.models.values())

    def close(self):
        """Close all detectors and drop them from the registry."""
        for detector in self.models.values():
            try:
                detector.close()
            except Exception:
                # Bug fix: the original bare `except:` also swallowed
                # SystemExit/KeyboardInterrupt during shutdown.
                pass
        self.models.clear()

View File

@@ -0,0 +1,164 @@
import base64
import json
import os
from typing import Dict, List

import cv2
import numpy as np
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware

from app.detector import ModelManager
from app.models import Detection, DetectionRequest, DetectionResponse, ModelInfo, ModelsResponse
# Initialize FastAPI app
app = FastAPI(
    title="Python Model Inference Service",
    description="API for object detection using Python models",
    version="1.0.0"
)

# Configure CORS.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests and is wide open otherwise —
# confirm this service is only reachable from the internal Docker network.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global model manager; created and populated in the startup event handler.
model_manager = None
# Load models from configuration
@app.on_event("startup")
async def startup_event():
    """Populate the global ModelManager from models.json at application start.

    The config path defaults to ../models/models.json relative to this file
    and can be overridden via the MODELS_JSON environment variable. A missing
    or unreadable config only logs a message; the service still starts.

    NOTE(review): @app.on_event is deprecated in recent FastAPI releases in
    favour of lifespan handlers — consider migrating.
    """
    global model_manager
    model_manager = ModelManager()
    # Look for models.json configuration file
    models_json_path = os.getenv("MODELS_JSON", os.path.join(os.path.dirname(__file__), "..", "models", "models.json"))
    if os.path.exists(models_json_path):
        try:
            with open(models_json_path, "r") as f:
                models_config = json.load(f)
            model_manager.load(models_config)
            print(f"Loaded model configuration from {models_json_path}")
        except Exception as e:
            print(f"Failed to load models from {models_json_path}: {str(e)}")
    else:
        print(f"Models configuration not found: {models_json_path}")
@app.on_event("shutdown")
async def shutdown_event():
    """Release all loaded detectors when the application stops."""
    global model_manager
    if model_manager:
        model_manager.close()
@app.get("/health")
async def health_check():
    """Health check endpoint; always returns ok (used as a liveness probe)."""
    return {"status": "ok"}
@app.get("/api/models", response_model=ModelsResponse)
async def get_models():
    """Get available models as a list of ModelInfo entries."""
    global model_manager
    if not model_manager:
        raise HTTPException(status_code=500, detail="Model manager not initialized")
    detectors = model_manager.all()
    models = []
    for detector in detectors:
        # PythonModelDetector does not store a model_path attribute, so the
        # getattr fallback always yields '' here — TODO confirm whether the
        # path should be persisted on the detector.
        model_info = ModelInfo(
            name=detector.model_name,
            path=getattr(detector, 'model_path', ''),
            size=[detector.input_width, detector.input_height],
            backend="Python",
            loaded=True
        )
        models.append(model_info)
    return ModelsResponse(models=models)
@app.post("/api/detect", response_model=DetectionResponse)
async def detect(request: DetectionRequest):
    """Detect objects in a base64-encoded image.

    Args:
        request: DetectionRequest with the model name and the image as a
            base64 string (an optional data-URL prefix is stripped).
    Returns:
        DetectionResponse with the detections and inference time (ms).
    Raises:
        HTTPException: 500 if the manager is missing, 404 for an unknown
            model, 400 for undecodable image data.
    """
    global model_manager
    if not model_manager:
        raise HTTPException(status_code=500, detail="Model manager not initialized")
    # Get detector for requested model
    detector = model_manager.get(request.model_name)
    if not detector:
        raise HTTPException(status_code=404, detail=f"Model not found: {request.model_name}")
    # Decode base64 image
    try:
        # Remove data URL prefix if present
        if "base64," in request.image_data:
            image_data = request.image_data.split("base64,")[1]
        else:
            image_data = request.image_data
        # Decode base64 image into a BGR cv2 image
        image_bytes = base64.b64decode(image_data)
        nparr = np.frombuffer(image_bytes, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if image is None:
            raise HTTPException(status_code=400, detail="Invalid image data")
    except HTTPException:
        # Bug fix: previously the generic handler below caught our own 400 and
        # re-wrapped it as "Failed to decode image: 400: Invalid image data".
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Failed to decode image: {str(e)}")
    # Run detection
    detections, inference_time = detector.detect(image)
    return DetectionResponse(
        model_name=request.model_name,
        detections=detections,
        inference_time=inference_time
    )
@app.post("/api/detect/file", response_model=DetectionResponse)
async def detect_file(
    # Bug fix: a bare `model_name: str` is treated by FastAPI as a QUERY
    # parameter, while the README and clients document it as a multipart
    # form field — declare it with Form(...) so form submissions work.
    model_name: str = Form(...),
    file: UploadFile = File(...)
):
    """Detect objects in an uploaded image file.

    Args:
        model_name: Name of the model to run (multipart form field).
        file: Uploaded image in any format cv2.imdecode understands.
    Returns:
        DetectionResponse with the detections and inference time (ms).
    Raises:
        HTTPException: 500 if the manager is missing, 404 for an unknown
            model, 400 for undecodable image data.
    """
    global model_manager
    if not model_manager:
        raise HTTPException(status_code=500, detail="Model manager not initialized")
    # Get detector for requested model
    detector = model_manager.get(model_name)
    if not detector:
        raise HTTPException(status_code=404, detail=f"Model not found: {model_name}")
    # Read uploaded file and decode it into a BGR cv2 image
    try:
        contents = await file.read()
        nparr = np.frombuffer(contents, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if image is None:
            raise HTTPException(status_code=400, detail="Invalid image data")
    except HTTPException:
        # Bug fix: don't let the generic handler below re-wrap our own 400.
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Failed to process image: {str(e)}")
    # Run detection
    detections, inference_time = detector.detect(image)
    return DetectionResponse(
        model_name=model_name,
        detections=detections,
        inference_time=inference_time
    )


if __name__ == "__main__":
    # Dev entry point with auto-reload; production runs uvicorn directly.
    uvicorn.run("app.main:app", host="0.0.0.0", port=8000, reload=True)

View File

@@ -0,0 +1,40 @@
from pydantic import BaseModel
from typing import List, Optional
class Detection(BaseModel):
    """Single object detection result in pixel coordinates."""
    label: str
    confidence: float
    x: int
    y: int
    width: int
    height: int
    color: int = 0x00FF00  # Default green color (0xRRGGBB)


class DetectionRequest(BaseModel):
    """Request for model inference on image data."""
    # Bug fix: "model_name" collides with Pydantic v2's protected "model_"
    # namespace and triggers a UserWarning at import time; opting out keeps
    # the wire format unchanged.
    model_config = {"protected_namespaces": ()}
    model_name: str
    image_data: str  # Base64 encoded image


class DetectionResponse(BaseModel):
    """Response with detection results."""
    model_config = {"protected_namespaces": ()}
    model_name: str
    detections: List[Detection]
    inference_time: float  # Time in milliseconds


class ModelInfo(BaseModel):
    """Model information as reported by /api/models."""
    name: str
    path: str
    size: List[int]  # [width, height]
    # NOTE(review): default is "ONNX" but main.py always reports "Python" —
    # confirm which default is intended.
    backend: str = "ONNX"
    loaded: bool = False


class ModelsResponse(BaseModel):
    """Response with available models."""
    models: List[ModelInfo]

Binary file not shown.

View File

@@ -0,0 +1 @@
垃圾

View File

@@ -0,0 +1,207 @@
import os
import numpy as np
import cv2
from typing import List, Dict, Any
import torch
class Model:
    """
    Garbage-detection model — loads a PyTorch checkpoint (best.pt) directly.

    Tries three loading strategies in order: the `yolov5` pip package, the
    bundled YOLOv5 helper, and finally a plain torch.load. Exposes the
    predict()/preprocess()/applies_nms/close() interface expected by
    PythonModelDetector.
    """
    def __init__(self):
        """Load weights, class names and detection parameters."""
        # Directory containing this file
        model_dir = os.path.dirname(os.path.abspath(__file__))
        # Checkpoint path (expected next to this file)
        model_path = os.path.join(model_dir, "best.pt")
        print(f"正在加载垃圾识别模型: {model_path}")
        # Pick GPU when available
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {self.device}")
        # Load via YOLOv5 when possible, otherwise fall back to generic loading
        try:
            # Try YOLOv5-style loading first
            import sys
            sys.path.append(os.path.dirname(model_dir))  # add parent dir to path
            try:
                # Option 1: the standalone `yolov5` pip package
                import yolov5
                self.model = yolov5.load(model_path, device=self.device)
                self.yolov5_api = True
                print("使用 YOLOv5 包加载模型")
            except (ImportError, ModuleNotFoundError):
                # Option 2: bundled YOLOv5 loading helper
                from models.yolov5_utils import attempt_load
                self.model = attempt_load(model_path, device=self.device)
                self.yolov5_api = False
                print("使用内置 YOLOv5 工具加载模型")
        except Exception as e:
            # Option 3: generic PyTorch loading
            print(f"YOLOv5 加载失败: {e}")
            print("使用通用 PyTorch 加载")
            self.model = torch.load(model_path, map_location=self.device)
            if isinstance(self.model, dict) and 'model' in self.model:
                self.model = self.model['model']
            self.yolov5_api = False
        # Put the model into eval mode (ScriptModule or anything with eval())
        if isinstance(self.model, torch.jit.ScriptModule):
            self.model.eval()
        elif hasattr(self.model, 'eval'):
            self.model.eval()
        # Load class names; classes.txt wins over names embedded in the model
        self.classes = []
        classes_path = os.path.join(model_dir, "classes.txt")
        if os.path.exists(classes_path):
            with open(classes_path, 'r', encoding='utf-8') as f:
                self.classes = [line.strip() for line in f.readlines() if line.strip()]
            print(f"已加载 {len(self.classes)} 个类别")
        else:
            # Fall back to names shipped inside the checkpoint, if any
            if hasattr(self.model, 'names') and self.model.names:
                self.classes = self.model.names
                print(f"使用模型自带类别,共 {len(self.classes)} 个类别")
            else:
                print("未找到类别文件,将使用数字索引作为类别名")
        # Detection parameters
        self.conf_threshold = 0.25  # confidence threshold
        self.img_size = 640  # default model input size (generic path only)
        print("垃圾识别模型加载完成")

    def preprocess(self, image: np.ndarray) -> np.ndarray:
        """Preprocess for the generic-PyTorch path.

        The YOLOv5 API path does its own preprocessing, so the image is
        returned untouched in that case. Otherwise: resize, BGR->RGB,
        normalize, HWC->CHW, add batch dim, move to device.
        """
        # When using the YOLOv5 API, no preprocessing is needed
        if hasattr(self, 'yolov5_api') and self.yolov5_api:
            return image
        # Default preprocessing: resize and normalize
        img = cv2.resize(image, (self.img_size, self.img_size))
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Normalize [0, 255] -> [0, 1]
        img = img / 255.0
        # HWC -> CHW (height,width,channels -> channels,height,width)
        img = img.transpose(2, 0, 1)
        # To torch tensor
        img = torch.from_numpy(img).float()
        # Add batch dimension
        img = img.unsqueeze(0)
        # Move to device
        img = img.to(self.device)
        return img

    def predict(self, image: np.ndarray) -> List[Dict[str, Any]]:
        """Run inference.

        Returns a list of dicts with 'bbox' ((x, y, w, h), normalized to the
        original image), 'class_id' and 'confidence'. Returns [] on error.
        """
        original_height, original_width = image.shape[:2]
        try:
            # YOLOv5 API path
            if hasattr(self, 'yolov5_api') and self.yolov5_api:
                # The YOLOv5 API consumes the raw image directly
                results = self.model(image)
                # Extract predictions for the first batch item
                predictions = results.pred[0]
                detections = []
                for *xyxy, conf, cls_id in predictions.cpu().numpy():
                    x1, y1, x2, y2 = xyxy
                    # Convert to normalized (x, y, w, h)
                    x = x1 / original_width
                    y = y1 / original_height
                    w = (x2 - x1) / original_width
                    h = (y2 - y1) / original_height
                    # Integer class ID
                    cls_id = int(cls_id)
                    # Resolve class name
                    # NOTE(review): class_name is computed but never returned —
                    # the caller re-derives it from class_id; confirm and remove.
                    class_name = f"cls{cls_id}"
                    if 0 <= cls_id < len(self.classes):
                        class_name = self.classes[cls_id]
                    # Keep detections above the confidence threshold
                    if conf >= self.conf_threshold:
                        detections.append({
                            'bbox': (x, y, w, h),
                            'class_id': cls_id,
                            'confidence': float(conf)
                        })
                return detections
            else:
                # Generic PyTorch model path
                # Preprocess image
                img = self.preprocess(image)
                # Inference
                with torch.no_grad():
                    outputs = self.model(img)
                # Post-process results (must be adapted to the model's output format)
                detections = []
                # Assume YOLO-style output rows: [batch_idx, x1, y1, x2, y2, conf, cls_id]
                # NOTE(review): the unpack below expects exactly 6 values per
                # row (no batch_idx), and the pixel coords come from the
                # resized tensor but are normalized by the ORIGINAL image
                # size — verify the scale handling for this path.
                if isinstance(outputs, torch.Tensor) and outputs.dim() == 2 and outputs.size(1) >= 6:
                    for *xyxy, conf, cls_id in outputs.cpu().numpy():
                        if conf >= self.conf_threshold:
                            x1, y1, x2, y2 = xyxy
                            # Convert to normalized (x, y, w, h)
                            x = x1 / original_width
                            y = y1 / original_height
                            w = (x2 - x1) / original_width
                            h = (y2 - y1) / original_height
                            # Integer class ID
                            cls_id = int(cls_id)
                            detections.append({
                                'bbox': (x, y, w, h),
                                'class_id': cls_id,
                                'confidence': float(conf)
                            })
                # Handle other possible output formats
                else:
                    # Needs to be adapted to the model's actual output format
                    print("警告:无法识别的模型输出格式,请检查模型类型")
                return detections
        except Exception as e:
            print(f"推理过程中出错: {str(e)}")
            # Return an empty result on error
            return []

    @property
    def applies_nms(self) -> bool:
        """Whether the model applies NMS internally.

        NOTE(review): True is only accurate for the YOLOv5 API path; the
        generic torch.load path performs no NMS here — confirm.
        """
        # YOLOv5 applies NMS automatically
        return True

    def close(self):
        """Release model resources and free GPU memory."""
        if hasattr(self, 'model'):
            # Delete the model to release GPU memory
            del self.model
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        print("垃圾识别模型已关闭")

View File

@@ -0,0 +1,8 @@
[
{
"name": "yolov8_detector",
"path": "models/yolov8_model.py",
"size": [640, 640],
"comment": "YOLOv8检测模型确保将训练好的best.pt文件放在models目录下"
}
]

View File

@@ -0,0 +1,126 @@
import numpy as np
import cv2
from typing import List, Dict, Any, Tuple
class Model:
    """
    Smoke detection model implementation
    This is a simple example that could be replaced with an actual
    TensorFlow, PyTorch, or other ML framework implementation.

    NOTE(review): predict() intentionally injects a random fake detection
    ~10% of the time when nothing is found (demo behavior), so results are
    nondeterministic — remove before production use.
    """
    def __init__(self):
        """Initialize smoke detection model"""
        # In a real implementation, you would load your model here
        print("Smoke detection model initialized")
        # Define smoke class IDs (class_id -> label)
        self.smoke_classes = {
            0: "smoke",
            1: "fire"
        }

    def preprocess(self, image: np.ndarray) -> np.ndarray:
        """Preprocess image for model input.

        Converts to grayscale and back to 3 channels so the shape matches a
        typical model's expected input.
        """
        # Convert BGR to grayscale for smoke detection
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Convert back to 3 channels to match model expected input shape
        gray_3ch = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        # In a real implementation, you would do normalization, etc.
        return gray_3ch

    def predict(self, image: np.ndarray) -> List[Dict[str, Any]]:
        """
        Run smoke detection on the image
        This is a simplified example that uses basic image processing
        In a real implementation, you would use your ML model

        Returns dicts with normalized 'bbox' (x, y, w, h), 'class_id'
        (0=smoke, 1=fire) and 'confidence'.
        """
        # Convert to grayscale for processing
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Apply Gaussian blur to reduce noise
        blurred = cv2.GaussianBlur(gray, (15, 15), 0)
        # Simple thresholding to find potential smoke regions
        # In a real implementation, you'd use a trained model
        _, thresh = cv2.threshold(blurred, 100, 255, cv2.THRESH_BINARY)
        # Find contours in the thresholded image
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Process contours to find potential smoke regions
        detections = []
        height, width = image.shape[:2]
        for contour in contours:
            # Get bounding box
            x, y, w, h = cv2.boundingRect(contour)
            # Filter small regions (at least 5% of each image dimension)
            if w > width * 0.05 and h > height * 0.05:
                # Calculate area ratio
                area = cv2.contourArea(contour)
                rect_area = w * h
                fill_ratio = area / rect_area if rect_area > 0 else 0
                # Smoke tends to have irregular shapes
                # This is just for demonstration purposes
                if fill_ratio > 0.2 and fill_ratio < 0.8:
                    # Normalize coordinates
                    x_norm = x / width
                    y_norm = y / height
                    w_norm = w / width
                    h_norm = h / height
                    # Determine if it's smoke or fire (just a simple heuristic for demo)
                    # In a real model, this would be determined by the model prediction
                    class_id = 0  # Default to smoke
                    # Check if the region has high red values (fire)
                    roi = image[y:y+h, x:x+w]
                    if roi.size > 0:  # Make sure ROI is not empty
                        avg_color = np.mean(roi, axis=(0, 1))
                        if avg_color[2] > 150 and avg_color[2] > avg_color[0] * 1.5:  # High red, indicating fire
                            class_id = 1  # Fire
                    # Calculate confidence based on fill ratio
                    # This is just for demonstration
                    confidence = 0.5 + fill_ratio * 0.3
                    # Add to detections
                    detections.append({
                        'bbox': (x_norm, y_norm, w_norm, h_norm),
                        'class_id': class_id,
                        'confidence': confidence
                    })
        # For demo purposes, if no smoke detected by algorithm,
        # add a small chance of random detection
        if not detections and np.random.random() < 0.1:  # 10% chance
            # Random smoke detection
            x = np.random.random() * 0.7
            y = np.random.random() * 0.7
            w = 0.1 + np.random.random() * 0.2
            h = 0.1 + np.random.random() * 0.2
            confidence = 0.5 + np.random.random() * 0.3
            detections.append({
                'bbox': (x, y, w, h),
                'class_id': 0,  # Smoke
                'confidence': confidence
            })
        return detections

    @property
    def applies_nms(self) -> bool:
        """Model does not apply NMS internally"""
        return False

    def close(self):
        """Release resources"""
        # In a real implementation, you would release model resources here
        pass

View File

@@ -0,0 +1,56 @@
import torch
import torch.nn as nn
import sys
import os
def attempt_load(weights, device=''):
    """Load a YOLOv5-style checkpoint from disk and return an eval-mode model.

    Args:
        weights: Path to the .pt checkpoint (full model, {'model': ...},
            or {'state_dict': ...}).
        device: torch map_location target; empty string means CPU.
    Returns:
        The loaded model, in eval mode when it is an nn.Module, with a
        non-empty ``names`` attribute (defaults to ['object']).
    """
    # Bug fix: an empty device string is not a valid map_location — fall
    # back to CPU so attempt_load(path) works with the default argument.
    map_location = device if device else 'cpu'
    # weights_only=False preserves the historical torch.load behaviour needed
    # to unpickle full model objects (the default flipped to True in
    # torch >= 2.6); older torch versions lack the keyword entirely.
    try:
        model = torch.load(weights, map_location=map_location, weights_only=False)
    except TypeError:
        # torch < 1.13 has no weights_only argument
        model = torch.load(weights, map_location=map_location)
    # Unwrap common checkpoint dict formats
    if isinstance(model, dict):
        if 'model' in model:  # checkpoint wrapping the model object
            model = model['model']
        elif 'state_dict' in model:  # raw state_dict checkpoint
            model = model['state_dict']
    # A bare state_dict needs a model architecture to load into
    if isinstance(model, dict):
        print("警告:加载的是权重字典,尝试创建默认模型结构")
        # Bug fix: YOLOv5 is defined in THIS module; the previous
        # `from models.yolov5_model import ...` referenced a non-existent file.
        model_arch = YOLOv5()
        model_arch.load_state_dict(model)
        model = model_arch
    # Switch to inference mode
    if isinstance(model, nn.Module):
        model.eval()
    # Guarantee a class-name list is present
    if not hasattr(model, 'names') or not model.names:
        print("模型没有类别信息,尝试加载默认类别")
        # Fall back to a single generic class
        model.names = ['object']
    return model
class YOLOv5(nn.Module):
    """Simplified YOLOv5 placeholder used when only a weight dict is available.

    Bug fix: the class now subclasses nn.Module — previously it did not, so
    attempt_load()'s ``isinstance(model, nn.Module)`` check failed and
    ``eval()`` was never applied to instances of this class.
    """

    def __init__(self):
        super(YOLOv5, self).__init__()
        self.names = []  # class names
        # A real implementation would register the network layers here; this
        # placeholder only exists so attempt_load() can return a module-shaped
        # object carrying the names attribute.

    def forward(self, x):
        # Placeholder: a real YOLOv5 implementation must provide the forward pass.
        raise NotImplementedError("这是一个占位符模型,请使用完整的YOLOv5模型实现")

    def load_state_dict(self, state_dict):
        # Intentionally a no-op placeholder: loading real weights requires the
        # full architecture. Returning self preserves the original behaviour.
        print("尝试加载模型权重")
        return self

View File

@@ -0,0 +1,135 @@
import os
import numpy as np
import cv2
from typing import List, Dict, Any
import torch
class Model:
    """
    YOLOv8 model wrapper built on Ultralytics YOLO.

    Loads best.pt from the directory containing this file and exposes the
    predict()/preprocess()/applies_nms/close() interface expected by
    PythonModelDetector.
    """
    def __init__(self):
        """Initialize the YOLOv8 model."""
        # Directory containing this file
        model_dir = os.path.dirname(os.path.abspath(__file__))
        # Model weights path (expected next to this file)
        model_path = os.path.join(model_dir, "best.pt")
        print(f"正在加载YOLOv8模型: {model_path}")
        # Pick GPU when available
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"使用设备: {self.device}")
        # Load the model via Ultralytics YOLO
        try:
            from ultralytics import YOLO
            self.model = YOLO(model_path)
            print("使用 Ultralytics YOLO 加载模型成功")
        except ImportError:
            raise ImportError("请安装 ultralytics: pip install ultralytics>=8.0.0")
        except Exception as e:
            raise Exception(f"加载YOLOv8模型失败: {str(e)}")
        # Load class names; classes.txt takes precedence over the names
        # embedded in the checkpoint.
        self.classes = []
        classes_path = os.path.join(model_dir, "classes.txt")
        if os.path.exists(classes_path):
            with open(classes_path, 'r', encoding='utf-8') as f:
                self.classes = [line.strip() for line in f.readlines() if line.strip()]
            print(f"已加载 {len(self.classes)} 个类别")
        else:
            # Fall back to names shipped inside the model (dict or list form)
            if hasattr(self.model, 'names') and self.model.names:
                self.classes = list(self.model.names.values()) if isinstance(self.model.names, dict) else self.model.names
                print(f"使用模型自带类别,共 {len(self.classes)} 个类别")
            else:
                print("未找到类别文件,将使用数字索引作为类别名")
        # Detection parameters
        self.conf_threshold = 0.25  # confidence threshold
        # NOTE(review): img_size is never passed to the model here — Ultralytics
        # resizes internally; confirm whether this field is still needed.
        self.img_size = 640  # default input image size
        print("YOLOv8模型加载完成")

    def preprocess(self, image: np.ndarray) -> np.ndarray:
        """Preprocess image — YOLOv8 handles this internally, so return as-is."""
        return image

    def predict(self, image: np.ndarray) -> List[Dict[str, Any]]:
        """Run inference.

        Returns a list of dicts with 'bbox' ((x, y, w, h), normalized to the
        original image), 'class_id' and 'confidence'. Returns [] on error.
        """
        original_height, original_width = image.shape[:2]
        try:
            # YOLOv8 inference (conf filtering happens inside the model call)
            results = self.model(
                image,
                conf=self.conf_threshold,
                device=self.device,
                verbose=False
            )
            detections = []
            # Parse results
            for result in results:
                # Detection boxes for this image
                boxes = result.boxes
                if boxes is None or len(boxes) == 0:
                    continue
                # Iterate over each detection box
                for box in boxes:
                    # Coordinates in xyxy format (pixels)
                    xyxy = box.xyxy[0].cpu().numpy()
                    x1, y1, x2, y2 = xyxy
                    # Convert to normalized (x, y, w, h)
                    x = x1 / original_width
                    y = y1 / original_height
                    w = (x2 - x1) / original_width
                    h = (y2 - y1) / original_height
                    # Confidence score
                    conf = float(box.conf[0].cpu().numpy())
                    # Class ID
                    cls_id = int(box.cls[0].cpu().numpy())
                    # Resolve class name
                    # NOTE(review): class_name is computed but never returned —
                    # the caller re-derives it from class_id; confirm and remove.
                    class_name = f"cls{cls_id}"
                    if 0 <= cls_id < len(self.classes):
                        class_name = self.classes[cls_id]
                    # This re-check is redundant (the model already filtered by
                    # conf above) but kept as a safeguard.
                    if conf >= self.conf_threshold:
                        detections.append({
                            'bbox': (x, y, w, h),
                            'class_id': cls_id,
                            'confidence': conf
                        })
            return detections
        except Exception as e:
            print(f"推理过程中出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return []

    @property
    def applies_nms(self) -> bool:
        """Whether the model applies NMS internally."""
        # YOLOv8 applies NMS automatically
        return True

    def close(self):
        """Release model resources and free GPU memory."""
        if hasattr(self, 'model'):
            # Delete the model to release GPU memory
            del self.model
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        print("YOLOv8模型已关闭")

View File

@@ -0,0 +1,10 @@
# Web framework and ASGI server
fastapi==0.103.1
uvicorn==0.23.2
# Image decoding / preprocessing
opencv-python==4.8.0.76
numpy==1.25.2
# Request/response schemas
pydantic==2.3.0
# Multipart form parsing for /api/detect/file
python-multipart==0.0.6
# Object storage client
minio==7.1.15
# Inference backends
# NOTE(review): ultralytics requires torch >= 1.8; the torch>=1.7.0 floor
# below is looser than what ultralytics will actually accept — confirm.
torch>=1.7.0
torchvision>=0.8.1
ultralytics>=8.0.0

View File

@@ -0,0 +1,5 @@
@echo off
rem Launch the Python inference service from the directory containing this script.
echo Starting Python Inference Service...
cd /d %~dp0
rem NOTE(review): the README starts the service via uvicorn; `python -m app.main`
rem relies on the __main__ guard in app/main.py — confirm both stay in sync.
python -m app.main
pause

View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Launch the Python inference service from the directory containing this script.
# Fail fast on errors and undefined variables.
set -euo pipefail
echo "Starting Python Inference Service..."
cd "$(dirname "$0")"
# exec replaces the shell so signals (SIGTERM from Docker, Ctrl-C) reach
# the Python process directly instead of being absorbed by the wrapper.
exec python -m app.main

41
rtsp-vue/.dockerignore Normal file
View File

@@ -0,0 +1,41 @@
# Dependencies
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
package-lock.json
yarn.lock
# Build output
dist/
build/
# IDE
.vscode
.idea
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
# OS
.DS_Store
Thumbs.db
# Git
.git
.gitignore
# CI/CD
.github
.gitlab-ci.yml
# Documentation
*.md
README*
# Scripts
bin/
*.bat
*.sh

45
rtsp-vue/Dockerfile Normal file
View File

@@ -0,0 +1,45 @@
# Build stage
FROM node:18-alpine AS builder
# Set working directory
WORKDIR /app
# Use the npmmirror registry (faster inside CN networks)
RUN npm config set registry https://registry.npmmirror.com
# Copy package.json / package-lock.json first to leverage layer caching
COPY package*.json ./
# Install dependencies
RUN npm install
# Copy sources
COPY . .
# Build the production bundle
RUN npm run build:prod

# Runtime stage
FROM nginx:1.25-alpine
# curl is needed by the HEALTHCHECK below
RUN apk add --no-cache curl
# Remove the default nginx site config
RUN rm -rf /etc/nginx/conf.d/*
# Install our nginx config
COPY nginx.conf /etc/nginx/conf.d/default.conf
# Copy the build output
COPY --from=builder /app/dist /usr/share/nginx/html
# Expose the HTTP port
EXPOSE 80
# Health check
HEALTHCHECK --interval=30s --timeout=10s --retries=3 \
    CMD curl -f http://localhost:80 || exit 1
# Run nginx in the foreground
CMD ["nginx", "-g", "daemon off;"]

58
rtsp-vue/nginx.conf Normal file
View File

@@ -0,0 +1,58 @@
server {
    listen 80;
    server_name localhost;

    # Front-end static assets (Vue build output)
    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        # SPA history-mode routing: fall back to index.html
        try_files $uri $uri/ /index.html;
    }

    # Back-end API proxy
    location /prod-api/ {
        proxy_pass http://rtsp-backend:8080/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_connect_timeout 600;
        proxy_read_timeout 600;
        proxy_send_timeout 600;
    }

    # WebSocket support (used for video streaming)
    location /websocket/ {
        proxy_pass http://rtsp-backend:8080/websocket/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # Keep long-lived streaming connections open
        proxy_connect_timeout 7d;
        proxy_send_timeout 7d;
        proxy_read_timeout 7d;
    }

    # Python inference service proxy
    location /python-api/ {
        proxy_pass http://rtsp-python-service:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # MinIO is an externally deployed service; no proxy needed here.

    # Error page configuration
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }

    # Log configuration
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
}

63
ruoyi-admin/Dockerfile Normal file
View File

@@ -0,0 +1,63 @@
# Build stage
FROM maven:3.8-eclipse-temurin-17 AS builder
# Set working directory
WORKDIR /build
# Copy pom files first to leverage dependency-layer caching
COPY pom.xml .
COPY ruoyi-admin/pom.xml ./ruoyi-admin/
COPY ruoyi-common/pom.xml ./ruoyi-common/
COPY ruoyi-framework/pom.xml ./ruoyi-framework/
COPY ruoyi-generator/pom.xml ./ruoyi-generator/
COPY ruoyi-quartz/pom.xml ./ruoyi-quartz/
COPY ruoyi-system/pom.xml ./ruoyi-system/
COPY ruoyi-video/pom.xml ./ruoyi-video/
# Pre-fetch dependencies
RUN mvn dependency:go-offline -B
# Copy sources
COPY ruoyi-admin/src ./ruoyi-admin/src
COPY ruoyi-common/src ./ruoyi-common/src
COPY ruoyi-framework/src ./ruoyi-framework/src
COPY ruoyi-generator/src ./ruoyi-generator/src
COPY ruoyi-quartz/src ./ruoyi-quartz/src
COPY ruoyi-system/src ./ruoyi-system/src
COPY ruoyi-video/src ./ruoyi-video/src
COPY ArcFace64.dat ./
# Build the admin module and its dependents
RUN mvn clean package -DskipTests -pl ruoyi-admin -am

# Runtime stage
FROM eclipse-temurin:17-jre-alpine
# Install required tools and set the Asia/Shanghai timezone
RUN apk add --no-cache curl tzdata && \
    cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo "Asia/Shanghai" > /etc/timezone && \
    apk del tzdata
# Set working directory
WORKDIR /app
# Create log and upload directories
RUN mkdir -p /app/logs /app/upload
# Copy the jar and the face-SDK data file
COPY --from=builder /build/ruoyi-admin/target/ruoyi-admin.jar /app/app.jar
COPY --from=builder /build/ArcFace64.dat /app/
# JVM defaults (overridable at run time)
ENV JAVA_OPTS="-Xms512m -Xmx1024m -Djava.security.egd=file:/dev/./urandom"
# Expose the port (internal use only)
EXPOSE 8080
# Health check (requires spring-boot-starter-actuator on the classpath)
HEALTHCHECK --interval=30s --timeout=10s --retries=3 --start-period=60s \
    CMD curl -f http://localhost:8080/actuator/health || exit 1
# Start the application
ENTRYPOINT ["sh", "-c", "java ${JAVA_OPTS} -jar /app/app.jar"]

View File

@@ -30,6 +30,12 @@
<artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>
</dependency>
<!-- spring-boot-actuator for health check -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<!-- Mysql驱动包 -->
<dependency>
<groupId>com.mysql</groupId>

View File

@@ -101,7 +101,7 @@ public class CommonController
// persist to v_minio_object
VMinioObject record = new VMinioObject();
record.setObjectName(fileName);
record.setUrl(url);
record.setObjectUrl(url);
record.setOriginalName(file.getOriginalFilename());
try { record.setCreateBy(SecurityUtils.getUsername()); } catch (Exception ignored) {}
vMinioObjectService.insert(record);
@@ -158,7 +158,7 @@ public class CommonController
// persist each to v_minio_object
VMinioObject record = new VMinioObject();
record.setObjectName(fileName);
record.setUrl(url);
record.setObjectUrl(url);
record.setOriginalName(file.getOriginalFilename());
try { record.setCreateBy(SecurityUtils.getUsername()); } catch (Exception ignored) {}
vMinioObjectService.insert(record);

View File

@@ -156,10 +156,6 @@ mediasServer:
# 无人拉流观看持续多久自动关闭1分钟
noClientsDuration: 60000
# 虹软sdk
arcFace:
appId: '替换成你的appId'
sdkKey: '替换成你的sdkKey'
# 视频分析配置
video:
@@ -174,3 +170,17 @@ video:
transport: tcp
# 重试次数
retryCount: 3
# Spring Boot Actuator配置
management:
endpoints:
web:
exposure:
include: health,info
base-path: /actuator
endpoint:
health:
show-details: when-authorized
health:
defaults:
enabled: true

View File

@@ -1,5 +1,7 @@
[
{"name":"garbage","path":"libs/models/garbage","size":[640,640],"backend":"OpenCV"},
{"name":"smoke","path":"libs/models/smoke","size":[640,640],"backend":"OpenCV"}
{
"name": "garbage",
"pythonModelName": "garbage_detector",
"pythonApiUrl": "http://localhost:8000/api/detect/file"
}
]

View File

@@ -1,100 +1,97 @@
package com.ruoyi.framework.service;
import com.ruoyi.framework.config.MinioProperties;
import com.ruoyi.common.utils.file.FileUploadUtils;
import io.minio.BucketExistsArgs;
import io.minio.MakeBucketArgs;
import io.minio.RemoveObjectArgs;
import io.minio.MinioClient;
import io.minio.PutObjectArgs;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.InputStream;
import org.springframework.web.multipart.MultipartFile;
import java.io.InputStream;
/**
* MinIO 对象存储服务接口
*/
public interface MinioService {
@Service
public class MinioService {
/**
* 上传对象
*
* @param bucketName 存储桶名称
* @param objectName 对象名称
* @param inputStream 输入流
* @return 对象访问URL
*/
String putObject(String bucketName, String objectName, InputStream inputStream);
private final MinioClient minioClient;
private final MinioProperties properties;
/**
* 获取对象
*
* @param bucketName 存储桶名称
* @param objectName 对象名称
* @return 对象输入流
*/
InputStream getObject(String bucketName, String objectName);
@Autowired
public MinioService(MinioProperties properties) {
this.properties = properties;
this.minioClient = MinioClient.builder()
.endpoint(properties.getEndpoint())
.credentials(properties.getAccessKey(), properties.getSecretKey())
.build();
}
/**
* 删除对象
*
* @param bucketName 存储桶名称
* @param objectName 对象名称
*/
void removeObject(String bucketName, String objectName);
public UploadResult upload(MultipartFile file) throws Exception {
ensureBucket();
String objectName = FileUploadUtils.extractFilename(file);
try (InputStream is = file.getInputStream()) {
minioClient.putObject(
PutObjectArgs.builder()
.bucket(properties.getBucket())
.object(objectName)
.stream(is, file.getSize(), -1)
.contentType(file.getContentType())
.build()
);
}
String url = buildObjectUrl(objectName);
return new UploadResult(objectName, url);
/**
* 删除对象别名方法同removeObject
*
* @param objectName 对象名称
*/
default void deleteObject(String objectName) {
removeObject("default", objectName);
}
/**
* Upload using the provided unique objectName.
* 删除对象(别名方法)
*
* @param bucketName 存储桶名称
* @param objectName 对象名称
*/
public UploadResult uploadWithName(MultipartFile file, String objectName) throws Exception {
ensureBucket();
try (InputStream is = file.getInputStream()) {
minioClient.putObject(
PutObjectArgs.builder()
.bucket(properties.getBucket())
.object(objectName)
.stream(is, file.getSize(), -1)
.contentType(file.getContentType())
.build()
);
}
String url = buildObjectUrl(objectName);
return new UploadResult(objectName, url);
}
public String buildObjectUrl(String objectName) {
String endpoint = properties.getEndpoint();
if (endpoint.endsWith("/")) {
endpoint = endpoint.substring(0, endpoint.length() - 1);
}
return endpoint + "/" + properties.getBucket() + "/" + objectName;
}
private void ensureBucket() throws Exception {
boolean exists = minioClient.bucketExists(BucketExistsArgs.builder()
.bucket(properties.getBucket())
.build());
if (!exists) {
minioClient.makeBucket(MakeBucketArgs.builder()
.bucket(properties.getBucket())
.build());
}
default void deleteObject(String bucketName, String objectName) {
removeObject(bucketName, objectName);
}
/**
* Delete an object from MinIO by its object name (key).
* 检查对象是否存在
*
* @param bucketName 存储桶名称
* @param objectName 对象名称
* @return 是否存在
*/
public void deleteObject(String objectName) throws Exception {
minioClient.removeObject(
RemoveObjectArgs.builder()
.bucket(properties.getBucket())
.object(objectName)
.build()
);
}
boolean objectExists(String bucketName, String objectName);
/**
* 获取对象URL
*
* @param bucketName 存储桶名称
* @param objectName 对象名称
* @return 对象URL
*/
String getObjectUrl(String bucketName, String objectName);
/**
* 上传文件
*
* @param file 文件
* @return 上传结果
*/
UploadResult upload(MultipartFile file) throws Exception;
/**
* 使用指定的对象名上传文件
*
* @param file 文件
* @param objectName 对象名
* @return 上传结果
*/
UploadResult uploadWithName(MultipartFile file, String objectName) throws Exception;
/**
* 上传结果类
*/
public static class UploadResult {
private final String objectName;
private final String url;

View File

@@ -1,37 +1,21 @@
package com.ruoyi.video.common;
/**
* 定义一个枚举类,表示客户端类型
* @Author: orange
* @CreateTime: 2025-01-16
* 客户端类型枚举
*/
public enum ClientType {
/**
* HTTP客户端
*/
HTTP,
// 定义一个HTTP类型的客户端类型为0信息为"http"
HTTP(0,"http"),
// 定义一个WEBSOCKET类型的客户端类型为1信息为"websocket"
WEBSOCKET(1,"websocket"),
;
// 定义客户端类型
private int type;
// 定义客户端信息
private String info;
// 构造方法,初始化客户端类型和信息
private ClientType(int type, String info){
this.type = type;
this.info = info;
}
// 获取客户端类型
public int getType(){
return type;
}
// 获取客户端信息
public String getInfo(){
return info;
}
/**
* WebSocket客户端
*/
WEBSOCKET,
/**
* API客户端
*/
API
}

View File

@@ -10,6 +10,14 @@ import java.util.*;
public final class ModelManager implements AutoCloseable {
private final Map<String, YoloDetector> map = new LinkedHashMap<>();
// Python服务的默认API URL
private static final String DEFAULT_PYTHON_API_URL = "http://localhost:8000/api/detect/file";
/**
* 从配置加载检测器
* @param modelsJson 模型配置JSON的URL
* @throws Exception 如果加载失败
*/
public void load(URL modelsJson) throws Exception {
ObjectMapper om = new ObjectMapper();
ArrayNode arr = (ArrayNode) om.readTree(modelsJson);
@@ -20,20 +28,15 @@ public final class ModelManager implements AutoCloseable {
int i=0;
for (var node : arr) {
String name = node.get("name").asText();
String path = node.get("path").asText();
int w = node.get("size").get(0).asInt();
int h = node.get("size").get(1).asInt();
String backend = node.get("backend").asText();
URL dirUrl = Objects.requireNonNull(getClass().getClassLoader().getResource(path),
"Resource not found: " + path);
Path dir = Paths.get(dirUrl.toURI());
String pythonModelName = node.get("pythonModelName").asText(name); // 默认与name相同
String pythonApiUrl = node.has("pythonApiUrl") ?
node.get("pythonApiUrl").asText() : DEFAULT_PYTHON_API_URL;
int rgb = palette[i % palette.length]; i++;
int bgr = ((rgb & 0xFF) << 16) | (rgb & 0xFF00) | ((rgb >> 16) & 0xFF);
// 使用OnnxYoloDetector替代OpenVinoYoloDetector
YoloDetector det = new OnnxYoloDetector(name, dir, w, h, backend, bgr);
// 使用HttpYoloDetector替代本地ONNX检测器
YoloDetector det = new HttpYoloDetector(name, pythonApiUrl, pythonModelName, bgr);
map.put(name, det);
}
}

View File

@@ -1,86 +1,55 @@
package com.ruoyi.video.domain;
import com.ruoyi.common.annotation.Excel;
import com.ruoyi.common.core.domain.BaseEntity;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.util.Date;
/**
* 警记录对象 v_alarm_record
*
* @Author: orange
* @CreateTime: 2025-01-16
* 警记录实体类
*/
public class AlarmRecord extends BaseEntity {
private static final long serialVersionUID = 1L;
/** 报警记录ID */
public class AlarmRecord {
/** 告警ID */
private Long alarmId;
/** 巡检任务ID */
@Excel(name = "巡检任务ID")
private Long taskId;
/** 任务名称 */
@Excel(name = "任务名称")
private String taskName;
/** 设备ID */
@Excel(name = "设备ID")
private Long deviceId;
/** 设备名称 */
@Excel(name = "设备名称")
private String deviceName;
/** 报警类型 */
@Excel(name = "报警类型")
/** 告警类型 */
private String alarmType;
/** 警级别(1=低,2=中,3=高) */
@Excel(name = "报警级别", readConverterExp = "1=低,2=中,3=高")
/** 警级别 */
private String alarmLevel;
/** 报警描述 */
@Excel(name = "报警描述")
private String alarmDesc;
/** 告警内容 */
private String alarmContent;
/** 检测置信度 */
@Excel(name = "检测置信度")
private Double confidence;
/** 巡检任务ID */
private Long taskId;
/** 报警图片路径 */
@Excel(name = "报警图片")
private String imagePath;
/** 图像对象存储ID */
private Long imageOssId;
/** 报警视频路径 */
@Excel(name = "报警视频")
private String videoPath;
/** 帧位置 */
private long framePosition;
/** 报警时间 */
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
@Excel(name = "报警时间", width = 30, dateFormat = "yyyy-MM-dd HH:mm:ss")
private Date alarmTime;
/** 置信度 */
private double confidence;
/** 处理状态(0=未处理,1=已处理,2=已忽略) */
@Excel(name = "处理状态", readConverterExp = "0=未处理,1=已处理,2=已忽略")
private String handleStatus;
/** 处理状态(0-未处理,1-已处理) */
private Integer status;
/** 处理人 */
@Excel(name = "处理人")
private String handleBy;
/** 处理时间 */
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
@Excel(name = "处理时间", width = 30, dateFormat = "yyyy-MM-dd HH:mm:ss")
private Date handleTime;
/** 处理备注 */
@Excel(name = "处理备注")
private String handleRemark;
public AlarmRecord() {}
/** 创建时间 */
private Date createTime;
/** 备注 */
private String remark;
public Long getAlarmId() {
return alarmId;
@@ -90,22 +59,6 @@ public class AlarmRecord extends BaseEntity {
this.alarmId = alarmId;
}
public Long getTaskId() {
return taskId;
}
public void setTaskId(Long taskId) {
this.taskId = taskId;
}
public String getTaskName() {
return taskName;
}
public void setTaskName(String taskName) {
this.taskName = taskName;
}
public Long getDeviceId() {
return deviceId;
}
@@ -114,14 +67,6 @@ public class AlarmRecord extends BaseEntity {
this.deviceId = deviceId;
}
public String getDeviceName() {
return deviceName;
}
public void setDeviceName(String deviceName) {
this.deviceName = deviceName;
}
public String getAlarmType() {
return alarmType;
}
@@ -130,60 +75,52 @@ public class AlarmRecord extends BaseEntity {
this.alarmType = alarmType;
}
public String getAlarmLevel() {
return alarmLevel;
public String getAlarmContent() {
return alarmContent;
}
public void setAlarmLevel(String alarmLevel) {
this.alarmLevel = alarmLevel;
public void setAlarmContent(String alarmContent) {
this.alarmContent = alarmContent;
}
public String getAlarmDesc() {
return alarmDesc;
public Long getTaskId() {
return taskId;
}
public void setAlarmDesc(String alarmDesc) {
this.alarmDesc = alarmDesc;
public void setTaskId(Long taskId) {
this.taskId = taskId;
}
public Double getConfidence() {
public Long getImageOssId() {
return imageOssId;
}
public void setImageOssId(Long imageOssId) {
this.imageOssId = imageOssId;
}
public long getFramePosition() {
return framePosition;
}
public void setFramePosition(long framePosition) {
this.framePosition = framePosition;
}
public double getConfidence() {
return confidence;
}
public void setConfidence(Double confidence) {
public void setConfidence(double confidence) {
this.confidence = confidence;
}
public String getImagePath() {
return imagePath;
public Integer getStatus() {
return status;
}
public void setImagePath(String imagePath) {
this.imagePath = imagePath;
}
public String getVideoPath() {
return videoPath;
}
public void setVideoPath(String videoPath) {
this.videoPath = videoPath;
}
public Date getAlarmTime() {
return alarmTime;
}
public void setAlarmTime(Date alarmTime) {
this.alarmTime = alarmTime;
}
public String getHandleStatus() {
return handleStatus;
}
public void setHandleStatus(String handleStatus) {
this.handleStatus = handleStatus;
public void setStatus(Integer status) {
this.status = status;
}
public String getHandleBy() {
@@ -210,25 +147,51 @@ public class AlarmRecord extends BaseEntity {
this.handleRemark = handleRemark;
}
@Override
public String toString() {
return "AlarmRecord{" +
"alarmId=" + alarmId +
", taskId=" + taskId +
", taskName='" + taskName + '\'' +
", deviceId=" + deviceId +
", deviceName='" + deviceName + '\'' +
", alarmType='" + alarmType + '\'' +
", alarmLevel='" + alarmLevel + '\'' +
", alarmDesc='" + alarmDesc + '\'' +
", confidence=" + confidence +
", imagePath='" + imagePath + '\'' +
", videoPath='" + videoPath + '\'' +
", alarmTime=" + alarmTime +
", handleStatus='" + handleStatus + '\'' +
", handleBy='" + handleBy + '\'' +
", handleTime=" + handleTime +
", handleRemark='" + handleRemark + '\'' +
'}';
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public String getRemark() {
return remark;
}
public void setRemark(String remark) {
this.remark = remark;
}
/**
* 获取ID的兼容方法保持与原有代码兼容
*/
public Long getId() {
return alarmId;
}
/**
* 设置ID的兼容方法保持与原有代码兼容
*/
public void setId(Long id) {
this.alarmId = id;
}
/**
* 设置告警级别
*
* @param alarmLevel 告警级别
*/
public void setAlarmLevel(String alarmLevel) {
this.alarmLevel = alarmLevel;
}
/**
* 获取告警级别
*
* @return 告警级别
*/
public String getAlarmLevel() {
return alarmLevel;
}
}

View File

@@ -2,5 +2,61 @@ package com.ruoyi.video.domain;
import org.bytedeco.opencv.opencv_core.Rect;
public record Detection(String cls, float conf, Rect box, int colorBGR) {
/**
 * One object-detection hit: a labelled, scored bounding box together with the
 * colour (BGR order) used when the box is drawn onto a frame.
 */
public class Detection {

    /** Class label assigned by the detector. */
    private String label;

    /** Confidence score for this detection. */
    private double confidence;

    /** Bounding box of the detected object. */
    private Rect rect;

    /** Colour used to render the box, in BGR order. */
    private int colorBGR;

    /** Default constructor. */
    public Detection() {
    }

    /**
     * Creates a fully populated detection result.
     *
     * @param label      class label
     * @param confidence confidence score
     * @param rect       bounding box
     * @param colorBGR   render colour in BGR order
     */
    public Detection(String label, double confidence, Rect rect, int colorBGR) {
        this.label = label;
        this.confidence = confidence;
        this.rect = rect;
        this.colorBGR = colorBGR;
    }

    public String getLabel() {
        return label;
    }

    public void setLabel(String label) {
        this.label = label;
    }

    public double getConfidence() {
        return confidence;
    }

    public void setConfidence(double confidence) {
        this.confidence = confidence;
    }

    public Rect getRect() {
        return rect;
    }

    public void setRect(Rect rect) {
        this.rect = rect;
    }

    public int getColorBGR() {
        return colorBGR;
    }

    public void setColorBGR(int colorBGR) {
        this.colorBGR = colorBGR;
    }
}

View File

@@ -1,74 +1,40 @@
package com.ruoyi.video.domain;
import com.ruoyi.common.annotation.Excel;
import com.ruoyi.common.core.domain.BaseEntity;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.util.Date;
/**
* 巡检任务对象 v_inspection_task
*
* @Author: orange
* @CreateTime: 2025-01-16
* 巡检任务实体类
*/
public class InspectionTask extends BaseEntity {
private static final long serialVersionUID = 1L;
public class InspectionTask {
/** 巡检任务ID */
private Long taskId;
/** 任务名称 */
@Excel(name = "任务名称")
private String taskName;
/** 设备ID */
@Excel(name = "设备ID")
private Long deviceId;
/** 设备名称 */
@Excel(name = "设备名称")
private String deviceName;
/** Cron表达式 */
@Excel(name = "Cron表达式")
private String cronExpression;
/** 巡检时长(秒) */
@Excel(name = "巡检时长")
private Integer duration;
/** 任务状态(0=启用,1=停用) */
@Excel(name = "任务状态", readConverterExp = "0=启用,1=停用")
private String status;
/** 视频对象存储ID */
private Long videoOssId;
/** 是否启用检测(0=启用,1=停用) */
@Excel(name = "启用检测", readConverterExp = "0=启用,1=停用")
private String enableDetection;
/** 处理后视频对象存储ID */
private Long processedVideoOssId;
/** 检测阈值 */
@Excel(name = "检测阈值")
private Double threshold;
/** 视频状态(0-未录制,1-已录制未分析,2-已分析) */
private Integer videoStatus;
/** 最后执行时间 */
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
@Excel(name = "最后执行时间", width = 30, dateFormat = "yyyy-MM-dd HH:mm:ss")
private Date lastExecuteTime;
/** 任务状态(0-待执行,1-执行中,2-已完成) */
private Integer status;
/** 下次执行时间 */
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
@Excel(name = "下次执行时间", width = 30, dateFormat = "yyyy-MM-dd HH:mm:ss")
private Date nextExecuteTime;
/** 创建时间 */
private Date createTime;
/** 执行次数 */
@Excel(name = "执行次数")
private Long executeCount;
/** 更新时间 */
private Date updateTime;
/** 报警次数 */
@Excel(name = "报警次数")
private Long alarmCount;
public InspectionTask() {}
/** 备注 */
private String remark;
public Long getTaskId() {
return taskId;
@@ -79,14 +45,6 @@ public class InspectionTask extends BaseEntity {
}
public String getTaskName() {
return taskName;
}
public void setTaskName(String taskName) {
this.taskName = taskName;
}
public Long getDeviceId() {
return deviceId;
}
@@ -95,22 +53,6 @@ public class InspectionTask extends BaseEntity {
this.deviceId = deviceId;
}
public String getDeviceName() {
return deviceName;
}
public void setDeviceName(String deviceName) {
this.deviceName = deviceName;
}
public String getCronExpression() {
return cronExpression;
}
public void setCronExpression(String cronExpression) {
this.cronExpression = cronExpression;
}
public Integer getDuration() {
return duration;
}
@@ -119,78 +61,73 @@ public class InspectionTask extends BaseEntity {
this.duration = duration;
}
public String getStatus() {
public Long getVideoOssId() {
return videoOssId;
}
public void setVideoOssId(Long videoOssId) {
this.videoOssId = videoOssId;
}
public Long getProcessedVideoOssId() {
return processedVideoOssId;
}
public void setProcessedVideoOssId(Long processedVideoOssId) {
this.processedVideoOssId = processedVideoOssId;
}
public Integer getVideoStatus() {
return videoStatus;
}
public void setVideoStatus(Integer videoStatus) {
this.videoStatus = videoStatus;
}
public Integer getStatus() {
return status;
}
public void setStatus(String status) {
public void setStatus(Integer status) {
this.status = status;
}
public String getEnableDetection() {
return enableDetection;
public Date getCreateTime() {
return createTime;
}
public void setEnableDetection(String enableDetection) {
this.enableDetection = enableDetection;
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public Double getThreshold() {
return threshold;
public Date getUpdateTime() {
return updateTime;
}
public void setThreshold(Double threshold) {
this.threshold = threshold;
public void setUpdateTime(Date updateTime) {
this.updateTime = updateTime;
}
public Date getLastExecuteTime() {
return lastExecuteTime;
public String getRemark() {
return remark;
}
public void setLastExecuteTime(Date lastExecuteTime) {
this.lastExecuteTime = lastExecuteTime;
public void setRemark(String remark) {
this.remark = remark;
}
public Date getNextExecuteTime() {
return nextExecuteTime;
/**
* 获取ID的兼容方法保持与原有代码兼容
*/
public Long getId() {
return taskId;
}
public void setNextExecuteTime(Date nextExecuteTime) {
this.nextExecuteTime = nextExecuteTime;
}
public Long getExecuteCount() {
return executeCount;
}
public void setExecuteCount(Long executeCount) {
this.executeCount = executeCount;
}
public Long getAlarmCount() {
return alarmCount;
}
public void setAlarmCount(Long alarmCount) {
this.alarmCount = alarmCount;
}
@Override
public String toString() {
return "InspectionTask{" +
"taskId=" + taskId +
", taskName='" + taskName + '\'' +
", deviceId=" + deviceId +
", deviceName='" + deviceName + '\'' +
", cronExpression='" + cronExpression + '\'' +
", duration=" + duration +
", status='" + status + '\'' +
", enableDetection='" + enableDetection + '\'' +
", threshold=" + threshold +
", lastExecuteTime=" + lastExecuteTime +
", nextExecuteTime=" + nextExecuteTime +
", executeCount=" + executeCount +
", alarmCount=" + alarmCount +
'}';
/**
* 设置ID的兼容方法保持与原有代码兼容
*/
public void setId(Long id) {
this.taskId = id;
}
}

View File

@@ -1,36 +1,151 @@
package com.ruoyi.video.domain;
import com.ruoyi.common.core.domain.BaseEntity;
import java.util.Date;
/**
* MinIO 返回结果记录实体,对应表 v_minio_object
* MinIO对象存储实体类
*/
public class VMinioObject extends BaseEntity {
private static final long serialVersionUID = 1L;
/** 主键ID */
public class VMinioObject {
/** 对象ID */
private Long objectId;
/** MinIO 对象名Key */
/** 存储桶名称 */
private String bucketName;
/** 对象名称 */
private String objectName;
/** 访问URL */
private String url;
/** 原始文件名(上传时的文件名) */
private String originalName;
/** 对象URL */
private String objectUrl;
/** 对象大小(字节) */
private Long objectSize;
/** 对象类型 */
private String objectType;
/** 创建者 */
private String createBy;
/** 创建时间 */
private Date createTime;
/** 更新者 */
private String updateBy;
/** 更新时间 */
private Date updateTime;
/** 备注 */
private String remark;
/** 删除标志0存在 2删除 */
private String delFlag;
public Long getObjectId() { return objectId; }
public void setObjectId(Long objectId) { this.objectId = objectId; }
public String getDelFlag() {
return delFlag;
}
public String getObjectName() { return objectName; }
public void setObjectName(String objectName) { this.objectName = objectName; }
public void setDelFlag(String delFlag) {
this.delFlag = delFlag;
}
public String getUrl() { return url; }
public void setUrl(String url) { this.url = url; }
/** 原始文件名 */
private String originalName;
public String getOriginalName() { return originalName; }
public void setOriginalName(String originalName) { this.originalName = originalName; }
public String getOriginalName() {
return originalName;
}
public String getDelFlag() { return delFlag; }
public void setDelFlag(String delFlag) { this.delFlag = delFlag; }
public void setOriginalName(String originalName) {
this.originalName = originalName;
}
public Long getObjectId () {
return objectId;
}
public void setObjectId(Long objectId) {
this.objectId = objectId;
}
public String getBucketName() {
return bucketName;
}
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
public String getObjectName() {
return objectName;
}
public void setObjectName(String objectName) {
this.objectName = objectName;
}
public String getObjectUrl() {
return objectUrl;
}
public void setObjectUrl(String objectUrl) {
this.objectUrl = objectUrl;
}
public Long getObjectSize() {
return objectSize;
}
public void setObjectSize(Long objectSize) {
this.objectSize = objectSize;
}
public String getObjectType() {
return objectType;
}
public void setObjectType(String objectType) {
this.objectType = objectType;
}
public String getCreateBy() {
return createBy;
}
public void setCreateBy(String createBy) {
this.createBy = createBy;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public String getUpdateBy() {
return updateBy;
}
public void setUpdateBy(String updateBy) {
this.updateBy = updateBy;
}
public Date getUpdateTime() {
return updateTime;
}
public void setUpdateTime(Date updateTime) {
this.updateTime = updateTime;
}
public String getRemark() {
return remark;
}
public void setRemark(String remark) {
this.remark = remark;
}
}

View File

@@ -0,0 +1,22 @@
package com.ruoyi.video.event;
import org.springframework.context.ApplicationEvent;
/**
 * Event published when an inspection task has finished, carrying the id of the
 * completed task so listeners can start follow-up work (e.g. video analysis).
 */
public class TaskCompletedEvent extends ApplicationEvent {

    private static final long serialVersionUID = 1L;

    /** Id of the inspection task that completed. */
    private final Long taskId;

    /**
     * @param source the component that published the event
     * @param taskId id of the completed inspection task
     */
    public TaskCompletedEvent(Object source, Long taskId) {
        super(source);
        this.taskId = taskId;
    }

    /**
     * @return id of the completed inspection task
     */
    public Long getTaskId() {
        return taskId;
    }
}

View File

@@ -0,0 +1,23 @@
package com.ruoyi.video.event;
import com.ruoyi.video.domain.InspectionTask;
import org.springframework.context.ApplicationEvent;
/**
 * Event published when an inspection task starts, carrying the full task so
 * listeners can read its device, duration and other settings.
 */
public class TaskStartEvent extends ApplicationEvent {

    private static final long serialVersionUID = 1L;

    /** The inspection task that is starting. */
    private final InspectionTask task;

    /**
     * @param source the component that published the event
     * @param task   the inspection task that is starting
     */
    public TaskStartEvent(Object source, InspectionTask task) {
        super(source);
        this.task = task;
    }

    /**
     * @return the inspection task that is starting
     */
    public InspectionTask getTask() {
        return task;
    }
}

View File

@@ -0,0 +1,75 @@
package com.ruoyi.video.listener;
import com.ruoyi.video.domain.InspectionTask;
import com.ruoyi.video.event.TaskCompletedEvent;
import com.ruoyi.video.event.TaskStartEvent;
import com.ruoyi.video.service.MediaService;
import com.ruoyi.video.service.VideoAnalysisService;
import com.ruoyi.video.thread.MediaTransferFlvByJavacv;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.event.EventListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
/**
 * Listens for inspection-task lifecycle events: starts video recording on the
 * device's media transfer when a task begins, and triggers asynchronous video
 * analysis when a task completes.
 */
@Component
public class InspectionTaskEventListener {

    private static final Logger log = LoggerFactory.getLogger(InspectionTaskEventListener.class);

    @Autowired
    private MediaService mediaService;

    @Autowired
    private VideoAnalysisService videoAnalysisService;

    /**
     * Handles a task-start event by asking the device's media transfer to begin
     * recording for the task's configured duration.
     *
     * @param event the task-start event carrying the inspection task
     */
    @EventListener
    public void handleTaskStartEvent(TaskStartEvent event) {
        InspectionTask task = event.getTask();
        log.info("接收到任务开始事件: 任务ID={}, 设备ID={}", task.getTaskId(), task.getDeviceId());
        try {
            // Recording can only start if a live media transfer is already
            // bound to this device.
            MediaTransferFlvByJavacv transfer = mediaService.getMediaTransfer(task.getDeviceId());
            if (transfer != null) {
                transfer.startInspection(task.getTaskId(), task.getDeviceId(), task.getDuration());
                log.info("已开始视频录制: 任务ID={}, 设备ID={}, 持续时间={}秒",
                        task.getTaskId(), task.getDeviceId(), task.getDuration());
            } else {
                log.error("未找到对应的媒体传输器: 设备ID={}", task.getDeviceId());
            }
        } catch (Exception e) {
            // Pass the throwable as the last argument so SLF4J logs the stack
            // trace instead of just the message.
            log.error("处理任务开始事件失败: {}", e.getMessage(), e);
        }
    }

    /**
     * Handles a task-completed event by starting asynchronous analysis of the
     * recorded video.
     *
     * @param event the task-completed event carrying the task id
     */
    @EventListener
    @Async
    public void handleTaskCompletedEvent(TaskCompletedEvent event) {
        Long taskId = event.getTaskId();
        log.info("接收到任务完成事件: 任务ID={}", taskId);
        try {
            videoAnalysisService.analyzeVideo(taskId);
        } catch (Exception e) {
            // Pass the throwable as the last argument so SLF4J logs the stack
            // trace instead of just the message.
            log.error("处理任务完成事件失败: {}", e.getMessage(), e);
        }
    }
}

View File

@@ -1,72 +1,67 @@
package com.ruoyi.video.mapper;
import java.util.List;
import com.ruoyi.video.domain.AlarmRecord;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import java.util.List;
/**
* 警记录Mapper接口
*
* @author ruoyi
* @date 2025-01-16
* 警记录Mapper接口
*/
@Mapper
public interface AlarmRecordMapper {
/**
* 查询警记录
* 查询警记录
*
* @param alarmId 报警记录主键
* @return 警记录
* @param alarmId 告警ID
* @return 警记录
*/
public AlarmRecord selectAlarmRecordByAlarmId(Long alarmId);
public AlarmRecord selectAlarmRecordById(Long alarmId);
/**
* 查询警记录列表
* 查询警记录列表
*
* @param alarmRecord 警记录
* @return 警记录集合
* @param alarmRecord 警记录
* @return 警记录集合
*/
public List<AlarmRecord> selectAlarmRecordList(AlarmRecord alarmRecord);
/**
* 新增警记录
* 新增警记录
*
* @param alarmRecord 警记录
* @param alarmRecord 警记录
* @return 结果
*/
public int insertAlarmRecord(AlarmRecord alarmRecord);
/**
* 修改警记录
* 修改警记录
*
* @param alarmRecord 警记录
* @param alarmRecord 警记录
* @return 结果
*/
public int updateAlarmRecord(AlarmRecord alarmRecord);
/**
* 删除警记录
* 删除警记录
*
* @param alarmId 报警记录主键
* @param alarmId 告警ID
* @return 结果
*/
public int deleteAlarmRecordByAlarmId(Long alarmId);
public int deleteAlarmRecordById(Long alarmId);
/**
* 批量删除警记录
* 批量删除警记录
*
* @param alarmIds 需要删除的数据主键集合
* @param alarmIds 需要删除的数据ID集合
* @return 结果
*/
public int deleteAlarmRecordByAlarmIds(Long[] alarmIds);
public int deleteAlarmRecordByIds(Long[] alarmIds);
/**
* 处理警记录
* 处理警记录
*
* @param alarmId 警ID
* @param alarmId 警ID
* @param handleStatus 处理状态
* @param handleRemark 处理备注
* @param handleBy 处理人
@@ -78,10 +73,10 @@ public interface AlarmRecordMapper {
@Param("handleBy") String handleBy);
/**
* 根据任务ID统计警数量
* 根据任务ID统计警数量
*
* @param taskId 任务ID
* @return 警数量
* @return 警数量
*/
public Long countAlarmByTaskId(@Param("taskId") Long taskId);
}

View File

@@ -8,21 +8,9 @@ import java.util.List;
/**
* 巡检任务Mapper接口
*
* @author ruoyi
* @date 2025-01-16
*/
@Mapper
public interface InspectionTaskMapper {
/**
* 查询巡检任务
*
* @param taskId 巡检任务主键
* @return 巡检任务
*/
public InspectionTask selectInspectionTaskByTaskId(Long taskId);
/**
* 查询巡检任务列表
*
@@ -31,6 +19,14 @@ public interface InspectionTaskMapper {
*/
public List<InspectionTask> selectInspectionTaskList(InspectionTask inspectionTask);
/**
* 查询巡检任务
*
* @param taskId 巡检任务ID
* @return 巡检任务
*/
public InspectionTask selectInspectionTaskById(Long taskId);
/**
* 新增巡检任务
*
@@ -50,18 +46,18 @@ public interface InspectionTaskMapper {
/**
* 删除巡检任务
*
* @param taskId 巡检任务主键
* @param taskId 巡检任务ID
* @return 结果
*/
public int deleteInspectionTaskByTaskId(Long taskId);
public int deleteInspectionTaskById(Long taskId);
/**
* 批量删除巡检任务
*
* @param taskIds 需要删除的数据主键集合
* @param taskIds 需要删除的数据ID集合
* @return 结果
*/
public int deleteInspectionTaskByTaskIds(Long[] taskIds);
public int deleteInspectionTaskByIds(Long[] taskIds);
/**
* 查询启用状态的巡检任务列表

View File

@@ -4,12 +4,74 @@ import com.ruoyi.video.domain.VMinioObject;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import java.util.List;
/**
* MinIO对象Mapper接口
*/
@Mapper
public interface VMinioObjectMapper {
int insertVMinioObject(VMinioObject obj);
VMinioObject selectVMinioObjectById(@Param("id") Long id);
int deleteVMinioObjectById(@Param("id") Long id);
/**
* 查询MinIO对象
*
* @param objectId MinIO对象ID
* @return MinIO对象
*/
public VMinioObject selectVMinioObjectById(Long objectId);
VMinioObject selectVMinioObjectByObjectName(@Param("objectName") String objectName);
int deleteVMinioObjectByObjectName(@Param("objectName") String objectName);
/**
* 查询MinIO对象列表
*
* @param vMinioObject MinIO对象
* @return MinIO对象集合
*/
public List<VMinioObject> selectVMinioObjectList(VMinioObject vMinioObject);
/**
* 新增MinIO对象
*
* @param vMinioObject MinIO对象
* @return 结果
*/
public int insertVMinioObject(VMinioObject vMinioObject);
/**
* 修改MinIO对象
*
* @param vMinioObject MinIO对象
* @return 结果
*/
public int updateVMinioObject(VMinioObject vMinioObject);
/**
* 删除MinIO对象
*
* @param objectId MinIO对象ID
* @return 结果
*/
public int deleteVMinioObjectById(Long objectId);
/**
* 批量删除MinIO对象
*
* @param objectIds 需要删除的数据ID
* @return 结果
*/
public int deleteVMinioObjectByIds(Long[] objectIds);
/**
* 根据对象名称查询MinIO对象
*
* @param objectName 对象名称
* @return MinIO对象
*/
public VMinioObject selectVMinioObjectByObjectName(String objectName);
/**
* 根据对象名称删除MinIO对象
*
* @param objectName 对象名称
* @return 结果
*/
public int deleteVMinioObjectByObjectName(String objectName);
}

View File

@@ -0,0 +1,39 @@
package com.ruoyi.video.service;
import com.ruoyi.video.domain.InspectionTaskRecord;
import java.util.List;
/**
 * Service interface for inspection task execution records.
 */
public interface IInspectionTaskRecordService {

    /**
     * Query a single inspection task record by its primary key.
     *
     * @param recordId record primary key
     * @return the matching record
     */
    InspectionTaskRecord selectInspectionTaskRecordByRecordId(Long recordId);

    /**
     * Query inspection task records matching the fields set on the given
     * example object.
     *
     * @param record query conditions
     * @return list of matching records
     */
    List<InspectionTaskRecord> selectInspectionTaskRecordList(InspectionTaskRecord record);

    /**
     * Insert a new inspection task record.
     *
     * @param record the record to insert
     * @return number of rows affected
     */
    int insertInspectionTaskRecord(InspectionTaskRecord record);

    /**
     * Update an existing inspection task record.
     *
     * @param record the record to update
     * @return number of rows affected
     */
    int updateInspectionTaskRecord(InspectionTaskRecord record);

    /**
     * Delete an inspection task record by its primary key.
     *
     * @param recordId record primary key
     * @return number of rows affected
     */
    int deleteInspectionTaskRecordByRecordId(Long recordId);

    /**
     * Batch-delete inspection task records by their primary keys.
     *
     * @param recordIds record primary keys to delete
     * @return number of rows affected
     */
    int deleteInspectionTaskRecordByRecordIds(Long[] recordIds);
}

View File

@@ -1,13 +1,97 @@
package com.ruoyi.video.service;
import com.ruoyi.video.domain.VMinioObject;
import java.util.List;
/**
* MinIO对象服务接口
*/
public interface IVMinioObjectService {
int insert(VMinioObject obj);
VMinioObject selectById(Long id);
int deleteById(Long id);
/**
* 查询MinIO对象
*
* @param objectId MinIO对象ID
* @return MinIO对象
*/
public VMinioObject selectVMinioObjectById(Long objectId);
VMinioObject selectByObjectName(String objectName);
int deleteByObjectName(String objectName);
/**
* 查询MinIO对象列表
*
* @param vMinioObject MinIO对象
* @return MinIO对象集合
*/
public List<VMinioObject> selectVMinioObjectList(VMinioObject vMinioObject);
/**
* 新增MinIO对象
*
* @param vMinioObject MinIO对象
* @return 结果
*/
public Long insertVMinioObject(VMinioObject vMinioObject);
/**
* 修改MinIO对象
*
* @param vMinioObject MinIO对象
* @return 结果
*/
public int updateVMinioObject(VMinioObject vMinioObject);
/**
* 删除MinIO对象信息
*
* @param objectId MinIO对象ID
* @return 结果
*/
public int deleteVMinioObjectById(Long objectId);
/**
* 批量删除MinIO对象
*
* @param objectIds 需要删除的MinIO对象ID
* @return 结果
*/
public int deleteVMinioObjectByIds(Long[] objectIds);
/**
* 根据对象名查询
*
* @param objectName 对象名
* @return MinIO对象
*/
public VMinioObject selectByObjectName(String objectName);
/**
* 根据ID查询
*
* @param id 对象ID
* @return MinIO对象
*/
public VMinioObject selectById(Long id);
/**
* 根据ID删除
*
* @param id 对象ID
* @return 结果
*/
public int deleteById(Long id);
/**
* 根据对象名删除
*
* @param objectName 对象名
* @return 结果
*/
public int deleteByObjectName(String objectName);
/**
* 插入记录
*
* @param obj MinIO对象
* @return 结果
*/
public int insert(VMinioObject obj);
}

View File

@@ -1,8 +1,8 @@
package com.ruoyi.video.service;
import com.ruoyi.video.domain.InspectionTask;
import com.ruoyi.video.domain.AlarmRecord;
import com.ruoyi.video.domain.Detection;
import com.ruoyi.video.domain.InspectionTask;
import org.springframework.scheduling.annotation.Async;
import java.util.List;
@@ -17,62 +17,100 @@ public interface InspectionTaskService {
/**
* 查询巡检任务列表
*
* @param inspectionTask 巡检任务查询条件
* @return 巡检任务集合
*/
List<InspectionTask> selectInspectionTaskList(InspectionTask inspectionTask);
/**
* 根据ID查询巡检任务
*
* @param taskId 巡检任务ID
* @return 巡检任务信息
*/
InspectionTask selectInspectionTaskById(Long taskId);
/**
* 新增巡检任务
*
* @param inspectionTask 巡检任务信息
* @return 结果
*/
int insertInspectionTask(InspectionTask inspectionTask);
/**
* 修改巡检任务
*
* @param inspectionTask 巡检任务信息
* @return 结果
*/
int updateInspectionTask(InspectionTask inspectionTask);
/**
* 删除巡检任务
* 批量删除巡检任务
*
* @param taskIds 需要删除的巡检任务ID
* @return 结果
*/
int deleteInspectionTaskByIds(Long[] taskIds);
/**
* 启动巡检任务
*
* @param taskId 巡检任务ID
* @return 结果
*/
boolean startInspectionTask(Long taskId);
/**
* 停止巡检任务
*
* @param taskId 巡检任务ID
* @return 结果
*/
boolean stopInspectionTask(Long taskId);
/**
* 执行单次巡检任务
* 执行巡检任务
*
* @param taskId 巡检任务ID
*/
@Async
void executeInspectionTask(Long taskId);
/**
* 处理检测结果,如果有异常则生成报警
* 处理检测结果
*
* @param taskId 巡检任务ID
* @param detections 检测结果
* @param imagePath 图像路径
*/
void handleDetectionResults(Long taskId, List<Detection> detections, String imagePath);
/**
* 保存警记录
* 保存警记录
*
* @param alarmRecord 告警记录
*/
void saveAlarmRecord(AlarmRecord alarmRecord);
/**
* 查询警记录列表
* 查询警记录列表
*
* @param alarmRecord 告警记录查询条件
* @return 告警记录集合
*/
List<AlarmRecord> selectAlarmRecordList(AlarmRecord alarmRecord);
/**
* 处理警记录
* 处理警记录
*
* @param alarmId 告警ID
* @param handleStatus 处理状态
* @param handleRemark 处理备注
* @param handleBy 处理人
* @return 结果
*/
int handleAlarmRecord(Long alarmId, String handleStatus, String handleRemark, String handleBy);
}

View File

@@ -12,6 +12,7 @@ import org.springframework.stereotype.Service;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.Map;
/**
* 媒体服务,支持全局网络超时、读写超时、无人拉流持续时长自动关闭流等配置
@@ -26,6 +27,16 @@ public class MediaService {
*/
public static ConcurrentHashMap<String, MediaTransfer> cameras = new ConcurrentHashMap<>();
/**
* 客户端类型映射
*/
public static ConcurrentHashMap<String, ClientType> clients = new ConcurrentHashMap<>();
/**
* 客户端连接映射
*/
public static ConcurrentHashMap<String, Map<String, ChannelHandlerContext>> clientConnections = new ConcurrentHashMap<>();
/**
* http-flv播放
* @param cameraDto 摄像头配置
@@ -253,4 +264,34 @@ public class MediaService {
}
cameras.remove(mediaKey);
}
/**
* 获取设备对应的媒体传输器
*
* @param deviceId 设备ID
* @return 媒体传输器实例如果不存在则返回null
*/
public MediaTransferFlvByJavacv getMediaTransfer(Long deviceId) {
if (deviceId == null) {
return null;
}
// 遍历所有已注册的相机
for (Map.Entry<String, MediaTransfer> entry : cameras.entrySet()) {
String mediaKey = entry.getKey();
if (mediaKey.startsWith("device_" + deviceId + "_")) {
// 找到匹配设备ID的mediaKey
MediaTransfer mediaTransfer = entry.getValue();
if (mediaTransfer instanceof MediaTransferFlvByJavacv) {
MediaTransferFlvByJavacv transfer = (MediaTransferFlvByJavacv) mediaTransfer;
if (transfer.getCameraDto() != null &&
mediaKey.equals(transfer.getCameraDto().getMediaKey())) {
return transfer;
}
}
}
}
return null;
}
}

View File

@@ -0,0 +1,595 @@
package com.ruoyi.video.service;
import com.ruoyi.common.utils.spring.SpringUtils;
import com.ruoyi.framework.service.MinioService;
import com.ruoyi.video.domain.AlarmRecord;
import com.ruoyi.video.domain.Detection;
import com.ruoyi.video.domain.InspectionTask;
import com.ruoyi.video.domain.VMinioObject;
import com.ruoyi.video.mapper.AlarmRecordMapper;
import com.ruoyi.video.mapper.InspectionTaskMapper;
import com.ruoyi.video.thread.detector.HttpYoloDetector;
import com.ruoyi.video.utils.Overlay;
import com.ruoyi.video.utils.CustomMultipartFile;
import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVPacket;
import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Point;
import org.bytedeco.opencv.opencv_core.Rect;
import org.bytedeco.opencv.opencv_core.Scalar;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.nio.IntBuffer;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import static org.bytedeco.opencv.global.opencv_imgproc.*;
/**
* 视频分析服务 - 离线分析视频并处理结果
*/
@Slf4j
@Service
public class VideoAnalysisService {
@Autowired
private MinioService minioService;
@Autowired
private IVMinioObjectService vMinioObjectService;
@Autowired
private InspectionTaskMapper taskMapper;
@Autowired
private AlarmRecordMapper alarmRecordMapper;
@Autowired
private com.ruoyi.video.mapper.InspectionTaskRecordMapper inspectionTaskRecordMapper;
// 检测器配置 - 使用容器名而不是localhost
private static final String PYTHON_API_URL = "http://rtsp-python-service:8000/api/detect/file";
private static final String MODEL_NAME = "yolov8_detector";
/**
* 分析视频并更新记录(同步调用)
* @param task 巡检任务
* @param record 巡检记录
* @param videoFile 视频文件
*/
public void analyzeVideoWithRecord(InspectionTask task, com.ruoyi.video.domain.InspectionTaskRecord record, File videoFile) {
log.info("开始分析视频并更新记录: 任务ID={}, 记录ID={}", task.getTaskId(), record.getRecordId());
try {
// 创建输出视频文件
File outputVideoFile = File.createTempFile("analysis_output_", ".mp4");
// 创建检测器
HttpYoloDetector detector = new HttpYoloDetector("yolov8", PYTHON_API_URL, MODEL_NAME, 0x00FF00);
// 处理视频并记录检测结果
String detectionResult = processVideoWithRecord(videoFile, outputVideoFile, detector, task, record);
// 更新记录的识别结果
record.setResult(detectionResult);
inspectionTaskRecordMapper.updateInspectionTaskRecord(record);
// 清理临时输出文件
if (outputVideoFile.exists()) {
outputVideoFile.delete();
}
log.info("视频分析完成: 任务ID={}, 记录ID={}, 检测结果={}", task.getTaskId(), record.getRecordId(), detectionResult);
} catch (Exception e) {
log.error("视频分析失败: 任务ID={}, 记录ID={}, 错误={}", task.getTaskId(), record.getRecordId(), e.getMessage(), e);
// 更新记录为部分成功
record.setStatus(2);
record.setResult("分析失败: " + e.getMessage());
inspectionTaskRecordMapper.updateInspectionTaskRecord(record);
}
}
/**
* 处理视频并记录检测结果
* @return 检测结果摘要
*/
private String processVideoWithRecord(File inputFile, File outputFile, HttpYoloDetector detector,
InspectionTask task, com.ruoyi.video.domain.InspectionTaskRecord record) throws Exception {
FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(inputFile);
grabber.start();
FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(outputFile,
grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels());
recorder.setFormat("mp4");
recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
recorder.setFrameRate(grabber.getFrameRate());
recorder.setVideoBitrate(grabber.getVideoBitrate());
if (grabber.getAudioChannels() > 0) {
recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
recorder.setAudioBitrate(grabber.getAudioBitrate());
recorder.setAudioChannels(grabber.getAudioChannels());
recorder.setSampleRate(grabber.getSampleRate());
}
recorder.start();
OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
// 用于去重的垃圾检测结果缓存
Map<String, Long> detectedGarbageCache = new HashMap<>();
Map<String, Integer> detectionCounts = new HashMap<>(); // 统计每种类别的数量
// 跟踪检测到的垃圾ID
final Long[] detectionId = {1L};
// 帧计数
long frameCount = 0;
int totalDetections = 0;
Frame frame;
while ((frame = grabber.grab()) != null) {
frameCount++;
if (frame.image != null) {
// 处理视频帧
Mat mat = converter.convert(frame);
if (mat != null && !mat.isNull()) {
// 每10帧执行一次检测减少API调用频率
if (frameCount % 10 == 0) {
List<Detection> detections = detector.detect(mat);
if (!CollectionUtils.isEmpty(detections)) {
for (Detection detection : detections) {
// 检查是否为新的垃圾检测结果
String detectionKey = generateDetectionKey(detection);
if (!detectedGarbageCache.containsKey(detectionKey)) {
// 这是新检测到的垃圾
detectedGarbageCache.put(detectionKey, detectionId[0]++);
totalDetections++;
// 统计类别数量
String label = detection.getLabel();
detectionCounts.put(label, detectionCounts.getOrDefault(label, 0) + 1);
// 创建告警记录(不重复)
createAlarmRecordForRecord(task, record, detection, mat, frameCount);
} else {
// 更新上次检测时间
detectedGarbageCache.put(detectionKey, detectionId[0]++);
}
}
// 清理超过60秒未检测到的垃圾假设30fps
Long currentId = detectionId[0];
detectedGarbageCache.entrySet().removeIf(entry ->
(currentId - entry.getValue()) > grabber.getFrameRate() * 60);
}
}
// 转回Frame并写入录像
Frame processedFrame = converter.convert(mat);
recorder.record(processedFrame);
} else {
// 原样写入
recorder.record(frame);
}
} else if (frame.samples != null) {
// 音频帧原样写入
recorder.record(frame);
}
}
recorder.stop();
recorder.close();
grabber.stop();
grabber.close();
// 上传处理后的视频到MinIO
uploadProcessedVideoForRecord(outputFile, task, record);
// 生成检测结果摘要
StringBuilder resultSummary = new StringBuilder();
resultSummary.append("共检测到 ").append(totalDetections).append(" 个问题");
if (!detectionCounts.isEmpty()) {
resultSummary.append(",详情:");
detectionCounts.forEach((label, count) ->
resultSummary.append(label).append("(").append(count).append(") "));
}
return resultSummary.toString();
}
/**
* 为记录创建告警
*/
private void createAlarmRecordForRecord(InspectionTask task, com.ruoyi.video.domain.InspectionTaskRecord record,
Detection detection, Mat frame, long frameCount) throws Exception {
// 创建告警图像临时文件
File alarmImageFile = File.createTempFile("alarm_", ".jpg");
// 裁剪检测区域,略微扩大区域
Rect rect = detection.getRect();
int x = Math.max(0, rect.x() - 10);
int y = Math.max(0, rect.y() - 10);
int w = Math.min(frame.cols() - x, rect.width() + 20);
int h = Math.min(frame.rows() - y, rect.height() + 20);
// 使用OpenCV保存告警图片
Mat roi = new Mat(frame, new Rect(x, y, w, h));
org.bytedeco.opencv.global.opencv_imgcodecs.imwrite(alarmImageFile.getAbsolutePath(), roi);
// 上传告警图片到MinIO
String fileName = "alarm_" + System.currentTimeMillis() + ".jpg";
String bucketName = "alarm-images";
CustomMultipartFile multipartFile = new CustomMultipartFile(alarmImageFile, fileName, "image/jpeg");
String objectUrl = minioService.putObject(bucketName, fileName, multipartFile.getInputStream());
VMinioObject minioObject = new VMinioObject();
minioObject.setBucketName(bucketName);
minioObject.setObjectName(fileName);
minioObject.setObjectUrl(objectUrl);
minioObject.setCreateBy("system");
minioObject.setCreateTime(new Date());
Long objectId = vMinioObjectService.insertVMinioObject(minioObject);
// 创建告警记录
AlarmRecord alarmRecord = new AlarmRecord();
alarmRecord.setDeviceId(task.getDeviceId());
alarmRecord.setAlarmType("detection");
alarmRecord.setAlarmContent(detection.getLabel() + " - 置信度: " + String.format("%.2f", detection.getConfidence()));
alarmRecord.setTaskId(task.getTaskId());
alarmRecord.setImageOssId(objectId);
alarmRecord.setFramePosition(frameCount);
alarmRecord.setConfidence(detection.getConfidence());
alarmRecord.setCreateTime(new Date());
alarmRecord.setStatus(0); // 未处理
alarmRecordMapper.insertAlarmRecord(alarmRecord);
log.info("创建告警记录: 类型={}, 任务ID={}, 记录ID={}, 告警ID={}",
detection.getLabel(), task.getTaskId(), record.getRecordId(), alarmRecord.getAlarmId());
// 删除临时文件
if (alarmImageFile.exists()) {
alarmImageFile.delete();
}
}
/**
* 上传处理后的视频(针对记录)
*/
private void uploadProcessedVideoForRecord(File videoFile, InspectionTask task,
com.ruoyi.video.domain.InspectionTaskRecord record) throws Exception {
String fileName = "processed_" + System.currentTimeMillis() + ".mp4";
String bucketName = "inspection-videos";
CustomMultipartFile multipartFile = new CustomMultipartFile(videoFile, fileName, "video/mp4");
String objectUrl = minioService.putObject(bucketName, fileName, multipartFile.getInputStream());
VMinioObject minioObject = new VMinioObject();
minioObject.setBucketName(bucketName);
minioObject.setObjectName(fileName);
minioObject.setObjectUrl(objectUrl);
minioObject.setCreateBy("system");
minioObject.setCreateTime(new Date());
Long objectId = vMinioObjectService.insertVMinioObject(minioObject);
// 更新任务的处理后视频ID
task.setProcessedVideoOssId(objectId);
task.setVideoStatus(2); // 2: 已分析
taskMapper.updateInspectionTask(task);
// 更新记录的附件信息添加处理后的视频URL
String currentAccessory = record.getAccessory();
record.setAccessory(currentAccessory + ";" + objectUrl);
inspectionTaskRecordMapper.updateInspectionTaskRecord(record);
log.info("处理后视频已上传: 任务ID={}, 记录ID={}, MinIO对象ID={}",
task.getTaskId(), record.getRecordId(), objectId);
}
/**
* 异步分析视频
* @param taskId 巡检任务ID
*/
@Async
public void analyzeVideo(Long taskId) {
log.info("开始异步分析视频: 任务ID={}", taskId);
try {
// 1. 获取任务信息
InspectionTask task = taskMapper.selectInspectionTaskById(taskId);
if (task == null) {
log.error("任务不存在: {}", taskId);
return;
}
// 2. 获取原始视频信息
Long videoOssId = task.getVideoOssId();
if (videoOssId == null) {
log.error("任务视频不存在: {}", taskId);
return;
}
VMinioObject videoObject = vMinioObjectService.selectVMinioObjectById(videoOssId);
if (videoObject == null) {
log.error("MinIO对象不存在: {}", videoOssId);
return;
}
// 3. 下载原始视频到临时文件
File videoTempFile = File.createTempFile("analysis_input_", ".mp4");
try (InputStream inputStream = minioService.getObject(videoObject.getBucketName(), videoObject.getObjectName());
FileOutputStream fileOutputStream = new FileOutputStream(videoTempFile)) {
byte[] buffer = new byte[8192];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
fileOutputStream.write(buffer, 0, bytesRead);
}
}
// 4. 创建输出视频文件
File outputVideoFile = File.createTempFile("analysis_output_", ".mp4");
// 5. 创建检测器
HttpYoloDetector detector = new HttpYoloDetector("garbage", PYTHON_API_URL, MODEL_NAME, 0x00FF00);
// 6. 处理视频
processVideo(videoTempFile, outputVideoFile, detector, task);
// 7. 清理临时文件
if (videoTempFile.exists()) {
videoTempFile.delete();
}
log.info("视频分析完成: 任务ID={}", taskId);
} catch (Exception e) {
log.error("视频分析失败: 任务ID={}, 错误={}", taskId, e.getMessage());
e.printStackTrace();
}
}
/**
* 处理视频
* @param inputFile 输入视频文件
* @param outputFile 输出视频文件
* @param detector 检测器
* @param task 巡检任务
*/
private void processVideo(File inputFile, File outputFile, HttpYoloDetector detector, InspectionTask task) throws Exception {
FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(inputFile);
grabber.start();
FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(outputFile,
grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels());
recorder.setFormat("mp4");
recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
recorder.setFrameRate(grabber.getFrameRate());
recorder.setVideoBitrate(grabber.getVideoBitrate());
recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
recorder.setAudioBitrate(grabber.getAudioBitrate());
recorder.setAudioChannels(grabber.getAudioChannels());
recorder.setSampleRate(grabber.getSampleRate());
recorder.start();
OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
// 用于去重的垃圾检测结果缓存
Map<String, Long> detectedGarbageCache = new HashMap<>();
// 跟踪检测到的垃圾ID
final Long[] detectionId = {1L}; // 使用数组实现effectively final
// 帧计数
long frameCount = 0;
Frame frame;
while ((frame = grabber.grab()) != null) {
frameCount++;
if (frame.image != null) {
// 处理视频帧
Mat mat = converter.convert(frame);
if (mat != null && !mat.isNull()) {
// 每10帧执行一次检测减少API调用频率
if (frameCount % 10 == 0) {
List<Detection> detections = detector.detect(mat);
if (!CollectionUtils.isEmpty(detections)) {
// 绘制检测框
for (Detection detection : detections) {
drawDetection(mat, detection);
// 检查是否为新的垃圾检测结果
String detectionKey = generateDetectionKey(detection);
if (!detectedGarbageCache.containsKey(detectionKey)) {
// 这是新检测到的垃圾
detectedGarbageCache.put(detectionKey, detectionId[0]++);
// 创建告警记录
createAlarmRecord(task, detection, mat, frameCount);
} else {
// 更新上次检测时间
detectedGarbageCache.put(detectionKey, detectionId[0]++);
}
}
// 清理超过60秒未检测到的垃圾假设30fps
Long currentId = detectionId[0];
detectedGarbageCache.entrySet().removeIf(entry ->
(currentId - entry.getValue()) > grabber.getFrameRate() * 60);
}
}
// 转回Frame并写入录像
Frame processedFrame = converter.convert(mat);
recorder.record(processedFrame);
} else {
// 原样写入
recorder.record(frame);
}
} else if (frame.samples != null) {
// 音频帧原样写入
recorder.record(frame);
}
}
recorder.stop();
recorder.close();
grabber.stop();
grabber.close();
// 上传处理后的视频
uploadProcessedVideo(outputFile, task);
}
/**
* 上传处理后的视频到MinIO
*/
private void uploadProcessedVideo(File videoFile, InspectionTask task) throws Exception {
String fileName = "processed_" + System.currentTimeMillis() + ".mp4";
String bucketName = "inspection-videos";
CustomMultipartFile multipartFile = new CustomMultipartFile(videoFile, fileName, "video/mp4");
String objectUrl = minioService.putObject(bucketName, fileName, multipartFile.getInputStream());
VMinioObject minioObject = new VMinioObject();
minioObject.setBucketName(bucketName);
minioObject.setObjectName(fileName);
minioObject.setObjectUrl(objectUrl);
minioObject.setCreateBy("system");
minioObject.setCreateTime(new Date());
Long objectId = vMinioObjectService.insertVMinioObject(minioObject);
// 更新任务的处理后视频ID
task.setProcessedVideoOssId(objectId);
task.setVideoStatus(2); // 2: 已分析
taskMapper.updateInspectionTask(task);
log.info("处理后视频已上传: 任务ID={}, MinIO对象ID={}", task.getTaskId(), objectId);
// 删除临时文件
if (videoFile.exists()) {
videoFile.delete();
}
}
/**
* 生成检测结果的唯一键,用于检测结果去重
*/
private String generateDetectionKey(Detection detection) {
// 使用检测框的位置和大小来生成键
// 允许小范围波动,认为是同一个物体
Rect rect = detection.getRect();
int x = rect.x() / 10 * 10; // 取10的倍数允许小波动
int y = rect.y() / 10 * 10;
int w = rect.width() / 10 * 10;
int h = rect.height() / 10 * 10;
return String.format("%s_%d_%d_%d_%d", detection.getLabel(), x, y, w, h);
}
/**
* 创建告警记录
*/
private void createAlarmRecord(InspectionTask task, Detection detection, Mat frame, long frameCount) throws Exception {
// 创建告警图像临时文件
File alarmImageFile = File.createTempFile("alarm_", ".jpg");
// 裁剪检测区域,略微扩大区域
Rect rect = detection.getRect();
int x = Math.max(0, rect.x() - 10);
int y = Math.max(0, rect.y() - 10);
int w = Math.min(frame.cols() - x, rect.width() + 20);
int h = Math.min(frame.rows() - y, rect.height() + 20);
// 使用OpenCV保存告警图片
Mat roi = new Mat(frame, new Rect(x, y, w, h));
org.bytedeco.opencv.global.opencv_imgcodecs.imwrite(alarmImageFile.getAbsolutePath(), roi);
// 上传告警图片到MinIO
String fileName = "alarm_" + System.currentTimeMillis() + ".jpg";
String bucketName = "alarm-images";
CustomMultipartFile multipartFile = new CustomMultipartFile(alarmImageFile, fileName, "image/jpeg");
String objectUrl = minioService.putObject(bucketName, fileName, multipartFile.getInputStream());
VMinioObject minioObject = new VMinioObject();
minioObject.setBucketName(bucketName);
minioObject.setObjectName(fileName);
minioObject.setObjectUrl(objectUrl);
minioObject.setCreateBy("system");
minioObject.setCreateTime(new Date());
Long objectId = vMinioObjectService.insertVMinioObject(minioObject);
// 创建告警记录
AlarmRecord alarmRecord = new AlarmRecord();
alarmRecord.setDeviceId(task.getDeviceId());
alarmRecord.setAlarmType("garbage");
alarmRecord.setAlarmContent(detection.getLabel() + " - 置信度: " + String.format("%.2f", detection.getConfidence()));
alarmRecord.setTaskId(task.getTaskId());
alarmRecord.setImageOssId(objectId);
alarmRecord.setFramePosition(frameCount);
alarmRecord.setConfidence(detection.getConfidence());
alarmRecord.setCreateTime(new Date());
alarmRecordMapper.insertAlarmRecord(alarmRecord);
log.info("创建告警记录: 类型={}, 任务ID={}, 告警ID={}", detection.getLabel(), task.getTaskId(), alarmRecord.getAlarmId());
// 删除临时文件
if (alarmImageFile.exists()) {
alarmImageFile.delete();
}
}
/**
* 在图像上绘制检测框
*/
private void drawDetection(Mat frame, Detection detection) {
// 绘制边界框
Rect rect = detection.getRect();
Scalar color = new Scalar(detection.getColorBGR());
int thickness = 2;
rectangle(frame, rect, color, thickness, LINE_8, 0);
// 绘制标签背景
String label = String.format("%s: %.2f", detection.getLabel(), detection.getConfidence());
Point textPosition = new Point(rect.x(), rect.y() - 10);
// 获取文本大小 - 添加缺失的IntBuffer参数
IntBuffer baseline = IntBuffer.allocate(1);
org.bytedeco.opencv.opencv_core.Size textSize = getTextSize(
label, FONT_HERSHEY_SIMPLEX, 0.5, thickness, baseline);
// 绘制标签背景矩形
rectangle(frame,
new Rect(rect.x(), rect.y() - textSize.height() - 10,
textSize.width(), textSize.height() + 10),
color, FILLED, LINE_8, 0);
// 绘制文本
putText(frame, label, textPosition,
FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(255, 255, 255, 0),
1, LINE_AA, false);
}
}

View File

@@ -0,0 +1,52 @@
package com.ruoyi.video.service.impl;
import com.ruoyi.video.domain.InspectionTaskRecord;
import com.ruoyi.video.mapper.InspectionTaskRecordMapper;
import com.ruoyi.video.service.IInspectionTaskRecordService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Date;
import java.util.List;
/**
 * Service implementation for inspection task records.
 *
 * <p>A thin delegation layer over {@link InspectionTaskRecordMapper} that also
 * stamps create/update timestamps before persisting.</p>
 */
@Service
public class IInspectionTaskRecordServiceImpl implements IInspectionTaskRecordService {

    /** Data-access mapper for inspection task records. */
    @Autowired
    private InspectionTaskRecordMapper recordMapper;

    /** Fetch a single record by its primary key. */
    @Override
    public InspectionTaskRecord selectInspectionTaskRecordByRecordId(Long recordId) {
        return recordMapper.selectInspectionTaskRecordByRecordId(recordId);
    }

    /** List records matching the given query conditions. */
    @Override
    public List<InspectionTaskRecord> selectInspectionTaskRecordList(InspectionTaskRecord record) {
        return recordMapper.selectInspectionTaskRecordList(record);
    }

    /** Insert a new record, stamping its creation time first. */
    @Override
    public int insertInspectionTaskRecord(InspectionTaskRecord record) {
        record.setCreateTime(new Date());
        return recordMapper.insertInspectionTaskRecord(record);
    }

    /** Update an existing record, stamping its update time first. */
    @Override
    public int updateInspectionTaskRecord(InspectionTaskRecord record) {
        record.setUpdateTime(new Date());
        return recordMapper.updateInspectionTaskRecord(record);
    }

    /** Delete a single record by primary key. */
    @Override
    public int deleteInspectionTaskRecordByRecordId(Long recordId) {
        return recordMapper.deleteInspectionTaskRecordByRecordId(recordId);
    }

    /** Batch-delete records by their primary keys. */
    @Override
    public int deleteInspectionTaskRecordByRecordIds(Long[] recordIds) {
        return recordMapper.deleteInspectionTaskRecordByRecordIds(recordIds);
    }
}

View File

@@ -39,9 +39,6 @@ import java.util.concurrent.ConcurrentHashMap;
/**
* 巡检任务服务实现
*
* @Author: orange
* @CreateTime: 2025-01-16
*/
@Slf4j
@Service
@@ -56,6 +53,15 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
@Autowired
private AlarmRecordMapper alarmRecordMapper;
@Autowired
private com.ruoyi.video.mapper.InspectionTaskRecordMapper inspectionTaskRecordMapper;
@Autowired
private MinioService minioService;
@Autowired
private IVMinioObjectService vMinioObjectService;
// 运行状态缓存(避免重复执行)
private final Map<Long, Boolean> runningTasks = new ConcurrentHashMap<>();
@@ -70,21 +76,22 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
@Override
public InspectionTask selectInspectionTaskById(Long taskId) {
return inspectionTaskMapper.selectInspectionTaskByTaskId(taskId);
return inspectionTaskMapper.selectInspectionTaskById(taskId);
}
@Override
public int insertInspectionTask(InspectionTask inspectionTask) {
inspectionTask.setCreateTime(DateUtils.getNowDate());
inspectionTask.setCreateBy(SecurityUtils.getUsername());
inspectionTask.setExecuteCount(0L);
inspectionTask.setAlarmCount(0L);
// 这些字段在新版实体类中可能不存在,需要进行调整
// inspectionTask.setExecuteCount(0L);
// inspectionTask.setAlarmCount(0L);
// 获取设备信息
Device device = deviceService.selectDeviceByDeviceId(inspectionTask.getDeviceId());
if (device != null) {
inspectionTask.setDeviceName(device.getIp());
}
// 新版实体类可能不需要设备名称
// if (device != null) {
// inspectionTask.setDeviceName(device.getIp());
// }
return inspectionTaskMapper.insertInspectionTask(inspectionTask);
}
@@ -92,7 +99,6 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
@Override
public int updateInspectionTask(InspectionTask inspectionTask) {
inspectionTask.setUpdateTime(DateUtils.getNowDate());
inspectionTask.setUpdateBy(SecurityUtils.getUsername());
return inspectionTaskMapper.updateInspectionTask(inspectionTask);
}
@@ -101,88 +107,232 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
for (Long taskId : taskIds) {
stopInspectionTask(taskId);
}
return inspectionTaskMapper.deleteInspectionTaskByTaskIds(taskIds);
return inspectionTaskMapper.deleteInspectionTaskByIds(taskIds);
}
@Override
public boolean startInspectionTask(Long taskId) {
InspectionTask task = inspectionTaskMapper.selectInspectionTaskByTaskId(taskId);
InspectionTask task = inspectionTaskMapper.selectInspectionTaskById(taskId);
if (task == null) {
return false;
}
task.setStatus("0"); // 启用
// 启用任务,使用新版实体类的方法
task.setStatus(0); // 0表示启用
task.setUpdateTime(DateUtils.getNowDate());
task.setUpdateBy(SecurityUtils.getUsername());
inspectionTaskMapper.updateInspectionTask(task);
runningTasks.put(taskId, true);
// 这里应该集成到Quartz定时任务中
log.info("启动巡检任务: {} - {}", taskId, task.getTaskName());
log.info("启动巡检任务: {}", taskId);
return true;
}
@Override
public boolean stopInspectionTask(Long taskId) {
InspectionTask task = inspectionTaskMapper.selectInspectionTaskByTaskId(taskId);
InspectionTask task = inspectionTaskMapper.selectInspectionTaskById(taskId);
if (task == null) {
return false;
}
task.setStatus("1"); // 停用
// 停用任务,使用新版实体类的方法
task.setStatus(1); // 1表示停用
task.setUpdateTime(DateUtils.getNowDate());
task.setUpdateBy(SecurityUtils.getUsername());
inspectionTaskMapper.updateInspectionTask(task);
runningTasks.remove(taskId);
log.info("停止巡检任务: {} - {}", taskId, task.getTaskName());
log.info("停止巡检任务: {}", taskId);
return true;
}
@Override
@Async
public void executeInspectionTask(Long taskId) {
InspectionTask task = inspectionTaskMapper.selectInspectionTaskByTaskId(taskId);
if (task == null || !"0".equals(task.getStatus())) {
InspectionTask task = inspectionTaskMapper.selectInspectionTaskById(taskId);
if (task == null || task.getStatus() != 0) { // 0表示启用状态
return;
}
log.info("开始执行巡检任务: {} - {}", taskId, task.getTaskName());
log.info("开始执行巡检任务: {}", taskId);
// 创建巡检记录
InspectionTaskRecord record = new InspectionTaskRecord();
record.setTaskId(taskId);
record.setExecuteTime(new Date());
record.setStatus(1); // 执行中
inspectionTaskRecordMapper.insertInspectionTaskRecord(record);
Long recordId = record.getRecordId();
long startTime = System.currentTimeMillis();
try {
// 更新执行信息
task.setLastExecuteTime(new Date());
task.setExecuteCount(task.getExecuteCount() + 1);
// 更新任务状态为执行中
task.setStatus(1);
inspectionTaskMapper.updateInspectionTask(task);
// 获取设备信息
Device device = deviceService.selectDeviceByDeviceId(task.getDeviceId());
if (device == null) {
log.error("设备不存在: {}", task.getDeviceId());
updateRecordFailed(record, "设备不存在");
return;
}
// 执行视频分析
performVideoAnalysis(task, device);
// 执行视频录制和分析
performVideoAnalysisWithRecord(task, device, record);
// 更新任务执行统计信息
Long alarmCount = alarmRecordMapper.countAlarmByTaskId(taskId);
inspectionTaskMapper.updateTaskExecuteInfo(taskId, task.getExecuteCount(), alarmCount);
// 更新执行时长
long duration = (System.currentTimeMillis() - startTime) / 1000;
record.setDuration((int) duration);
record.setStatus(0); // 成功
inspectionTaskRecordMapper.updateInspectionTaskRecord(record);
// 更新任务状态为已完成
task.setStatus(2);
inspectionTaskMapper.updateInspectionTask(task);
} catch (Exception e) {
log.error("执行巡检任务失败: {} - {}", taskId, e.getMessage(), e);
log.error("巡检任务执行失败: taskId={}", taskId, e);
updateRecordFailed(record, e.getMessage());
// 更新任务状态为已完成(虽然失败)
task.setStatus(2);
inspectionTaskMapper.updateInspectionTask(task);
}
}
/**
* 更新记录为失败状态
*/
private void updateRecordFailed(InspectionTaskRecord record, String errorMsg) {
record.setStatus(1); // 失败
record.setResult("执行失败: " + errorMsg);
inspectionTaskRecordMapper.updateInspectionTaskRecord(record);
}
/**
* 执行视频录制和分析(带记录)
*/
private void performVideoAnalysisWithRecord(InspectionTask task, Device device, InspectionTaskRecord record) throws Exception {
FFmpegFrameGrabber grabber = null;
FFmpegFrameRecorder recorder = null;
File videoTempFile = null;
try {
// 1. 创建临时视频文件
videoTempFile = File.createTempFile("inspection_", ".mp4");
// 2. 创建视频抓取器
grabber = new FFmpegFrameGrabber(device.getUrl());
grabber.setOption("rtsp_transport", "tcp");
grabber.setOption("stimeout", "5000000");
grabber.start();
// 3. 创建视频录制器
recorder = new FFmpegFrameRecorder(videoTempFile,
grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels());
recorder.setFormat("mp4");
recorder.setVideoCodec(org.bytedeco.ffmpeg.global.avcodec.AV_CODEC_ID_H264);
recorder.setFrameRate(grabber.getFrameRate());
recorder.setVideoBitrate(grabber.getVideoBitrate());
if (grabber.getAudioChannels() > 0) {
recorder.setAudioCodec(org.bytedeco.ffmpeg.global.avcodec.AV_CODEC_ID_AAC);
recorder.setAudioBitrate(grabber.getAudioBitrate());
recorder.setAudioChannels(grabber.getAudioChannels());
recorder.setSampleRate(grabber.getSampleRate());
}
recorder.start();
log.info("开始录制视频流: {}, 时长: {}秒", device.getUrl(), task.getDuration());
// 4. 录制指定时长的视频
long startTime = System.currentTimeMillis();
long duration = task.getDuration() * 1000L; // 转换为毫秒
while (System.currentTimeMillis() - startTime < duration) {
Frame frame = grabber.grab();
if (frame != null) {
recorder.record(frame);
}
}
// 5. 停止录制
recorder.stop();
recorder.close();
grabber.stop();
grabber.close();
log.info("视频录制完成: {}", videoTempFile.getAbsolutePath());
// 6. 上传视频到MinIO
String fileName = "inspection_" + task.getTaskId() + "_" + System.currentTimeMillis() + ".mp4";
String bucketName = "inspection-videos";
CustomMultipartFile multipartFile = new CustomMultipartFile(videoTempFile, fileName, "video/mp4");
String objectUrl = minioService.putObject(bucketName, fileName, multipartFile.getInputStream());
VMinioObject minioObject = new VMinioObject();
minioObject.setBucketName(bucketName);
minioObject.setObjectName(fileName);
minioObject.setObjectUrl(objectUrl);
minioObject.setCreateBy("system");
minioObject.setCreateTime(new Date());
Long objectId = vMinioObjectService.insertVMinioObject(minioObject);
// 7. 更新任务的视频ID和状态
task.setVideoOssId(objectId);
task.setVideoStatus(1); // 已录制未分析
inspectionTaskMapper.updateInspectionTask(task);
// 8. 更新记录的附件信息视频URL
record.setAccessory(objectUrl);
inspectionTaskRecordMapper.updateInspectionTaskRecord(record);
log.info("视频已上传到MinIO: objectId={}, url={}", objectId, objectUrl);
// 9. 调用Python服务进行视频分析
analyzeVideoAndUpdateRecord(task, record, videoTempFile);
} finally {
// 清理资源
if (recorder != null) {
try { recorder.close(); } catch (Exception ignore) {}
}
if (grabber != null) {
try { grabber.close(); } catch (Exception ignore) {}
}
if (videoTempFile != null && videoTempFile.exists()) {
videoTempFile.delete();
}
}
}
/**
* 分析视频并更新记录
*/
private void analyzeVideoAndUpdateRecord(InspectionTask task, InspectionTaskRecord record, File videoFile) throws Exception {
log.info("开始分析视频: taskId={}, recordId={}", task.getTaskId(), record.getRecordId());
// 调用VideoAnalysisService进行分析
com.ruoyi.video.service.VideoAnalysisService videoAnalysisService =
SpringUtils.getBean(com.ruoyi.video.service.VideoAnalysisService.class);
// 异步调用视频分析(会处理检测结果并创建告警)
videoAnalysisService.analyzeVideoWithRecord(task, record, videoFile);
}
/**
* 执行视频分析
*/
private void performVideoAnalysis(InspectionTask task, Device device) {
if (!"0".equals(task.getEnableDetection())) {
log.info("巡检任务未启用检测: {}", task.getTaskId());
return;
}
// 这些条件在新版实体类中可能需要调整
// 0表示启用检测
// if (task.getEnableDetection() != 0) {
// log.info("巡检任务未启用检测: {}", task.getTaskId());
// return;
// }
FFmpegFrameGrabber grabber = null;
FFmpegFrameRecorder sessionRecorder = null;
@@ -214,7 +364,9 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
// 会话聚合参数与状态
final long minGapMs = 3000L; // 目标消失超过该值视为结束
final long maxDurationMs = 30000L; // 单次会话最长30s
final float detectionThreshold = task.getThreshold() != null ? task.getThreshold().floatValue() : 0.7f;
// 这个字段在新版实体类中可能需要调整
// final float detectionThreshold = task.getThreshold() != null ? task.getThreshold().floatValue() : 0.7f;
final float detectionThreshold = 0.7f; // 使用默认阈值
boolean sessionActive = false;
long sessionStartMs = 0L;
@@ -263,9 +415,9 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
// 最高置信度
Detection best = detections.stream()
.max(Comparator.comparingDouble(Detection::conf))
.max(Comparator.comparingDouble(Detection::getConfidence))
.orElse(null);
boolean hasTarget = best != null && best.conf() >= detectionThreshold;
boolean hasTarget = best != null && best.getConfidence() >= detectionThreshold;
if (hasTarget) {
lastSeenMs = now;
@@ -296,23 +448,24 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
VMinioObject rec = new VMinioObject();
rec.setObjectName(up.getObjectName());
rec.setUrl(up.getUrl());
rec.setObjectUrl(up.getUrl());
rec.setOriginalName(originalName);
objSvc.insert(rec);
objSvc.insertVMinioObject(rec);
AlarmRecord alarm = new AlarmRecord();
alarm.setTaskId(task.getTaskId());
alarm.setTaskName(task.getTaskName());
// 这些字段在新版实体类中可能不存在,需要调整
// alarm.setTaskName(task.getTaskName());
alarm.setDeviceId(task.getDeviceId());
alarm.setDeviceName(task.getDeviceName());
alarm.setAlarmType(best.cls());
alarm.setAlarmLevel(getAlarmLevel(best.conf()));
alarm.setAlarmDesc(String.format("检测到%s置信度: %.2f", best.cls(), best.conf()));
alarm.setConfidence((double) best.conf());
alarm.setImagePath(up.getUrl());
alarm.setAlarmTime(new Date(now));
alarm.setHandleStatus("0");
alarm.setCreateBy(SecurityUtils.getUsername());
// alarm.setDeviceName(task.getDeviceName());
alarm.setAlarmType(best.getLabel());
// 这里需要转换double为float
alarm.setAlarmLevel(getAlarmLevel((float)best.getConfidence()));
alarm.setAlarmContent(String.format("检测到%s置信度: %.2f", best.getLabel(), best.getConfidence()));
alarm.setConfidence(best.getConfidence());
alarm.setImageOssId(rec.getObjectId());
alarm.setCreateTime(new Date(now));
alarm.setStatus(0); // 0: 未处理
saveAlarmRecord(alarm);
currentAlarmId = alarm.getAlarmId();
@@ -387,13 +540,14 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
VMinioObject vrec = new VMinioObject();
vrec.setObjectName(upv.getObjectName());
vrec.setUrl(upv.getUrl());
vrec.setObjectUrl(upv.getUrl());
vrec.setOriginalName(originalName);
objSvc.insert(vrec);
objSvc.insertVMinioObject(vrec);
AlarmRecord patch = new AlarmRecord();
patch.setAlarmId(currentAlarmId);
patch.setVideoPath(upv.getUrl());
// videoPath字段在新版实体类中可能不存在需要调整
// patch.setVideoPath(upv.getUrl());
alarmRecordMapper.updateAlarmRecord(patch);
} catch (Exception ue) {
log.warn("巡检会话-上传/回填视频失败: {}", ue.getMessage());
@@ -468,7 +622,7 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
private List<Detection> performDetection(Mat mat) {
try {
if (modelManager != null) {
YoloDetector detector = modelManager.get("garbage"); // 应该使用垃圾检测 当开启自动巡检的时候 需要替换模型
YoloDetector detector = modelManager.get("garbage"); // 使用垃圾检测模型
if (detector != null) {
return detector.detect(mat);
}
@@ -508,40 +662,41 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
@Override
public void handleDetectionResults(Long taskId, List<Detection> detections, String imagePath) {
InspectionTask task = inspectionTaskMapper.selectInspectionTaskByTaskId(taskId);
InspectionTask task = inspectionTaskMapper.selectInspectionTaskById(taskId);
if (task == null || detections.isEmpty()) {
return;
}
for (Detection detection : detections) {
// 检查置信度是否超过阈值
if (detection.conf() >= task.getThreshold()) {
// 创建警记录
// 检查置信度是否超过阈值 - 这里使用了默认阈值,因为实体类可能已变更
if (detection.getConfidence() >= 0.7) {
// 创建警记录
AlarmRecord alarmRecord = new AlarmRecord();
alarmRecord.setTaskId(taskId);
alarmRecord.setTaskName(task.getTaskName());
// 这些字段在新版实体类中可能不存在,需要调整
// alarmRecord.setTaskName(task.getTaskName());
alarmRecord.setDeviceId(task.getDeviceId());
alarmRecord.setDeviceName(task.getDeviceName());
alarmRecord.setAlarmType(detection.cls());
alarmRecord.setAlarmLevel(getAlarmLevel(detection.conf()));
alarmRecord.setAlarmDesc(String.format("检测到%s置信度: %.2f",
detection.cls(), detection.conf()));
alarmRecord.setConfidence((double) detection.conf());
alarmRecord.setImagePath(imagePath);
alarmRecord.setAlarmTime(new Date());
alarmRecord.setHandleStatus("0"); // 未处理
alarmRecord.setCreateBy(SecurityUtils.getUsername());
// alarmRecord.setDeviceName(task.getDeviceName());
alarmRecord.setAlarmType(detection.getLabel());
alarmRecord.setAlarmLevel(getAlarmLevel((float)detection.getConfidence()));
alarmRecord.setAlarmContent(String.format("检测到%s置信度: %.2f",
detection.getLabel(), detection.getConfidence()));
alarmRecord.setConfidence(detection.getConfidence());
// 设置图片路径 - 需要调整为适合新实体类的方式
// alarmRecord.setImagePath(imagePath);
alarmRecord.setCreateTime(new Date());
alarmRecord.setStatus(0); // 0: 未处理
saveAlarmRecord(alarmRecord);
log.warn("生成警记录: 任务[{}] 检测到[{}] 置信度[{}]",
taskId, detection.cls(), detection.conf());
log.warn("生成警记录: 任务[{}] 检测到[{}] 置信度[{}]",
taskId, detection.getLabel(), detection.getConfidence());
}
}
}
/**
* 根据置信度确定警级别
* 根据置信度确定警级别
*/
private String getAlarmLevel(float confidence) {
if (confidence >= 0.9f) {
@@ -557,7 +712,7 @@ public class InspectionTaskServiceImpl implements InspectionTaskService {
public void saveAlarmRecord(AlarmRecord alarmRecord) {
alarmRecord.setCreateTime(DateUtils.getNowDate());
alarmRecordMapper.insertAlarmRecord(alarmRecord);
log.info("保存警记录: {}", alarmRecord.getAlarmId());
log.info("保存警记录: {}", alarmRecord.getAlarmId());
}
@Override

View File

@@ -6,19 +6,48 @@ import com.ruoyi.video.service.IVMinioObjectService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
@Service
public class VMinioObjectServiceImpl implements IVMinioObjectService {
@Autowired
private VMinioObjectMapper mapper;
@Override
public VMinioObject selectVMinioObjectById(Long objectId) {
return mapper.selectVMinioObjectById(objectId);
}
@Override
public int insert(VMinioObject obj) {
if (obj.getDelFlag() == null) {
obj.setDelFlag("0");
public List<VMinioObject> selectVMinioObjectList(VMinioObject vMinioObject) {
return mapper.selectVMinioObjectList(vMinioObject);
}
return mapper.insertVMinioObject(obj);
@Override
public Long insertVMinioObject(VMinioObject vMinioObject) {
int rows = mapper.insertVMinioObject(vMinioObject);
return rows > 0 ? vMinioObject.getObjectId() : null;
}
@Override
public int updateVMinioObject(VMinioObject vMinioObject) {
return mapper.updateVMinioObject(vMinioObject);
}
@Override
public int deleteVMinioObjectById(Long objectId) {
return mapper.deleteVMinioObjectById(objectId);
}
@Override
public int deleteVMinioObjectByIds(Long[] objectIds) {
return mapper.deleteVMinioObjectByIds(objectIds);
}
@Override
public VMinioObject selectByObjectName(String objectName) {
return mapper.selectVMinioObjectByObjectName(objectName);
}
@Override
@@ -31,13 +60,16 @@ public class VMinioObjectServiceImpl implements IVMinioObjectService {
return mapper.deleteVMinioObjectById(id);
}
@Override
public VMinioObject selectByObjectName(String objectName) {
return mapper.selectVMinioObjectByObjectName(objectName);
}
@Override
public int deleteByObjectName(String objectName) {
return mapper.deleteVMinioObjectByObjectName(objectName);
}
@Override
public int insert(VMinioObject obj) {
if (obj.getDelFlag() == null) {
obj.setDelFlag("0");
}
return mapper.insertVMinioObject(obj);
}
}

View File

@@ -540,7 +540,7 @@ public class MediaTransferFlvByFFmpeg extends MediaTransfer {
try {
if (ctx.channel().isWritable()) {
// 发送帧前先发送header
if (ClientType.HTTP.getType() == ctype.getType()) {
if (ClientType.HTTP == ctype) {
ChannelFuture future = ctx.writeAndFlush(Unpooled.copiedBuffer(header));
future.addListener(new GenericFutureListener<Future<? super Void>>() {
@Override
@@ -550,7 +550,7 @@ public class MediaTransferFlvByFFmpeg extends MediaTransfer {
}
}
});
} else if (ClientType.WEBSOCKET.getType() == ctype.getType()) {
} else if (ClientType.WEBSOCKET == ctype) {
ChannelFuture future = ctx
.writeAndFlush(new BinaryWebSocketFrame(Unpooled.copiedBuffer(header)));
future.addListener(new GenericFutureListener<Future<? super Void>>() {

View File

@@ -0,0 +1,175 @@
package com.ruoyi.video.thread.detector;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.ruoyi.video.domain.Detection;
import org.bytedeco.opencv.opencv_core.*;
import org.bytedeco.javacpp.BytePointer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.multipart.MultipartFile;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.*;
import static org.bytedeco.opencv.global.opencv_imgcodecs.imencode;
/**
 * YOLOv8 detector that delegates inference to an external Python HTTP service,
 * replacing the previous local model-loading approach.
 *
 * Each {@link #detect(Mat)} call JPEG-encodes the frame, POSTs it as a
 * multipart upload to the configured endpoint, and maps the JSON response
 * into {@link Detection} objects. Any transport or parse failure yields an
 * empty list so a single bad frame never breaks the video pipeline.
 */
public class HttpYoloDetector implements YoloDetector {

    private static final Logger log = LoggerFactory.getLogger(HttpYoloDetector.class);

    // Detector name reported by name().
    private final String name;
    // Full URL of the Python detection endpoint, e.g. "http://host:8000/api/detect/file".
    private final String apiUrl;
    // Model identifier sent as the "model_name" form field.
    private final String modelName;
    // Packed BGR colour attached to every Detection (used when drawing boxes).
    private final int colorBGR;
    private final RestTemplate restTemplate;
    private final ObjectMapper objectMapper;

    /**
     * Creates an HTTP-backed detector.
     *
     * @param name      detector name
     * @param apiUrl    Python service API URL (e.g. "http://localhost:8000/api/detect/file")
     * @param modelName model name the remote service should use
     * @param colorBGR  bounding-box colour in packed BGR
     */
    public HttpYoloDetector(String name, String apiUrl, String modelName, int colorBGR) {
        this.name = name;
        this.apiUrl = apiUrl;
        this.modelName = modelName;
        this.colorBGR = colorBGR;
        this.restTemplate = new RestTemplate();
        this.objectMapper = new ObjectMapper();
        log.info("创建HTTP YOLOv8检测器: {}, 服务地址: {}, 模型: {}", name, apiUrl, modelName);
    }

    @Override
    public String name() {
        return name;
    }

    /**
     * Runs detection on one BGR frame via the remote service.
     *
     * @param bgr frame in OpenCV BGR layout; null/empty frames are skipped
     * @return detections from the service, or an empty list on any failure
     */
    @Override
    public List<Detection> detect(Mat bgr) {
        if (bgr == null || bgr.empty()) {
            return Collections.emptyList();
        }
        try {
            byte[] jpgBytes = encodeJpeg(bgr);

            // Build the multipart request: model name + image file part.
            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.MULTIPART_FORM_DATA);
            MultiValueMap<String, Object> body = new LinkedMultiValueMap<>();
            body.add("model_name", modelName);
            body.add("file", new CustomByteArrayResource(jpgBytes, "image.jpg"));
            HttpEntity<MultiValueMap<String, Object>> requestEntity = new HttpEntity<>(body, headers);

            ResponseEntity<String> response = restTemplate.postForEntity(apiUrl, requestEntity, String.class);
            String responseBody = response.getBody();
            if (responseBody != null) {
                return parseDetections(responseBody);
            }
        } catch (Exception e) {
            // Log the full stack trace; the original logged only the message,
            // which hides the root cause of transport/parse failures.
            log.error("HTTP检测请求失败: {}", e.getMessage(), e);
        }
        return Collections.emptyList();
    }

    /**
     * JPEG-encodes an OpenCV Mat, releasing the native buffer even on failure
     * (the original deallocated only on the success path, leaking native memory
     * whenever imencode or the copy threw).
     */
    private byte[] encodeJpeg(Mat bgr) {
        BytePointer buffer = new BytePointer();
        try {
            imencode(".jpg", bgr, buffer);
            // NOTE(review): assumes capacity() equals the encoded length after
            // imencode, as the original did — confirm against JavaCPP presets docs.
            byte[] jpgBytes = new byte[(int) (buffer.capacity())];
            buffer.get(jpgBytes);
            return jpgBytes;
        } finally {
            buffer.deallocate();
        }
    }

    /**
     * Parses the service's JSON response into Detection objects.
     * A missing "detections" field is treated as "no detections" instead of
     * throwing an NPE as the original code did.
     */
    @SuppressWarnings("unchecked")
    private List<Detection> parseDetections(String responseBody) throws IOException {
        Map<String, Object> result = objectMapper.readValue(responseBody, Map.class);
        List<Map<String, Object>> detectionsJson = (List<Map<String, Object>>) result.get("detections");
        List<Detection> detections = new ArrayList<>();
        if (detectionsJson == null) {
            return detections;
        }
        for (Map<String, Object> det : detectionsJson) {
            String label = (String) det.get("label");
            double confidence = ((Number) det.get("confidence")).doubleValue();
            int x = ((Number) det.get("x")).intValue();
            int y = ((Number) det.get("y")).intValue();
            int width = ((Number) det.get("width")).intValue();
            int height = ((Number) det.get("height")).intValue();
            detections.add(new Detection(label, confidence, new Rect(x, y, width, height), colorBGR));
        }
        return detections;
    }

    /**
     * ByteArrayResource that reports a filename so the multipart part carries
     * one; replaces the previous ~50-line hand-rolled Resource implementation.
     */
    private static class CustomByteArrayResource extends org.springframework.core.io.ByteArrayResource {

        private final String filename;

        CustomByteArrayResource(byte[] byteArray, String filename) {
            super(byteArray);
            this.filename = filename;
        }

        @Override
        public String getFilename() {
            return this.filename;
        }
    }
}

View File

@@ -1,248 +0,0 @@
package com.ruoyi.video.thread.detector;
import com.ruoyi.video.domain.Detection;
import org.bytedeco.javacpp.indexer.FloatRawIndexer;
import org.bytedeco.opencv.opencv_core.*;
import org.bytedeco.opencv.opencv_dnn.Net;
import java.nio.file.*;
import java.util.*;
import static org.bytedeco.opencv.global.opencv_dnn.*;
import static org.bytedeco.opencv.global.opencv_core.*;
import static org.bytedeco.opencv.global.opencv_imgproc.*;
/**
 * YOLO detector backed by an ONNX model executed through OpenCV DNN on the CPU.
 *
 * The constructor locates a .onnx file and an optional classes.txt in a model
 * directory; {@link #detect(Mat)} runs one forward pass per frame, decodes the
 * raw network output, and applies a pure-Java NMS to produce Detection boxes.
 */
public final class OnnxYoloDetector implements YoloDetector {

    // Human-readable model/detector name, returned by name() and prefixed onto labels.
    private final String modelName;
    // OpenCV DNN network loaded from the .onnx file.
    private final Net net;
    // Network input size (width x height) used by blobFromImage.
    private final Size input;
    // Confidence threshold for keeping a candidate box; IoU threshold for NMS.
    private final float confTh = 0.25f, nmsTh = 0.45f;
    // Class labels from classes.txt; empty array when the file is absent.
    private final String[] classes;
    // Packed BGR colour attached to every Detection.
    private final int colorBGR;

    /**
     * Loads the ONNX model found in {@code dir}.
     *
     * @param name     detector/model name
     * @param dir      directory containing the .onnx file and optional classes.txt
     * @param inW      network input width
     * @param inH      network input height
     * @param backend  NOTE(review): currently ignored — the backend is hard-coded
     *                 to DNN_BACKEND_OPENCV / DNN_TARGET_CPU below; confirm intent
     * @param colorBGR bounding-box colour in packed BGR
     * @throws Exception if no .onnx file is found or the model fails to load
     */
    public OnnxYoloDetector(String name, Path dir, int inW, int inH, String backend, int colorBGR) throws Exception {
        this.modelName = name;
        this.input = new Size(inW, inH);
        this.colorBGR = colorBGR;
        // Locate the ONNX model file in the model directory.
        String onnx = findModelFile(dir, ".onnx");
        if (onnx == null) {
            throw new Exception("找不到ONNX模型文件请确保目录中存在 .onnx 文件: " + dir);
        }
        // Read class labels (one per line, blanks ignored) if present.
        Path clsPath = dir.resolve("classes.txt");
        if (Files.exists(clsPath)) {
            this.classes = Files.readAllLines(clsPath).stream().map(String::trim)
                    .filter(s -> !s.isEmpty()).toArray(String[]::new);
        } else {
            this.classes = new String[0];
        }
        try {
            // Load the ONNX model and pin it to the OpenCV CPU backend.
            this.net = readNetFromONNX(onnx);
            net.setPreferableBackend(DNN_BACKEND_OPENCV);
            net.setPreferableTarget(DNN_TARGET_CPU);
            System.out.println("ONNX模型加载成功: " + name + " (" + onnx + ")");
        } catch (Exception e) {
            throw new Exception("模型加载失败: " + e.getMessage() +
                    "\n请确保ONNX模型文件格式正确", e);
        }
    }

    /**
     * Finds the first file in {@code dir} whose name ends with {@code extension}
     * (case-insensitive); returns null if none is found or the directory cannot
     * be listed.
     *
     * Fix: Files.list returns a Stream that must be closed — the original never
     * closed it, leaking a directory handle per call; Locale.ROOT makes the
     * case-fold locale-independent.
     */
    private String findModelFile(Path dir, String extension) {
        try (java.util.stream.Stream<Path> files = Files.list(dir)) {
            String suffix = extension.toLowerCase(Locale.ROOT);
            return files
                    .filter(path -> path.toString().toLowerCase(Locale.ROOT).endsWith(suffix))
                    .map(Path::toString)
                    .findFirst()
                    .orElse(null);
        } catch (Exception e) {
            return null;
        }
    }

    @Override public String name() { return modelName; }

    /**
     * Runs one forward pass on a BGR frame and returns NMS-filtered detections.
     * Any per-frame failure returns an empty list (deliberate best-effort: one
     * bad frame must not break the stream).
     */
    @Override
    public List<Detection> detect(Mat bgr) {
        if (bgr == null || bgr.empty()) return Collections.emptyList();
        // Normalise to 3-channel BGR so blobFromImage's assertion cannot fail.
        if (bgr.channels() != 3) {
            Mat tmp = new Mat();
            if (bgr.channels() == 1) cvtColor(bgr, tmp, COLOR_GRAY2BGR);
            else if (bgr.channels() == 4) cvtColor(bgr, tmp, COLOR_BGRA2BGR);
            else bgr.copyTo(tmp);
            bgr = tmp;
        }
        try (Mat blob = blobFromImage(bgr, 1.0/255.0, input, new Scalar(0.0), true, false, CV_32F)) {
            net.setInput(blob);
            // ===== Multi-output compatibility (correct Bytedeco usage) =====
            org.bytedeco.opencv.opencv_core.StringVector outNames = net.getUnconnectedOutLayersNames();
            List<Mat> outs = new ArrayList<>();
            if (outNames == null || outNames.size() == 0) {
                // Single default output: forward() returns the Mat directly.
                Mat out = net.forward();
                outs.add(out);
            } else {
                // Multiple outputs: collect them through a MatVector.
                org.bytedeco.opencv.opencv_core.MatVector outBlobs =
                        new org.bytedeco.opencv.opencv_core.MatVector(outNames.size());
                net.forward(outBlobs, outNames); // correct overload for named outputs
                for (long i = 0; i < outBlobs.size(); i++) {
                    outs.add(outBlobs.get(i));
                }
            }
            int fw = bgr.cols(), fh = bgr.rows();
            List<Rect2d> boxes = new ArrayList<>();
            List<Float> scores = new ArrayList<>();
            List<Integer> classIds = new ArrayList<>();
            for (Mat out : outs) {
                parseYoloOutput(out, fw, fh, boxes, scores, classIds);
            }
            if (boxes.isEmpty()) return Collections.emptyList();
            // Pure-Java NMS avoids MatOf* / Vector API compatibility issues.
            List<Integer> keep = nmsIndices(boxes, scores, nmsTh);
            List<Detection> result = new ArrayList<>(keep.size());
            for (int k : keep) {
                Rect2d r = boxes.get(k);
                Rect rect = new Rect((int)r.x(), (int)r.y(), (int)r.width(), (int)r.height());
                int cid = classIds.get(k);
                String cname = (cid >= 0 && cid < classes.length) ? classes[cid] : ("cls"+cid);
                result.add(new Detection("["+modelName+"] "+cname, scores.get(k), rect, colorBGR));
            }
            return result;
        } catch (Throwable e) {
            // A single failed frame must not affect the overall pipeline.
            return Collections.emptyList();
        }
    }

    /**
     * Parses a YOLO-style output tensor into N×C rows (C >= 6: cx, cy, w, h,
     * objectness, per-class scores) and appends candidates above confTh to
     * boxes/scores/classIds. Handles 2-D, 3-D and 4-D layouts in either
     * N-major or C-major orientation.
     */
    private void parseYoloOutput(Mat out, int fw, int fh,
                                 List<Rect2d> boxes, List<Float> scores, List<Integer> classIds) {
        int dims = out.dims();
        Mat m;
        if (dims == 2) {
            // NxC or CxN
            if (out.cols() >= 6) {
                m = out;
            } else {
                Mat tmp = new Mat();
                transpose(out, tmp); // CxN -> NxC
                m = tmp;
            }
        } else if (dims == 3) {
            // [1,N,C] or [1,C,N]
            if (out.size(2) >= 6) {
                m = out.reshape(1, out.size(1)); // -> N×C
            } else {
                Mat squeezed = out.reshape(1, out.size(1)); // C×N
                Mat tmp = new Mat();
                transpose(squeezed, tmp); // -> N×C
                m = tmp;
            }
        } else if (dims == 4) {
            // [1,1,N,C] or [1,1,C,N]
            int a = out.size(2), b = out.size(3);
            if (b >= 6) {
                m = out.reshape(1, a).clone(); // -> N×C
            } else {
                Mat cxn = out.reshape(1, b); // C×N
                Mat tmp = new Mat();
                transpose(cxn, tmp); // -> N×C
                m = tmp.clone();
            }
        } else {
            return; // unsupported tensor shape
        }
        int N = m.rows(), C = m.cols();
        if (C < 6 || N <= 0) return;
        FloatRawIndexer idx = m.createIndexer();
        for (int i = 0; i < N; i++) {
            float cx = idx.get(i,0), cy = idx.get(i,1), w = idx.get(i,2), h = idx.get(i,3);
            float obj = idx.get(i,4);
            // Best class score among columns 5..C-1.
            int bestCls = -1; float bestScore = 0f;
            for (int c = 5; c < C; c++) {
                float p = idx.get(i,c);
                if (p > bestScore) { bestScore = p; bestCls = c - 5; }
            }
            float conf = obj * bestScore;
            if (conf < confTh) continue;
            // Assumes normalised centre-point format (cx,cy,w,h); if your IR
            // emits x1,y1,x2,y2, change the conversion below accordingly.
            int bx = Math.max(0, Math.round(cx * fw - (w * fw) / 2f));
            int by = Math.max(0, Math.round(cy * fh - (h * fh) / 2f));
            int bw = Math.min(fw - bx, Math.round(w * fw));
            int bh = Math.min(fh - by, Math.round(h * fh));
            if (bw <= 0 || bh <= 0) continue;
            boxes.add(new Rect2d(bx, by, bw, bh));
            scores.add(conf);
            classIds.add(bestCls);
        }
    }

    /**
     * Pure-Java greedy NMS (IoU suppression); returns the indices of the boxes
     * to keep, highest score first.
     */
    private List<Integer> nmsIndices(List<Rect2d> boxes, List<Float> scores, float nmsThreshold) {
        List<Integer> order = new ArrayList<>(boxes.size());
        for (int i = 0; i < boxes.size(); i++) order.add(i);
        // Sort candidate indices by descending score.
        order.sort((i, j) -> Float.compare(scores.get(j), scores.get(i)));
        List<Integer> keep = new ArrayList<>();
        boolean[] removed = new boolean[boxes.size()];
        for (int a = 0; a < order.size(); a++) {
            int i = order.get(a);
            if (removed[i]) continue;
            keep.add(i);
            Rect2d bi = boxes.get(i);
            double areaI = bi.width() * bi.height();
            // Suppress every lower-scored box overlapping this one above the IoU threshold.
            for (int b = a + 1; b < order.size(); b++) {
                int j = order.get(b);
                if (removed[j]) continue;
                Rect2d bj = boxes.get(j);
                double areaJ = bj.width() * bj.height();
                double xx1 = Math.max(bi.x(), bj.x());
                double yy1 = Math.max(bi.y(), bj.y());
                double xx2 = Math.min(bi.x() + bi.width(), bj.x() + bj.width());
                double yy2 = Math.min(bi.y() + bi.height(), bj.y() + bj.height());
                double w = Math.max(0, xx2 - xx1);
                double h = Math.max(0, yy2 - yy1);
                double inter = w * h;
                double iou = inter / (areaI + areaJ - inter + 1e-9);
                if (iou > nmsThreshold) removed[j] = true;
            }
        }
        return keep;
    }

    @Override public void close(){ net.close(); }
}

View File

@@ -2,130 +2,121 @@ package com.ruoyi.video.utils;
import org.springframework.web.multipart.MultipartFile;
import java.io.*;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
/**
* File包装成MultipartFile
* @Author: orange
* @CreateTime: 2025-01-16
* 自定义MultipartFile实现,用于文件上传
*/
public class CustomMultipartFile implements MultipartFile {
// 定义一个File类型的变量file
private final File file;
// 定义一个String类型的变量contentType
private final String name;
private final String originalFilename;
private final String contentType;
// 构造方法传入一个File类型的变量file和一个String类型的变量contentType
public CustomMultipartFile(File file, String contentType) {
/**
* 构造函数 - 从File创建
*
* @param file 文件
* @param filename 文件名
* @param contentType 内容类型
*/
public CustomMultipartFile(File file, String filename, String contentType) {
this.file = file;
this.name = filename;
this.originalFilename = filename;
this.contentType = contentType;
}
// 获取文件名
/**
* 构造函数 - 从字节数组创建
*
* @param bytes 文件字节数组
* @param filename 文件名
* @param contentType 内容类型
*/
public CustomMultipartFile(byte[] bytes, String filename, String contentType) {
this.file = null;
this.name = filename;
this.originalFilename = filename;
this.contentType = contentType;
}
/**
* 构造函数 - 从File创建指定Content-Type
*
* @param file 文件
* @param contentType 内容类型
*/
public CustomMultipartFile(File file, String contentType) {
this.file = file;
this.name = file.getName();
this.originalFilename = file.getName();
this.contentType = contentType;
}
@Override
public String getName() {
return this.file.getName();
return name;
}
// 获取原始文件名
@Override
public String getOriginalFilename() {
return this.file.getName();
return originalFilename;
}
// 获取文件类型
@Override
public String getContentType() {
return this.contentType;
return contentType;
}
// 判断文件是否为空
@Override
public boolean isEmpty() {
return this.file.length() == 0L;
return getSize() == 0;
}
// 获取文件大小
@Override
public long getSize() {
return this.file.length();
if (file != null) {
return file.length();
}
return 0;
}
// 获取文件的字节数组
@Override
public byte[] getBytes() throws IOException {
// 创建一个FileInputStream对象传入file
InputStream inputStream = new FileInputStream(this.file);
byte[] var2;
try {
// 读取所有字节数组
var2 = inputStream.readAllBytes();
} catch (Throwable var5) {
try {
// 关闭输入流
inputStream.close();
} catch (Throwable var4) {
var5.addSuppressed(var4);
if (file != null) {
try (InputStream is = new FileInputStream(file)) {
return is.readAllBytes();
}
}
return new byte[0];
}
throw var5;
}
// 关闭输入流
inputStream.close();
return var2;
}
// 获取文件的输入流
@Override
public InputStream getInputStream() throws IOException {
return new FileInputStream(this.file);
if (file != null) {
return new FileInputStream(file);
}
return new ByteArrayInputStream(new byte[0]);
}
// 将文件传输到指定的文件
@Override
public void transferTo(File dest) throws IOException, IllegalStateException {
// 如果目标文件不存在,则创建
if (!dest.exists()) {
dest.createNewFile();
}
// 创建一个FileInputStream对象传入file
InputStream inputStream = new FileInputStream(this.file);
try {
// 创建一个FileOutputStream对象传入dest
OutputStream outputStream = new FileOutputStream(dest);
try {
// 创建一个字节数组大小为1024
byte[] buffer = new byte[1024];
if (file != null) {
try (FileInputStream fis = new FileInputStream(file)) {
try (java.io.FileOutputStream fos = new java.io.FileOutputStream(dest)) {
byte[] buffer = new byte[8192];
int bytesRead;
// 循环读取文件,直到读取完毕
while((bytesRead = inputStream.read(buffer)) != -1) {
// 将读取的字节数组写入目标文件
outputStream.write(buffer, 0, bytesRead);
while ((bytesRead = fis.read(buffer)) != -1) {
fos.write(buffer, 0, bytesRead);
}
} catch (Throwable var8) {
try {
// 关闭输出流
outputStream.close();
} catch (Throwable var7) {
var8.addSuppressed(var7);
}
throw var8;
}
// 关闭输出流
outputStream.close();
} catch (Throwable var9) {
try {
// 关闭输入流
inputStream.close();
} catch (Throwable var6) {
var9.addSuppressed(var6);
}
throw var9;
}
// 关闭输入流
inputStream.close();
}
}

View File

@@ -11,11 +11,11 @@ public final class Overlay {
public static void draw(List<Detection> dets, Mat frame) {
for (Detection d : dets) {
Rect r = d.box();
int bgr = d.colorBGR();
Rect r = d.getRect();
int bgr = d.getColorBGR();
Scalar c = new Scalar(bgr & 0xFF, (bgr >> 8) & 0xFF, (bgr >> 16) & 0xFF, 0);
rectangle(frame, r, c, 2, LINE_8, 0);
String label = d.cls()+" "+String.format("%.2f", d.conf());
String label = d.getLabel()+" "+String.format("%.2f", d.getConfidence());
int[] baseline = new int[1];
Size t = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, baseline);
int x = Math.max(0, r.x());