package com.klp.service.impl;

import cn.hutool.core.bean.BeanUtil;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.klp.common.core.domain.PageQuery;
import com.klp.common.core.page.TableDataInfo;
import com.klp.common.utils.StringUtils;
import com.klp.domain.WmsBatch;
import com.klp.domain.bo.WmsBatchBo;
import com.klp.domain.vo.WmsBatchVo;
import com.klp.mapper.WmsBatchMapper;
import com.klp.service.IWmsBatchService;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.stream.Collectors;

/**
 * Batch service implementation (a batch merges tasks that share the same process).
 *
 * <p>Besides plain CRUD this service offers two scheduling helpers:
 * deadlock detection over the process dependency graph, and generation of
 * a deadlock-free batch execution plan via layered topological sorting.</p>
 *
 * @author klp
 * @date 2025-08-14
 */
@RequiredArgsConstructor
@Service
public class WmsBatchServiceImpl implements IWmsBatchService {

    private final WmsBatchMapper baseMapper;

    /**
     * Query a single batch by primary key.
     *
     * @param batchId primary key
     * @return the batch VO, or {@code null} if not found
     */
    @Override
    public WmsBatchVo queryById(Long batchId) {
        return baseMapper.selectVoById(batchId);
    }

    /**
     * Query a page of batches matching the filter in {@code bo}.
     */
    @Override
    public TableDataInfo<WmsBatchVo> queryPageList(WmsBatchBo bo, PageQuery pageQuery) {
        LambdaQueryWrapper<WmsBatch> lqw = buildQueryWrapper(bo);
        Page<WmsBatchVo> result = baseMapper.selectVoPage(pageQuery.build(), lqw);
        return TableDataInfo.build(result);
    }

    /**
     * Query all batches matching the filter in {@code bo} (no paging).
     */
    @Override
    public List<WmsBatchVo> queryList(WmsBatchBo bo) {
        return baseMapper.selectVoList(buildQueryWrapper(bo));
    }

    /**
     * Build an equality-filter wrapper. Each condition is applied only when the
     * corresponding BO field is non-null / non-blank, so absent fields do not
     * constrain the query.
     */
    private LambdaQueryWrapper<WmsBatch> buildQueryWrapper(WmsBatchBo bo) {
        LambdaQueryWrapper<WmsBatch> lqw = Wrappers.lambdaQuery();
        lqw.eq(StringUtils.isNotBlank(bo.getBatchNo()), WmsBatch::getBatchNo, bo.getBatchNo());
        lqw.eq(bo.getProcessId() != null, WmsBatch::getProcessId, bo.getProcessId());
        lqw.eq(bo.getTotalQuantity() != null, WmsBatch::getTotalQuantity, bo.getTotalQuantity());
        lqw.eq(StringUtils.isNotBlank(bo.getMergeSource()), WmsBatch::getMergeSource, bo.getMergeSource());
        lqw.eq(bo.getEstimatedStartTime() != null, WmsBatch::getEstimatedStartTime, bo.getEstimatedStartTime());
        lqw.eq(bo.getEstimatedEndTime() != null, WmsBatch::getEstimatedEndTime, bo.getEstimatedEndTime());
        lqw.eq(StringUtils.isNotBlank(bo.getBatchStatus()), WmsBatch::getBatchStatus, bo.getBatchStatus());
        return lqw;
    }

    /**
     * Insert a new batch. On success the generated primary key is written back
     * into {@code bo} so the caller can see the new id.
     *
     * @return {@code true} if exactly one row was inserted
     */
    @Override
    public Boolean insertByBo(WmsBatchBo bo) {
        WmsBatch add = BeanUtil.toBean(bo, WmsBatch.class);
        validEntityBeforeSave(add);
        boolean flag = baseMapper.insert(add) > 0;
        if (flag) {
            bo.setBatchId(add.getBatchId());
        }
        return flag;
    }

    /**
     * Update an existing batch by primary key.
     *
     * @return {@code true} if a row was updated
     */
    @Override
    public Boolean updateByBo(WmsBatchBo bo) {
        WmsBatch update = BeanUtil.toBean(bo, WmsBatch.class);
        validEntityBeforeSave(update);
        return baseMapper.updateById(update) > 0;
    }

    /**
     * Pre-save validation hook (e.g. uniqueness constraints).
     */
    private void validEntityBeforeSave(WmsBatch entity) {
        //TODO add data validation, e.g. unique-constraint checks
    }

    /**
     * Delete batches by primary keys.
     *
     * @param ids     primary keys to delete
     * @param isValid whether business validation should run before deleting
     * @return {@code true} if at least one row was deleted
     */
    @Override
    public Boolean deleteWithValidByIds(Collection<Long> ids, Boolean isValid) {
        if (isValid) {
            //TODO add business validation before deletion
        }
        return baseMapper.deleteBatchIds(ids) > 0;
    }

    /**
     * Detect whether the given task execution orders can deadlock.
     * A deadlock manifests as a cycle in the process dependency graph.
     *
     * @param rows task execution sequences; each row is a list of task maps
     *             carrying at least "processId", "taskId" and "sequare" keys.
     *             NOTE(review): "sequare" looks like a typo for "sequence",
     *             but it is the runtime key callers send — do not rename.
     * @return {@code true} if a dependency cycle (deadlock) exists
     */
    @Override
    public boolean checkDeadlock(List<List<Map<String, Object>>> rows) {
        return hasCycle(buildProcessDependencyGraph(rows));
    }

    /**
     * Generate a deadlock-free batch plan: each returned element is a
     * comma-joined group of task ids that can execute in parallel, and groups
     * are ordered so every task's dependencies appear in an earlier group.
     *
     * <p>Tasks caught in a dependency cycle are silently omitted from the
     * plan — callers should run {@link #checkDeadlock} first.</p>
     */
    @Override
    public List<String> generateNonDeadlockBatches(List<List<Map<String, Object>>> rows) {
        // Collect every task id that appears anywhere, including tasks with no edges.
        Set<String> allTasks = new HashSet<>();
        for (List<Map<String, Object>> row : rows) {
            for (Map<String, Object> task : row) {
                allTasks.add(task.get("taskId").toString());
            }
        }
        Map<String, Set<String>> taskGraph = buildTaskDependencyGraph(rows);
        return topologicalSort(taskGraph, allTasks).stream()
            .map(batch -> String.join(",", batch))
            .collect(Collectors.toList());
    }

    /**
     * Return a copy of the row ordered by the numeric "sequare" key.
     * Sorting a copy avoids mutating the caller's input lists.
     */
    private List<Map<String, Object>> sortedBySequence(List<Map<String, Object>> row) {
        List<Map<String, Object>> ordered = new ArrayList<>(row);
        ordered.sort(Comparator.comparingInt(task -> Integer.parseInt(task.get("sequare").toString())));
        return ordered;
    }

    /**
     * Build the process dependency graph: edge B -> {A} means process B's tasks
     * must wait for process A's tasks.
     */
    private Map<String, Set<String>> buildProcessDependencyGraph(List<List<Map<String, Object>>> rows) {
        return buildDependencyGraphByKey(rows, "processId");
    }

    /**
     * Build the task dependency graph: edge B -> {A} means task B depends on task A.
     */
    private Map<String, Set<String>> buildTaskDependencyGraph(List<List<Map<String, Object>>> rows) {
        return buildDependencyGraphByKey(rows, "taskId");
    }

    /**
     * Shared graph builder: within each row (ordered by "sequare"), every entry
     * depends on its immediate predecessor. The graph maps a node to the set of
     * nodes it depends on.
     *
     * @param key map key identifying the node ("processId" or "taskId")
     */
    private Map<String, Set<String>> buildDependencyGraphByKey(List<List<Map<String, Object>>> rows, String key) {
        Map<String, Set<String>> graph = new HashMap<>();
        for (List<Map<String, Object>> row : rows) {
            List<Map<String, Object>> ordered = sortedBySequence(row);
            for (int i = 0; i < ordered.size() - 1; i++) {
                String current = ordered.get(i).get(key).toString();
                String next = ordered.get(i + 1).get(key).toString();
                // next depends on current
                graph.computeIfAbsent(next, k -> new HashSet<>()).add(current);
            }
        }
        return graph;
    }

    /**
     * Detect a cycle in the dependency graph via three-color DFS.
     * Every node of a cycle necessarily appears as a key (it has at least one
     * dependency), so iterating the key set is sufficient.
     */
    private boolean hasCycle(Map<String, Set<String>> graph) {
        // 0 = unvisited, 1 = on the current DFS path, 2 = fully explored
        Map<String, Integer> state = new HashMap<>();
        for (String node : graph.keySet()) {
            if (state.getOrDefault(node, 0) == 0 && hasCycleDFS(node, graph, state)) {
                return true;
            }
        }
        return false;
    }

    /**
     * DFS step: a back edge to a node on the current path (state 1) proves a cycle.
     */
    private boolean hasCycleDFS(String node, Map<String, Set<String>> graph, Map<String, Integer> state) {
        state.put(node, 1);
        for (String neighbor : graph.getOrDefault(node, Collections.emptySet())) {
            int s = state.getOrDefault(neighbor, 0);
            if (s == 1) {
                return true;
            }
            if (s == 0 && hasCycleDFS(neighbor, graph, state)) {
                return true;
            }
        }
        state.put(node, 2);
        return false;
    }

    /**
     * Layered Kahn topological sort. Each returned set is one batch of tasks
     * whose dependencies are all satisfied by earlier batches, so they may run
     * in parallel. Nodes trapped in a cycle never reach in-degree 0 and are
     * therefore omitted.
     *
     * @param taskGraph dependency graph (task -> its dependencies)
     * @param allTasks  every known task id, including isolated ones
     */
    private List<Set<String>> topologicalSort(Map<String, Set<String>> taskGraph, Set<String> allTasks) {
        // In-degree = number of dependencies; tasks absent from the graph have 0.
        Map<String, Integer> inDegree = new HashMap<>();
        for (String task : allTasks) {
            inDegree.put(task, 0);
        }
        // Reverse adjacency (dependency -> dependents) built once, so releasing
        // a finished task is O(outgoing edges) instead of a full graph scan.
        Map<String, Set<String>> dependents = new HashMap<>();
        for (Map.Entry<String, Set<String>> entry : taskGraph.entrySet()) {
            inDegree.put(entry.getKey(), entry.getValue().size());
            for (String dependency : entry.getValue()) {
                dependents.computeIfAbsent(dependency, k -> new HashSet<>()).add(entry.getKey());
            }
        }

        Deque<String> ready = new ArrayDeque<>();
        for (Map.Entry<String, Integer> entry : inDegree.entrySet()) {
            if (entry.getValue() == 0) {
                ready.add(entry.getKey());
            }
        }

        List<Set<String>> batches = new ArrayList<>();
        while (!ready.isEmpty()) {
            Set<String> currentBatch = new HashSet<>();
            // Everything currently in the queue has all dependencies satisfied,
            // so the whole snapshot forms one parallel batch.
            int size = ready.size();
            for (int i = 0; i < size; i++) {
                String task = ready.poll();
                currentBatch.add(task);
                for (String dependent : dependents.getOrDefault(task, Collections.emptySet())) {
                    int remaining = inDegree.merge(dependent, -1, Integer::sum);
                    if (remaining == 0) {
                        ready.add(dependent);
                    }
                }
            }
            if (!currentBatch.isEmpty()) {
                batches.add(currentBatch);
            }
        }
        return batches;
    }
}