Compare commits


169 Commits

Author SHA1 Message Date
chenbowen
7f7c4210ac Merge branch 'dev' into test 2025-11-28 14:01:14 +08:00
chenbowen
db3afb5b64 Merge remote-tracking branch 'base-version/main' into dev 2025-11-28 11:07:42 +08:00
chenbowen
542466270a 1. Fix issue where uppercase table names in custom SQL failed to match the cached table info in MyBatis, causing the table to bypass tenant filtering
2. Add iwork Feign API calls
2025-11-28 11:05:09 +08:00
chenbowen
03ebe21670 1. Clean up unused iwork endpoints.
2. Consolidate the password management strategy for iwork users.
2025-11-27 20:25:02 +08:00
chenbowen
64d0d4e55e 1. Unified iwork seal-request initiation endpoint 2025-11-27 20:19:27 +08:00
chenbowen
22599bbc65 Merge branch 'dev' into test 2025-11-27 16:46:27 +08:00
chenbowen
240a531ee1 Merge remote-tracking branch 'base-version/main' into dev
# Conflicts:
#	zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/database/core/DmDatabase.java
2025-11-27 16:35:49 +08:00
chenbowen
00b2f6312d Fix bug where flowable could not obtain the schema correctly through the DM database driver 2025-11-27 16:01:05 +08:00
chenbowen
446b5ca7a4 Remove rpc-api endpoints that Swagger cannot call 2025-11-27 13:48:55 +08:00
chenbowen
28a49ce45a Fix error where DM JDBC is incompatible with flowable's escaped SQL 2025-11-27 13:26:30 +08:00
chenbowen
4bd0402dde Prevent the event engine from repeatedly auto-creating tables 2025-11-27 11:16:49 +08:00
chenbowen
0ab550123f Disable the databus web request connection pool 2025-11-27 10:27:30 +08:00
chenbowen
cd21239ff2 Migrate flowable to the DM (Dameng) database 2025-11-27 09:58:44 +08:00
chenbowen
837e09941a Merge branch 'dev' into test 2025-11-26 20:14:04 +08:00
chenbowen
256bf22a10 Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 20:12:46 +08:00
chenbowen
76eabb6db0 Fix compilation errors in the system module 2025-11-26 20:12:07 +08:00
chenbowen
06909fafea Add company code and department code attributes to the current login user 2025-11-26 20:01:34 +08:00
qianshijiang
00956030a4 Error messages were not written to the log file 2025-11-26 15:52:00 +08:00
chenbowen
2dac28d3b3 Merge branch 'dev' into test 2025-11-26 13:46:23 +08:00
chenbowen
dbb1d1905e Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 13:46:02 +08:00
chenbowen
08232eb3cb iwork personnel/organization sync work 2025-11-26 13:45:06 +08:00
chenbowen
5de2801fc9 Merge branch 'dev' into test 2025-11-26 12:40:05 +08:00
chenbowen
e9994a24c2 Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 12:39:44 +08:00
chenbowen
a10732119b iwork personnel/organization sync work 2025-11-26 12:38:38 +08:00
qianshijiang
e7efddf976 Configure MyBatis-Plus to print SQL 2025-11-26 11:57:17 +08:00
chenbowen
13ec805c20 Merge branch 'dev' into test 2025-11-26 11:34:59 +08:00
chenbowen
61e61d08b6 Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 11:34:41 +08:00
chenbowen
5698c34185 iwork personnel/organization sync work 2025-11-26 11:34:04 +08:00
qianshijiang
96058e29c2 Merge remote-tracking branch 'origin/dev' into dev 2025-11-26 10:44:28 +08:00
qianshijiang
27d22de4e0 Update logging configuration 2025-11-26 10:44:13 +08:00
chenbowen
f1242e74fc Merge branch 'dev' into test 2025-11-26 10:43:25 +08:00
chenbowen
0c0d82f465 Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 10:43:04 +08:00
chenbowen
12ba2cf756 iwork personnel/organization sync work 2025-11-26 10:42:24 +08:00
qianshijiang
b1bd193f50 Nacos configuration. 2025-11-26 08:57:00 +08:00
chenbowen
0b4b87845c Merge branch 'dev' into test 2025-11-26 01:48:49 +08:00
chenbowen
a2f2325119 Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 01:48:31 +08:00
chenbowen
4c79ac8a6d iwork personnel/organization sync work 2025-11-26 01:48:10 +08:00
chenbowen
0c0cb27c15 Merge branch 'dev' into test 2025-11-26 01:35:09 +08:00
chenbowen
a263632e49 Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 01:34:50 +08:00
chenbowen
2e2b7ac6fa iwork personnel/organization sync work 2025-11-26 01:34:08 +08:00
chenbowen
9730573546 Merge branch 'dev' into test 2025-11-26 01:06:13 +08:00
chenbowen
299132943c Merge remote-tracking branch 'base-version/main' into dev 2025-11-26 01:05:55 +08:00
chenbowen
76ba994b50 iwork personnel/organization sync work 2025-11-26 01:04:35 +08:00
chenbowen
dd284728b4 Merge branch 'dev' into test 2025-11-25 23:27:22 +08:00
chenbowen
685ed6b504 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 23:27:01 +08:00
chenbowen
f754b1c694 iwork personnel/organization sync work 2025-11-25 23:26:26 +08:00
chenbowen
dc1db47d07 iwork personnel/organization sync work 2025-11-25 20:31:56 +08:00
chenbowen
dd38e65972 Merge branch 'dev' into test 2025-11-25 20:09:31 +08:00
chenbowen
02e0c81446 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 20:09:06 +08:00
chenbowen
6c8c479984 Sync Nacos configuration to the base system 2025-11-25 20:08:31 +08:00
chenbowen
829229a355 Merge branch 'dev' into test 2025-11-25 19:15:29 +08:00
chenbowen
067f7226f4 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 19:14:59 +08:00
chenbowen
b35df8493c Sync Nacos configuration to the base system 2025-11-25 19:13:27 +08:00
hewencai
518aa2a773 Merge remote-tracking branch 'origin/dev' into dev 2025-11-25 19:09:13 +08:00
hewencai
4003388740 feat: integrate the China Mobile Cloud MAS SMS platform 2025-11-25 19:08:55 +08:00
chenbowen
f16509c107 Merge branch 'dev' into test 2025-11-25 18:58:07 +08:00
chenbowen
565a625df7 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 18:57:47 +08:00
chenbowen
adcea87bbf Sync Nacos configuration to the base system 2025-11-25 18:56:12 +08:00
chenbowen
a79806690d Merge branch 'dev' into test 2025-11-25 17:43:28 +08:00
chenbowen
8689c5e844 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 17:42:26 +08:00
chenbowen
5be1b75be8 iwork personnel/organization sync work; handle the iwork response format 2025-11-25 17:41:39 +08:00
chenbowen
06d9ae2688 Merge branch 'dev' into test 2025-11-25 17:22:53 +08:00
chenbowen
547b1d9afb Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 17:22:36 +08:00
chenbowen
2f9c28f166 iwork personnel/organization sync work; handle the iwork response format 2025-11-25 17:22:11 +08:00
chenbowen
9a0e60ad84 Merge branch 'dev' into test 2025-11-25 17:19:06 +08:00
chenbowen
c24ae5bad8 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 17:18:43 +08:00
chenbowen
64eb031486 iwork personnel/organization sync work; handle the iwork response format 2025-11-25 17:18:04 +08:00
chenbowen
30b22698e8 Merge branch 'dev' into test 2025-11-25 16:50:11 +08:00
chenbowen
95fab27556 Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 16:49:43 +08:00
chenbowen
2efb815d59 iwork personnel/organization sync work; handle the iwork response format 2025-11-25 16:41:29 +08:00
chenbowen
3d5f07b7a5 Merge branch 'dev' into test 2025-11-25 16:08:41 +08:00
chenbowen
77b4e62def Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 16:06:41 +08:00
chenbowen
e2dbaf12a4 iwork personnel/organization sync work 2025-11-25 16:05:52 +08:00
chenbowen
f8d95218f5 Merge branch 'dev' into test 2025-11-25 15:55:44 +08:00
chenbowen
e0d5c0221e Merge remote-tracking branch 'base-version/main' into dev 2025-11-25 15:55:13 +08:00
chenbowen
59afa893b0 Fix compilation errors 2025-11-25 15:54:50 +08:00
chenbowen
d4d80ce86a iwork personnel/organization sync work 2025-11-25 15:48:47 +08:00
chenbowen
70868a77c0 Merge branch 'dev' into test 2025-11-25 09:15:54 +08:00
chenbowen
eab968da72 Merge remote-tracking branch 'base-version/main' into dev
# Conflicts:
#	pom.xml
2025-11-25 09:15:32 +08:00
chenbowen
e212ba4d2f Bump version 2025-11-25 09:14:36 +08:00
chenbowen
4ddd38c3b6 1. Upgrade to 3.0.45 2025-11-24 19:45:47 +08:00
chenbowen
54f763c15e Merge branch 'dev' into test 2025-11-24 19:35:37 +08:00
chenbowen
65f62fddd6 Merge remote-tracking branch 'base-version/main' into dev 2025-11-24 19:19:09 +08:00
chenbowen
65b99740c1 Add string conversion for Long collection types 2025-11-24 19:18:23 +08:00
chenbowen
f7ee073617 Merge branch 'dev' into test 2025-11-24 15:54:06 +08:00
chenbowen
4b17c1d833 Merge remote-tracking branch 'base-version/main' into dev 2025-11-24 15:53:49 +08:00
chenbowen
77c46acf9e 1. Change how parameters are passed in requests to iwork 2025-11-24 15:51:45 +08:00
qianshijiang
3d3808be48 Fix compilation errors. 2025-11-24 09:52:28 +08:00
qianshijiang
6b266e3211 Fix compilation errors. 2025-11-24 09:49:44 +08:00
chenbowen
491f8c8b1f Merge branch 'dev' into test 2025-11-21 18:30:32 +08:00
chenbowen
e57f649e72 Merge remote-tracking branch 'base-version/main' into dev 2025-11-21 18:30:15 +08:00
chenbowen
ce39dc6d4b 1. Second round of iwork adaptation 2025-11-21 18:20:11 +08:00
chenbowen
8dd7ccaf5f Merge branch 'dev' into test 2025-11-21 17:25:09 +08:00
chenbowen
14c81350bd Merge remote-tracking branch 'base-version/main' into dev 2025-11-21 17:24:41 +08:00
chenbowen
7e3baeba46 Fix bug where ignoreurl stopped working after a Nacos config refresh 2025-11-21 17:23:36 +08:00
chenbowen
4106cfced2 Merge branch 'dev' into test
# Conflicts:
#	zt-dependencies/pom.xml
2025-11-20 18:36:13 +08:00
chenbowen
985b429f6f Merge remote-tracking branch 'base-version/main' into dev
# Conflicts:
#	zt-dependencies/pom.xml
2025-11-20 18:35:03 +08:00
chenbowen
0b646295da 1. Add iwork endpoint for syncing user/organization info
2. Fix bug where version info was wrongly set in zt-dependencies
2025-11-20 18:27:01 +08:00
qianshijiang
5374b0567b Encapsulate a utility class for tree-structured data operations 2025-11-20 15:09:08 +08:00
qianshijiang
47f710648b Add permission identifier for business attachment queries 2025-11-20 10:25:57 +08:00
chenbowen
4c9b1c616e Merge branch 'dev' into test 2025-11-18 19:03:09 +08:00
chenbowen
88fb1652e6 Merge remote-tracking branch 'base-version/main' into dev
# Conflicts:
#	zt-dependencies/pom.xml
#	zt-module-system/zt-module-system-server/src/main/java/com/zt/plat/module/system/service/sso/ExternalSsoServiceImpl.java
2025-11-18 18:51:57 +08:00
chenbowen
52a0b561f9 1. Add custom external SSO menu links 2025-11-18 18:47:50 +08:00
chenbowen
74d2a1a236 1. Upgrade to 3.0.44 2025-11-18 14:13:47 +08:00
chenbowen
585dd3449f Merge branch 'dev' into test 2025-11-18 11:39:28 +08:00
chenbowen
8dc3e81d4f 1. Upgrade to 3.0.44 2025-11-18 11:39:01 +08:00
chenbowen
6b3453e4ee Merge branch 'dev' into test 2025-11-18 10:59:55 +08:00
chenbowen
633e430f46 1. Add annotation support for aggregate queries on paginated endpoints
2. Fix missing fields in databus API log records
3. Add eplat SSO page login validation
4. Add Seata transaction support to user and department editing
5. Add iwork workflow initiation endpoint
6. Add position handling logic when syncing users from eban
7. Add traceId support when SkyWalking is absent
2025-11-18 10:59:29 +08:00
qianshijiang
8b3d93dc17 Add permission identifier for business attachment queries 2025-11-18 10:57:42 +08:00
chenbowen
266eb45e00 1. Add annotation support for aggregate queries on paginated endpoints
2. Fix missing fields in databus API log records
3. Add eplat SSO page login validation
4. Add Seata transaction support to user and department editing
5. Add iwork workflow initiation endpoint
6. Add position handling logic when syncing users from eban
7. Add traceId support when SkyWalking is absent
2025-11-18 10:03:34 +08:00
qianshijiang
c4482af144 Add permission identifier for business attachment queries 2025-11-17 18:01:00 +08:00
chenbowen
d5a1e5c157 Merge branch 'dev' into test 2025-11-13 21:41:13 +08:00
chenbowen
53a0293b7c Merge remote-tracking branch 'base-version/main' into dev 2025-11-13 21:40:08 +08:00
chenbowen
af7f103a38 Change backend return type of department ID and company ID to string 2025-11-13 21:39:27 +08:00
chenbowen
2280d29fb6 Change backend return type of department ID and company ID to string 2025-11-13 21:16:08 +08:00
chenbowen
71e63519ae Change backend return type of department ID and company ID to string 2025-11-13 21:10:38 +08:00
chenbowen
c8dca75943 Merge branch 'dev' into test 2025-11-13 18:45:33 +08:00
chenbowen
2eb09ff35d Merge remote-tracking branch 'base-version/main' into dev 2025-11-13 18:45:11 +08:00
chenbowen
c1f12dfe5e 1. Fix login failures caused by oversized userinfo 2025-11-13 18:44:28 +08:00
wencai he
428c9a60b1 Merge branch 'dev' into 'test'
Dev

See merge request jygk/dsc!3
2025-11-13 09:44:31 +00:00
hewencai
4efa894c8f Merge remote-tracking branch 'origin/dev' into dev 2025-11-13 17:32:31 +08:00
hewencai
f02745454d Update image display logic in portal management 2025-11-13 17:32:30 +08:00
qianshijiang
d9fa921fda Add an async task utility class 2025-11-13 14:59:11 +08:00
chenbowen
b6951a4c6b 1. Scale the infra module to two instances 2025-11-13 11:22:18 +08:00
chenbowen
3e5a0a4845 1. Reduce resource usage of dsc services 2025-11-13 11:11:22 +08:00
wencai he
49a54e2199 Merge branch 'dev' into 'test'
feat: add portal management feature

See merge request jygk/dsc!2
2025-11-13 02:29:50 +00:00
hewencai
e00086c6e8 feat: add portal management feature 2025-11-13 10:28:19 +08:00
chenbowen
9c99750dd8 1. zt-module-report is no longer deployed 2025-11-13 09:14:50 +08:00
chenbowen
bd56cb0405 Merge branch 'dev' into test 2025-11-12 22:26:02 +08:00
chenbowen
c399bdf720 Merge remote-tracking branch 'base-version/main' into dev 2025-11-12 22:25:46 +08:00
chenbowen
1a34cbc678 1. Skip tenant validation on the captcha endpoint URL 2025-11-12 22:23:58 +08:00
chenbowen
81fb8eea8f Merge branch 'dev' into test 2025-11-11 17:16:15 +08:00
chenbowen
8423775582 Merge remote-tracking branch 'base-version/main' into dev 2025-11-11 17:15:55 +08:00
chenbowen
aa159638b9 1. Once an endpoint is marked as a business API, intercept it fully instead of distinguishing by URL 2025-11-11 17:14:31 +08:00
wencai he
2c4f46b6de Merge branch 'dev' into 'test'
Dev

See merge request jygk/dsc!1
2025-11-10 08:06:35 +00:00
hewencai
03c76b071a Merge remote-tracking branch 'origin/dev' into dev 2025-11-10 10:41:48 +08:00
hewencai
a6b87f01a7 add: add seata and doc4j dependencies 2025-11-10 10:41:08 +08:00
chenbowen
3312ed328d 1. Deploy dsc bpm separately 2025-11-07 12:58:59 +08:00
chenbowen
7d74ff7acc Merge branch 'dev' into test 2025-11-07 12:46:18 +08:00
chenbowen
b7f07ba8da Merge remote-tracking branch 'base-version/main' into dev 2025-11-07 12:45:49 +08:00
chenbowen
3c19722cbd 1. Change temporary attachment URL expiration to 24 hours 2025-11-07 12:37:49 +08:00
chenbowen
5037c741a9 Merge branch 'dev' into test 2025-11-06 23:10:34 +08:00
chenbowen
b8b8495dfe Merge remote-tracking branch 'base-version/main' into dev 2025-11-06 23:10:12 +08:00
chenbowen
f25ee5091e 1. Adjust the databus API strategy: create a new request on every retry 2025-11-06 23:08:55 +08:00
chenbowen
5fb15c31ed 1. Adjust pod resource limits 2025-11-06 18:33:25 +08:00
chenbowen
07e000ae33 Merge branch 'dev' into test
# Conflicts:
#	deployment.yaml
2025-11-06 01:09:56 +08:00
chenbowen
c1ea3c6754 1. Adjust pod resource limits 2025-11-06 01:09:31 +08:00
chenbowen
4197bbfbf8 Merge branch 'dev' into test 2025-11-06 00:50:12 +08:00
chenbowen
a9efa1d84a 1. Add business system log collection 2025-11-06 00:49:53 +08:00
chenbowen
c67debb9cf Merge branch 'dev' into test 2025-11-06 00:42:15 +08:00
chenbowen
f64b76e703 1. Add business system log collection 2025-11-06 00:41:57 +08:00
chenbowen
3025c56495 Merge branch 'dev' into test 2025-11-06 00:38:04 +08:00
chenbowen
2a0e0da08a 1. Add business system log collection 2025-11-06 00:37:48 +08:00
chenbowen
309a3f352b Merge branch 'dev' into test 2025-11-06 00:28:45 +08:00
chenbowen
00297f65b4 1. Add business system log collection 2025-11-06 00:28:14 +08:00
chenbowen
270132b7c1 1. Add organization/material status fields
2. The template module no longer participates in compilation
2025-11-05 17:20:00 +08:00
chenbowen
b17126485c Merge branch 'dev' into test 2025-11-05 16:33:37 +08:00
chenbowen
e98615c896 Merge remote-tracking branch 'base-version/main' into dev 2025-11-05 16:32:45 +08:00
chenbowen
ec473a00d4 1. Upgrade SkyWalking-related dependency versions
2. Consolidate eban configuration; add endpoint to globally invalidate the eban token on logout
2025-11-05 16:29:40 +08:00
chenbowen
60d6446287 Merge branch 'dev' into test 2025-11-04 18:50:01 +08:00
chenbowen
79add68d87 Merge remote-tracking branch 'base-version/main' into dev 2025-11-04 18:49:42 +08:00
chenbowen
811270a4c5 1. Increase databus API retry count on network failures to avoid connection reset errors from reusing stale connections
2. Handle organization code generation when syncing top-level organizations
2025-11-04 14:43:53 +08:00
chenbowen
0b27f9ba0b Merge branch 'dev' into test 2025-11-03 14:21:51 +08:00
chenbowen
e6fe57533e Merge remote-tracking branch 'base-version/main' into dev 2025-11-03 14:21:25 +08:00
chenbowen
b98f605dfd 1. Improve all user-selection and department-selection components in BPM workflows
2. Improve API debugging hints; fix API calls rejected due to connection reuse
3. Add dictionary data import
2025-11-03 14:20:13 +08:00
chenbowen
42f61158c6 1. Complete menu permissions for the process/operation page 2025-10-31 09:49:29 +08:00
chenbowen
46e38c4ca3 Merge branch 'dev' into test 2025-10-31 09:30:15 +08:00
chenbowen
005e119ffb Merge remote-tracking branch 'base-version/main' into dev 2025-10-31 09:29:56 +08:00
chenbowen
ddee4da72a 1. Add API call logging and historical version rollback
2. Add user role permission supervision
2025-10-31 09:28:59 +08:00
283 changed files with 19663 additions and 541 deletions


@@ -29,14 +29,14 @@ spec:
httpGet:
path: /actuator/health
port: 48080
- initialDelaySeconds: 10
+ initialDelaySeconds: 50
periodSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /actuator/health
port: 48080
- initialDelaySeconds: 30
+ initialDelaySeconds: 50
periodSeconds: 10
failureThreshold: 5
resources:
@@ -44,7 +44,7 @@ spec:
cpu: "500m"
memory: "1024Mi"
limits:
cpu: "700m"
cpu: "1024m"
memory: "2048Mi"
terminationGracePeriodSeconds: 30
---
@@ -76,7 +76,7 @@ metadata:
description: DESC_PLACEHOLDER
rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
spec:
- replicas: 1
+ replicas: 2
selector:
matchLabels:
app: zt-module-infra
@@ -96,14 +96,14 @@ spec:
httpGet:
path: /actuator/health
port: 48082
- initialDelaySeconds: 10
+ initialDelaySeconds: 50
periodSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /actuator/health
port: 48082
- initialDelaySeconds: 30
+ initialDelaySeconds: 50
periodSeconds: 10
failureThreshold: 5
resources:
@@ -111,7 +111,7 @@ spec:
cpu: "500m"
memory: "1024Mi"
limits:
cpu: "700m"
cpu: "1024m"
memory: "2048Mi"
terminationGracePeriodSeconds: 30
strategy:
@@ -168,14 +168,14 @@ spec:
httpGet:
path: /actuator/health
port: 48081
- initialDelaySeconds: 10
+ initialDelaySeconds: 50
periodSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /actuator/health
port: 48081
- initialDelaySeconds: 30
+ initialDelaySeconds: 50
periodSeconds: 10
failureThreshold: 5
resources:
@@ -183,7 +183,7 @@ spec:
cpu: "500m"
memory: "1024Mi"
limits:
cpu: "700m"
cpu: "1024m"
memory: "2048Mi"
terminationGracePeriodSeconds: 30
strategy:
@@ -207,77 +207,149 @@ spec:
targetPort: 48081
nodePort: 30091
---
# zt-module-report
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: ns-d6a0e78ebd674c279614498e4c57b133
name: zt-module-report
labels:
app: zt-module-report
annotations:
version: "VERSION_PLACEHOLDER"
description: DESC_PLACEHOLDER
rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
spec:
replicas: 1
selector:
matchLabels:
app: zt-module-report
template:
metadata:
labels:
app: zt-module-report
spec:
containers:
- name: zt-module-report
image: 172.16.46.66:10043/zt/zt-module-report:VERSION_PLACEHOLDER
imagePullPolicy: Always
env:
- name: TZ
value: Asia/Shanghai
readinessProbe:
httpGet:
path: /actuator/health
port: 48084
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /actuator/health
port: 48084
initialDelaySeconds: 30
periodSeconds: 10
failureThreshold: 5
resources:
requests:
cpu: "500m"
memory: "1024Mi"
limits:
cpu: "700m"
memory: "2048Mi"
terminationGracePeriodSeconds: 30
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
# zt-module-bpm
#apiVersion: apps/v1
#kind: Deployment
#metadata:
# namespace: ns-d6a0e78ebd674c279614498e4c57b133
# name: zt-module-bpm
# labels:
# app: zt-module-bpm
# annotations:
# version: "VERSION_PLACEHOLDER"
# description: DESC_PLACEHOLDER
# rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
#spec:
# replicas: 1
# selector:
# matchLabels:
# app: zt-module-bpm
# template:
# metadata:
# labels:
# app: zt-module-bpm
# spec:
# containers:
# - name: zt-module-bpm
# image: 172.16.46.66:10043/zt/zt-module-bpm:VERSION_PLACEHOLDER
# imagePullPolicy: Always
# env:
# - name: TZ
# value: Asia/Shanghai
# readinessProbe:
# httpGet:
# path: /actuator/health
# port: 48083
# initialDelaySeconds: 50
# periodSeconds: 5
# failureThreshold: 3
# livenessProbe:
# httpGet:
# path: /actuator/health
# port: 48083
# initialDelaySeconds: 50
# periodSeconds: 10
# failureThreshold: 5
# resources:
# requests:
# cpu: "500m"
# memory: "1024Mi"
# limits:
# cpu: "2048m"
# memory: "2048Mi"
# terminationGracePeriodSeconds: 30
# strategy:
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
#---
#apiVersion: v1
#kind: Service
#metadata:
# namespace: ns-d6a0e78ebd674c279614498e4c57b133
# name: zt-module-bpm
#spec:
# type: NodePort
# selector:
# app: zt-module-bpm
# ports:
# - protocol: TCP
# port: 48083
# targetPort: 48083
# nodePort: 30093
---
apiVersion: v1
kind: Service
metadata:
namespace: ns-d6a0e78ebd674c279614498e4c57b133
name: zt-module-report
spec:
type: NodePort
selector:
app: zt-module-report
ports:
- protocol: TCP
port: 48084
targetPort: 48084
nodePort: 30094
# zt-module-report
#apiVersion: apps/v1
#kind: Deployment
#metadata:
# namespace: ns-d6a0e78ebd674c279614498e4c57b133
# name: zt-module-report
# labels:
# app: zt-module-report
# annotations:
# version: "VERSION_PLACEHOLDER"
# description: DESC_PLACEHOLDER
# rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
#spec:
# replicas: 1
# selector:
# matchLabels:
# app: zt-module-report
# template:
# metadata:
# labels:
# app: zt-module-report
# spec:
# containers:
# - name: zt-module-report
# image: 172.16.46.66:10043/zt/zt-module-report:VERSION_PLACEHOLDER
# imagePullPolicy: Always
# env:
# - name: TZ
# value: Asia/Shanghai
# readinessProbe:
# httpGet:
# path: /actuator/health
# port: 48084
# initialDelaySeconds: 50
# periodSeconds: 5
# failureThreshold: 3
# livenessProbe:
# httpGet:
# path: /actuator/health
# port: 48084
# initialDelaySeconds: 50
# periodSeconds: 10
# failureThreshold: 5
# resources:
# requests:
# cpu: "500m"
# memory: "1024Mi"
# limits:
# cpu: "2048m"
# memory: "2048Mi"
# terminationGracePeriodSeconds: 30
# strategy:
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
#---
#apiVersion: v1
#kind: Service
#metadata:
# namespace: ns-d6a0e78ebd674c279614498e4c57b133
# name: zt-module-report
#spec:
# type: NodePort
# selector:
# app: zt-module-report
# ports:
# - protocol: TCP
# port: 48084
# targetPort: 48084
# nodePort: 30094
---
# zt-module-databus
apiVersion: apps/v1
@@ -312,14 +384,14 @@ spec:
httpGet:
path: /actuator/health
port: 48100
- initialDelaySeconds: 10
+ initialDelaySeconds: 50
periodSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /actuator/health
port: 48100
- initialDelaySeconds: 30
+ initialDelaySeconds: 50
periodSeconds: 10
failureThreshold: 5
resources:

docs/iWork集成说明.md Normal file (340 lines)

@@ -0,0 +1,340 @@
# iWork Unified Integration Guide
This document describes how to use the unified iWork workflow-initiation capability implemented in the System module (controller + service + properties). It covers configuration items, invocation styles (internal Java calls & external HTTP calls), request/response examples, error handling, the cache and token lifecycle, and typical problems with troubleshooting steps.
---
## Overview
The project implements a unified, externally facing iWork integration capability under the `system` module:
- Admin REST endpoints with the path prefix `/system/integration/iwork`.
- A service layer, `IWorkIntegrationService`, that other modules can call directly via Spring bean injection.
- `IWorkProperties` binds the `iwork` configuration items in `application.yml`.
- Tokens/sessions live in a local Caffeine cache (sessions are cached per appId + operatorUserId) and are refreshed ahead of expiry according to configuration.
- A single configured appId, public key, and default workflow ID are used, so multiple credential sets no longer need to be maintained.
---
## Configuration (YAML)
Add or modify the following items in `application.yml` (or a profile); the example is taken from `zt-server/src/main/resources/application.yaml`:
```yaml
iwork:
  base-url: https://iwork.example.com
  app-id: my-iwork-app # the fixed iWork application ID
  client-public-key: MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A... # client public key agreed with iWork (Base64)
  user-id: system # default operator (used when a call does not specify operatorUserId)
  org:
    token-seed: 5936562a-d47c-4a29-9b74-b310e6c971b7
    paths:
      subcompany-page: /api/hrm/resful/getHrmsubcompanyWithPage
      department-page: /api/hrm/resful/getHrmdepartmentWithPage
      job-title-page: /api/hrm/resful/getJobtitleInfoWithPage
      user-page: /api/hrm/resful/getHrmUserInfoWithPage
      sync-subcompany: /api/hrm/resful/synSubcompany
      sync-department: /api/hrm/resful/synDepartment
      sync-job-title: /api/hrm/resful/synJobtitle
      sync-user: /api/hrm/resful/synHrmresource
  workflow-id: 54 # default workflow ID used when the caller does not pass workflowId
  paths:
    register: /api/ec/dev/auth/regist
    apply-token: /api/ec/dev/auth/applytoken
    user-info: /api/workflow/paService/getUserInfo
    create-workflow: /api/workflow/paService/doCreateRequest
    void-workflow: /api/workflow/paService/doCancelRequest
  token:
    ttl-seconds: 3600 # token time-to-live (seconds)
    refresh-ahead-seconds: 60 # how many seconds before expiry a refresh is triggered
  client:
    connect-timeout: 5s
    response-timeout: 30s
```
Notes:
- `base-url` is the base address of the iWork gateway and must not be empty.
- `app-id` and `client-public-key` together form the credentials for registration and token application; they come from configuration only, and switching between multiple sets is no longer supported.
- `workflow-id` provides the global default workflow ID; a single call can override it via `workflowId`.
- The request header names are fixed as `app-id`, `client-public-key`, `secret`, `token`, `time`, and `user-id`; they do not need to be redeclared in configuration.
- The `org.*` settings drive the iWork HR/organization REST proxy: `token-seed` is an identifier agreed with iWork; the system automatically concatenates it with a millisecond timestamp and computes an MD5 hash to produce the `key` (see the sketch below), so no extra token needs to be passed.
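For reference, a minimal sketch of that `key` computation in plain Java. The concatenation order (seed first, then timestamp) and the lowercase hex encoding are assumptions; the platform performs this automatically, so callers normally never need this code:
```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public final class IworkOrgTokenSketch {

    /** Builds the token JSON described above: key = MD5(tokenSeed + ts). */
    static String buildOrgToken(String tokenSeed) throws Exception {
        String ts = String.valueOf(System.currentTimeMillis());
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        byte[] digest = md5.digest((tokenSeed + ts).getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b)); // lowercase hex is an assumption
        }
        return "{\"key\":\"" + hex + "\",\"ts\":\"" + ts + "\"}";
    }

    public static void main(String[] args) throws Exception {
        System.out.println(buildOrgToken("5936562a-d47c-4a29-9b74-b310e6c971b7"));
    }
}
```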
---
## Typical Invocation Paths (Controller)
REST endpoints exposed by the controller:
- POST /system/integration/iwork/user/resolve
  - Purpose: look up the iWork user ID (userId) from external identification info.
  - Request: see the `Resolve User` example below.
- POST /system/integration/iwork/workflow/create
  - Purpose: initiate a workflow in iWork.
  - Request: see the `Create Workflow` example below.
- POST /system/integration/iwork/workflow/void
  - Purpose: void/intervene in a workflow.
  - Request: see the `Void Workflow` example below.
Responses from these endpoints are wrapped in the project's `CommonResult`; the actual business object is in the `data` field.
---
### HR/Organization REST Endpoints (key + ts)
To integrate the HR/organization RESTful interfaces described in the PDF, the controller additionally exposes the following proxy endpoints, which interact with iWork using the token generated from `key + ts`:
- POST `/system/integration/iwork/hr/subcompany/page`: request body carries `params` (Map); maps to `getHrmsubcompanyWithPage`
- POST `/system/integration/iwork/hr/department/page`: maps to `getHrmdepartmentWithPage`
- POST `/system/integration/iwork/hr/job-title/page`: maps to `getJobtitleInfoWithPage`
- POST `/system/integration/iwork/hr/user/page`: maps to `getHrmUserInfoWithPage`
- POST `/system/integration/iwork/hr/subcompany/sync`: request body carries `data` (List<Map>); maps to `synSubcompany`
- POST `/system/integration/iwork/hr/department/sync`: maps to `synDepartment`
- POST `/system/integration/iwork/hr/job-title/sync`: maps to `synJobtitle`
- POST `/system/integration/iwork/hr/user/sync`: maps to `synHrmresource`
All requests are automatically encoded as `application/x-www-form-urlencoded`, with the `token` field set to `{"key":"<md5>","ts":"<timestamp>"}`, so callers do not need to compute it themselves.
---
## Request VO Reference (Key Fields)
- IWorkBaseReqVO (shared fields):
  - `appId` (String): retained for backward compatibility with legacy callers; the system always uses the configured `iwork.app-id`.
  - `operatorUserId` (String): the user ID representing the operator inside iWork (optional; the framework falls back to `properties.userId`).
  - `forceRefreshToken` (Boolean): whether to force a token refresh (e.g., after a token error).
- IWorkUserInfoReqVO (for resolving users):
  - `identifierKey` (String): external identifier key (required, e.g., "loginid").
  - `identifierValue` (String): external identifier value (required, e.g., a username).
  - `payload` (Map): extra request payload, merged with the identifier before being sent to iWork.
  - `queryParams` (Map): use this field if extra information must be passed as query parameters.
- IWorkUserInfoRespVO (resolve-user response):
  - `userId` (String): the user ID parsed from the iWork response (if resolvable).
  - `payload` / `rawBody`: the raw response.
  - `success` / `message`: success flag and message.
- IWorkWorkflowCreateReqVO (initiate workflow):
  - `requestName` (String): workflow title.
  - `workflowId` (Long): workflow template ID (optional; defaults to the configured value).
  - `mainFields` (`List<IWorkFormFieldVO>`): main-table field collection.
  - `detailTables` (`List<IWorkDetailTableVO>`): detail table collection (optional).
  - `otherParams` / `formExtras`: extra parameters; `formExtras` is appended as form data.
- IWorkWorkflowVoidReqVO (void):
  - `requestId` (String): workflow request ID (required).
  - `reason`, `extraParams`, `formExtras`: carry the void reason or extra fields.
- IWorkFormFieldVO (form field):
  - `fieldName` (String): field name (required), matching the iWork form field key.
  - `fieldValue` (String): field value (required).
- IWorkDetailRecordVO (detail record):
  - `recordOrder` (Integer): optional record index (0-based), used for detail ordering in iWork.
  - `fields` (List<IWorkFormFieldVO>): fields of this detail row (required).
- IWorkDetailTableVO (detail table):
  - `tableDBName` (String): iWork detail table name (required, e.g., `formtable_main_26_dt1`).
  - `records` (List<IWorkDetailRecordVO>): detail record collection (required).
---
## Java (Internal) Invocation Example
The project also provides the `IWorkIntegrationService` bean, which can be injected and called directly:
```java
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkDetailRecordVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkDetailTableVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkFormFieldVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkOperationRespVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkWorkflowCreateReqVO;
import com.zt.plat.module.system.service.integration.iwork.IWorkIntegrationService;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Component;

import java.util.List;

@RequiredArgsConstructor
@Component
public class MyService {

    private final IWorkIntegrationService iworkService;

    public void startFlow() {
        IWorkWorkflowCreateReqVO req = new IWorkWorkflowCreateReqVO();
        // Uses the app-id configured in application.yml; no need to set it here
        req.setRequestName("测试-创建流程");
        // To override the configured default workflow, set workflowId explicitly
        // req.setWorkflowId(54L);

        // Main-table fields
        IWorkFormFieldVO nameField = new IWorkFormFieldVO();
        nameField.setFieldName("name");
        nameField.setFieldValue("张三");
        IWorkFormFieldVO amountField = new IWorkFormFieldVO();
        amountField.setFieldName("amount");
        amountField.setFieldValue("1000");
        req.setMainFields(List.of(nameField, amountField));

        // Detail table (optional)
        IWorkFormFieldVO detailField = new IWorkFormFieldVO();
        detailField.setFieldName("itemName");
        detailField.setFieldValue("办公用品");
        IWorkDetailRecordVO record = new IWorkDetailRecordVO();
        record.setRecordOrder(0);
        record.setFields(List.of(detailField));
        IWorkDetailTableVO detailTable = new IWorkDetailTableVO();
        detailTable.setTableDBName("formtable_main_26_dt1");
        detailTable.setRecords(List.of(record));
        req.setDetailTables(List.of(detailTable));

        IWorkOperationRespVO resp = iworkService.createWorkflow(req);
        if (resp.isSuccess()) {
            // Handle success, e.g., record the requestId
        } else {
            // Log or retry
        }
    }
}
```
Notes:
- To use a specific credential, set `req.setAppId("my-iwork-app")`.
- To override the default workflow template, call `req.setWorkflowId(123L)`.
- To initiate as a specific iWork operator, set `req.setOperatorUserId("1001")`.
---
## HTTP (External) Invocation Examples (cURL)
1. Resolve user
```bash
curl -X POST \
-H "Content-Type: application/json" \
-d '{
"appId":"my-iwork-app",
"identifierKey":"loginid",
"identifierValue":"zhangsan"
}' \
https://your-zt-server/admin-api/system/integration/iwork/user/resolve
```
Successful response example (wrapped in `CommonResult`):
```json
{
  "code": 0,
  "msg": "success",
  "data": {
    "userId": "1001",
    "success": true,
    "payload": { ... },
    "rawBody": "{...}"
  }
}
```
2. Create workflow
```bash
curl -X POST -H "Content-Type: application/json" -d '{
"requestName":"测试REST创建流程",
"workflowId":54,
"mainFields":[{"fieldName":"name","fieldValue":"张三"}],
"appId":"my-iwork-app"
}' https://your-zt-server/admin-api/system/integration/iwork/workflow/create
```
3. Void workflow
```bash
curl -X POST -H "Content-Type: application/json" -d '{
"requestId":"REQ-001",
"reason":"作废原因",
"appId":"my-iwork-app"
}' https://your-zt-server/admin-api/system/integration/iwork/workflow/void
```
---
## Core Logic and Details
1. Base parameter resolution
   The system always uses the `app-id` and `client-public-key` configured in `application.yml` to communicate with iWork.
   The `appId` field in the request body is retained only for backward compatibility; the framework does not use it to switch credentials.
2. Workflow template resolution
   The `workflowId` in the request body takes precedence.
   If not supplied, the call falls back to the global `iwork.workflow-id`; if that is also empty, `IWORK_WORKFLOW_ID_MISSING` is thrown.
3. Registration + RSA + token
   - On first use, or when the token has expired, a session is obtained as follows:
     1. Call iWork's `register` endpoint (headers include appId and clientPublicKey).
     2. Obtain `secret` and `spk` (the server-side public key) from the registration response and perform RSA encryption (`spk` is used for the encryption) to produce the encrypted secret and encryptedUserId.
     3. Apply for a token (apply-token) with the key material returned by registration; the token is cached for `ttl-seconds`.
   - `IWorkIntegrationServiceImpl` maintains a Caffeine `sessionCache` keyed by `appId::operatorUserId` (sketched below).
   - When a token approaches expiry (`refresh-ahead-seconds`), the next request triggers a refresh.
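A minimal sketch of that caching behavior, assuming Caffeine and a simple token-plus-expiry session record; the real internals of `IWorkIntegrationServiceImpl` may differ:
```java
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.time.Duration;
import java.time.Instant;

final class SessionCacheSketch {

    record Session(String token, Instant expiresAt) {}

    private final Cache<String, Session> sessionCache = Caffeine.newBuilder()
            .maximumSize(256) // matches the documented cache capacity
            .build();

    private final Duration refreshAhead = Duration.ofSeconds(60); // refresh-ahead-seconds

    String tokenFor(String appId, String operatorUserId) {
        String key = appId + "::" + operatorUserId;
        Session session = sessionCache.getIfPresent(key);
        // Treat a token inside the refresh-ahead window as expired and re-apply.
        if (session == null || Instant.now().plus(refreshAhead).isAfter(session.expiresAt())) {
            session = registerAndApplyToken(appId, operatorUserId);
            sessionCache.put(key, session);
        }
        return session.token();
    }

    private Session registerAndApplyToken(String appId, String operatorUserId) {
        // Placeholder for the register -> RSA encrypt -> apply-token sequence above.
        return new Session("token", Instant.now().plusSeconds(3600)); // ttl-seconds
    }
}
```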
4. Request construction
   - JSON requests use `application/json`; form requests (such as create/void workflow) use `application/x-www-form-urlencoded`.
   - Authentication headers are controlled by the constants in `IWorkProperties.Headers`, with the fixed names `app-id`, `client-public-key`, `secret`, `token`, `time`, `user-id`.
5. Response parsing
   - Success detection is deliberately lenient: the implementation checks fields such as `code`, `status`, `success`, and `errno` (accepting booleans and the strings 0/1/success) to decide success, and parses the common message fields `msg|message|errmsg` (a sketch of the idea follows).
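A hedged sketch of such a lenient check using Jackson; which literal counts as success for each field is an assumption here, not a statement about the real implementation:
```java
import com.fasterxml.jackson.databind.JsonNode;

final class IworkResponseSketch {

    /** Lenient success check over common iWork status fields. */
    static boolean isSuccess(JsonNode body) {
        for (String field : new String[] {"code", "status", "success", "errno"}) {
            JsonNode node = body.get(field);
            if (node == null) {
                continue; // try the next candidate field
            }
            if (node.isBoolean()) {
                return node.booleanValue();
            }
            String text = node.asText();
            // Assumption: "0" and "success" indicate success for these fields.
            return "0".equals(text) || "success".equalsIgnoreCase(text) || "true".equalsIgnoreCase(text);
        }
        return false; // no recognizable status field
    }

    /** Picks the first available message field. */
    static String message(JsonNode body) {
        for (String field : new String[] {"msg", "message", "errmsg"}) {
            JsonNode node = body.get(field);
            if (node != null) {
                return node.asText();
            }
        }
        return null;
    }
}
```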
---
## Common Errors and Troubleshooting
- baseUrl not configured (IWORK_BASE_URL_MISSING)
  - Fix: make sure `iwork.base-url` is set correctly.
- Missing configuration (IWORK_CONFIGURATION_INVALID)
  - Scenario: key fields such as `app-id`, `client-public-key`, or `user-id` are unset or contain only whitespace.
  - Fix: fill in the fields in `application.yml` or the config center and make sure they match the iWork side.
- Missing workflow ID (IWORK_WORKFLOW_ID_MISSING)
  - Scenario: neither the request body, the credential, nor the global configuration provides a workflow template ID.
  - Fix: pass `workflowId` in the request, or set `workflow-id` in configuration / a credential-level `default-workflow-id`.
- RSA encryption / registration / token application failure (IWORK_REGISTER_FAILED / IWORK_APPLY_TOKEN_FAILED / IWORK_REMOTE_REQUEST_FAILED)
  - Fix: check the HTTP status and body returned by iWork in the logs, and confirm the request headers/paths/parameters match the iWork gateway requirements.
- User resolution failure
  - Confirm `identifierKey`/`identifierValue` are filled in correctly and match iWork's query API; enable `forceRefreshToken` to trigger a session refresh and rule out token expiry.
---
## Advanced Topics
- Concurrency and caching
  - `sessionCache` holds at most 256 entries; under high concurrency or with many credentials/operators, the capacity may need adjusting.
- Timeouts and the HTTP client
  - `IWorkProperties.client.response-timeout` sets the response timeout; the connect timeout is usually governed by global Reactor Netty configuration.
- Unit tests
  - The project ships MockWebServer-based tests (`IWorkIntegrationServiceImplTest`) that mock iWork's register, token application, user query, and create/void workflow interactions.
---
## Summary and Recommendations
- Fill in the key configuration fields: `iwork.app-id`, `iwork.client-public-key`, `iwork.user-id`, `iwork.workflow-id`.
- Debug locally through the `IWorkIntegrationService` Java API first, then expose the controller's REST endpoints externally.
- On request failures, check the application logs (entries prefixed `[iWork]`) and the iWork gateway response body to determine whether registration, token application, or a business endpoint (user-info/create/void) failed.



@@ -0,0 +1,124 @@
# Paginated Summary Row Usage Guide
This document describes how to enable summary-row (SUM) statistics for paginated APIs in the platform. The capability is built on the `PageResult` response body and the `@PageSum` annotation, and automatically computes and returns the totals of the annotated fields during paginated queries.
## Applicable Scenarios
- The bottom of a paginated list needs totals such as amounts or quantities.
- The backend should supply the summary automatically instead of the frontend accumulating values by hand.
- `BaseMapperX` and its pagination helpers such as `selectPage` are already in use.
## Feature Overview
| Component | Location | Role |
| --- | --- | --- |
| `@PageSum` | `com.zt.plat.framework.common.annotation.PageSum` | Marks entity fields that participate in SUM aggregation |
| `PageResult.summary` | `com.zt.plat.framework.common.pojo.PageResult` | Carries the field -> `BigDecimal` summary result |
| `PageSumSupport` | `com.zt.plat.framework.mybatis.core.sum.PageSumSupport` | Scans annotations, clones the query conditions, and executes the SUM query |
| `BaseMapperX.selectPage` | `com.zt.plat.framework.mybatis.core.mapper.BaseMapperX` | Attaches the summary after paginated and non-paginated queries |
## Integration Steps
### 1. Annotate the entity with `@PageSum`
```java
@TableName("order_summary")
public class OrderSummaryDO {

    private Long id;

    @PageSum
    private BigDecimal amount;

    @PageSum(column = "tax_amount")
    private BigDecimal tax;

    @PageSum(column = "discount")
    private BigDecimal discountSummary;

    // other fields ...
}
```
- When `column` is omitted, the database column mapped by MyBatis-Plus for the entity field is used.
- For cross-table or function expressions (e.g., `sum(price * quantity)`), write the SQL fragment directly in `column`.
- The field must be numeric (`Number`, `BigDecimal`, or a primitive numeric type); non-numeric fields are ignored with a warning log.
- For summary-only fields that do not exist in the table, just declare `exist = false` on `@PageSum`; the framework injects the equivalent `@TableField(exist = false)` automatically, so no extra `@TableField` annotation is needed.
### 2. Use `BaseMapperX` pagination
```java
PageResult<OrderSummaryRespVO> page = orderSummaryMapper.selectPage(pageParam, wrapper);
```
- All overloads of `BaseMapperX.selectPage` (including the sort-parameter variants) attach the summary automatically.
- This also works for the `PageParam.PAGE_SIZE_NONE` (no pagination) case.
- `selectJoinPage` does not attach the summary yet; wrap it yourself if needed.
> ⚠️ The summary enhancement currently relies on default MyBatis-Plus pagination (single table / simple conditions) to run the aggregation. For complex joins or heavily customized SQL, write a dedicated summary endpoint, or call `PageSumSupport.tryAttachSummary(...)` manually in your own logic, so the existing query statements are not affected.
### 3. Expose the response
`PageResult` now contains two count-related properties:
- `total`: the total number of records, still returned via the `total` field (with backward-compatible `totalCount` deserialization).
- `summary`: a Map whose keys are entity field names and whose values are `BigDecimal` totals.
Sample response:
```json
{
  "code": 0,
  "data": {
    "list": [
      { "id": 1, "amount": 20.00, "tax": 1.20 },
      { "id": 2, "amount": 30.00, "tax": 1.80 }
    ],
    "total": 2,
    "summary": {
      "amount": 50.00,
      "tax": 3.00
    }
  }
}
```
The frontend can read `data.summary.amount` directly to render the summary row; no manual aggregation is needed.
## FAQ
### The summary is empty
- Check that the entity fields are annotated with `@PageSum` and are numeric.
- Confirm the mapper's generic entity matches the query result entity; `PageSumSupport` resolves the entity type from the mapper's generics.
- If the query conditions override the `select` list (e.g., an explicit `select(...)` call), make sure the SUM statement can still run; `PageSumSupport` clones the wrapper and resets the `select` list, so hand-written SQL must remain compatible.
### Custom SQL & complex scenarios
- For complex aggregations (e.g., CASE WHEN), write a SQL expression in the `column` attribute:
```java
@PageSum(column = "SUM(CASE WHEN status = 'PAID' THEN amount ELSE 0 END)")
private BigDecimal paidAmount;
```
- The current implementation **only scans `@PageSum` annotations on the mapper's generic entity class**. If the paginated API ultimately returns a VO, annotate the entity first, then convert the data with `PageResult.convert(...)` or similar; the `summary` content is fully preserved after conversion.
### Non-BaseMapperX queries
- Automatic aggregation currently applies only to `BaseMapperX.selectPage` and paginated list queries.
- For custom XML SQL, call `PageSumSupport.tryAttachSummary(mapper, wrapper, pageResult)` manually in your logic, as sketched below.
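A hedged fragment showing that manual call, reusing the `OrderSummaryDO` entity from the example above; `records`/`total` and the `PageResult` constructor are assumed from the surrounding conventions:
```java
// records/total come from your custom XML query; the wrapper must reproduce
// the same WHERE conditions so the SUM matches the returned list.
PageResult<OrderSummaryDO> pageResult = new PageResult<>(records, total);
LambdaQueryWrapper<OrderSummaryDO> wrapper = new LambdaQueryWrapper<>();
PageSumSupport.tryAttachSummary(orderSummaryMapper, wrapper, pageResult);
// pageResult.getSummary() now carries the BigDecimal totals of the @PageSum fields
```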
## Debugging and Testing
- Unit test example: `com.zt.plat.framework.mybatis.core.sum.PageSumSupportTest`.
- Run `mvn -pl zt-framework/zt-spring-boot-starter-mybatis -am test` to verify the feature and guard against regressions.
- Warnings about number parsing or field configuration issues are logged to aid troubleshooting.
## Compatibility Notes
- `PageResult` still provides the original paginated data via `list`/`total`, staying backward compatible with older endpoints.
- The new `summary` field can be rendered by the frontend as needed.
- The `totalCount` setter/getter is retained (`@JsonIgnore`) for older code.
To extend further (e.g., AVG or MAX aggregations), add annotations and aggregation logic on top of `PageSumSupport` following the existing structure.

docs/外部单点登录.md Normal file (194 lines)

@@ -0,0 +1,194 @@
# External Single Sign-On (External SSO) Integration Guide
## Overview
- External systems can redirect to this system with a one-time token to achieve passwordless single sign-on.
- The legacy payload decryption, strict nonce validation, auto account creation, and email matching logic have been removed; every account must exist locally in advance with its mapping maintained.
- The new `/system/sso/verify` endpoint returns standard `AuthLoginRespVO` token info and records audit and login logs.
- External API calls are abstracted behind `ExternalSsoClient`; implement your own or reuse the default HTTP client wrapper.
## Key Components
### Backend
- `ExternalSsoServiceImpl`: the main SSO flow, implementing parameter validation, external user lookup, local account matching, token issuance, and logging.
- `ExternalSsoStrategy`: a per-source-system strategy interface; different systems can implement custom fetch-and-match logic.
- `DefaultExternalSsoStrategy`: the default strategy, reusing the configurable HTTP client and matching order; custom strategies can override it by priority.
- `ExternalSsoClient`: the abstraction for fetching external user info.
- `DefaultExternalSsoClient`: the default `RestTemplate`-based implementation, supporting header/query/body placeholder rendering, retries, response field mapping, and proxy configuration.
- `ExternalSsoClientConfiguration`: registers the `DefaultExternalSsoClient` bean via `@Configuration` when none is defined, allowing business code to override it.
- `ExternalSsoProperties`: the `external-sso.*` configuration, including switches, the external API, account mapping, and CORS sub-configs; sample configuration is synced to `zt-module-system/zt-module-system-server/src/main/resources/application.yaml` and `zt-server/src/main/resources/application.yaml`.
- `ExternalSsoVerifyReqVO`: the `POST /system/sso/verify` request payload.
- `ExternalSsoUserInfo`: the normalized external user model, with base fields plus a custom attribute set.
### Frontend
- `src/router/modules/remaining.ts`: adds the hidden route `/externalsso` for the callback page.
- `src/views/Login/ExternalSsoCallback.vue`: parses the URL parameters, calls the verify endpoint, persists the token, and redirects to the target page; on failure it shows a message and guides the user back to the login page.
- `src/api/login/index.ts`: adds the `externalSsoVerify` method, which calls `/system/sso/verify` and returns a `TokenType`.
## Call Flow
1. After the external system authenticates locally, it builds the URL `{this-system-domain}/#/externalsso?x-token={external token}&target={return address}` and redirects; an optional `sourceSystem` identifies the source system.
2. The Vue page `ExternalSsoCallback` parses the query parameters, preferring `x-token` (with fallback to the legacy `token` parameter), and checks that it exists; if missing, the flow stops with an error.
3. The frontend calls `POST /system/sso/verify` with the body:
```json
{
  "token": "token issued by the external system",
  "targetUri": "/#/dashboard", // optional
  "sourceSystem": "partner-a" // optional
}
```
4. `ExternalSsoServiceImpl#verifyToken` performs the following steps:
   - Validates the feature switch and the token.
   - Selects the matching `ExternalSsoStrategy` by `sourceSystem`; if none matches, it returns "source system not supported".
   - Fetches the external user info via the strategy's `ExternalSsoClient` (the configured HTTP endpoint by default).
   - Looks up the local account following the strategy's matching order (default: external ID → username → mobile); if none matches, it returns "no matching local user".
   - Checks that the account is enabled, issues the OAuth2 access token, and records a login log (type `LOGIN_EXTERNAL_SSO`).
   - Writes an operation audit log recording the external response digest, mapped account, target address, source system, and more.
5. The frontend receives the `AuthLoginRespVO`, persists it via `authUtil.setToken`/`setTenantId`, and redirects to the normalized `targetUri` (default `/`).
6. On any failure (missing token, external API error, account not found, etc.), the backend returns the corresponding error code; the frontend shows a message and clears the local cache.
Sequence sketch:
```text
External system -> Browser -> Vue /externalsso -> POST /system/sso/verify -> ExternalSsoService -> ExternalSsoClient -> external user API
                                                                        \-> OAuth2TokenService -> login log / audit log
```
## Frontend Details
- `ExternalSsoCallback.vue` decodes and normalizes the `target`/`targetUri`/`redirect` parameters, supports absolute addresses or hash routes, and guards against open-redirect vulnerabilities.
- It parses `sourceSystem` from the URL (also accepting `source` and `systemCode`) and forwards it to the backend so the right strategy is chosen in multi-source scenarios.
- On success it calls `router.replace` so no history entry is created; on failure it guides the user back to `/login` with the original target attached.
- `buildErrorMessage` handles the backend's `msg`, `Error` objects, or plain strings for consistent error display.
## Backend Flow Breakdown
- **Switch and parameter validation**
  - A disabled switch or missing token throws `EXTERNAL_SSO_DISABLED` / `EXTERNAL_SSO_TOKEN_MISSING`.
- **External user retrieval**
  - The default client loads its request configuration from `external-sso.remote` and supports GET/POST and other scenarios.
  - Placeholders: `${externalUserId}` (initially the token), `${shareToken}`/`${token}` (the share-service access token), `${xToken}` (the original callback token), `${targetUri}`, `${sourceSystem}`.
  - It automatically obtains a share-service access token via `ShareServiceUtils` and writes it to the request header named by `ShareServiceProperties#tokenHeaderName`.
  - The request body is sent as JSON `{ "x-token": "token from the callback parameters" }`, satisfying the upstream interface `S_BF_CS_01`.
  - `validateResponse` can check the business status via `codeField` and `successCode`, throwing a detailed `ExternalSsoClientException` on failure.
- **Local account matching**
  - `mapping.order` controls field priority; `custom.entries` holds a static "external ID → local user ID" mapping.
  - A missing user or a disabled account throws `EXTERNAL_SSO_USER_NOT_FOUND` / `EXTERNAL_SSO_USER_DISABLED`, both written to the login log.
- **Token issuance and logging**
  - `OAuth2TokenService#createAccessToken` issues the local access token using the default client `CLIENT_ID_DEFAULT`.
  - `recordAuditLog` writes the SHA-256 digest of the raw response (sketched below), the external attributes, and a token digest into the operation log for troubleshooting.
  - `recordLoginLog` records the login and updates the user's last login IP on success.
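The response digest mentioned above boils down to a standard SHA-256 hex hash; a minimal sketch (the hex casing is an assumption):
```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.HexFormat;

final class DigestSketch {

    /** SHA-256 hex digest, stored in the audit log instead of the raw response. */
    static String sha256Hex(String rawResponse) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        byte[] hash = digest.digest(rawResponse.getBytes(StandardCharsets.UTF_8));
        return HexFormat.of().formatHex(hash); // lowercase hex
    }
}
```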
## Extending `ExternalSsoClient`
- The default `DefaultExternalSsoClient` is registered automatically by `ExternalSsoClientConfiguration`; to integrate another protocol, define your own bean in any configuration class:
```java
@Bean
public ExternalSsoClient customExternalSsoClient(...) {
    return new MyExternalSsoClient(...);
}
```
- Default implementation highlights:
  - Builds a `RestTemplate` from configuration, supporting connect/read timeouts, an HTTP proxy, and a retry count (a sketch follows).
  - Parses the JSON response, maps the configured fields onto `ExternalSsoUserInfo`, and keeps the raw data node in `attributes`.
  - Throws `ExternalSsoClientException` carrying the raw response on parse failures or unexpected status codes.
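A minimal sketch of building such a `RestTemplate` from the `remote.*` settings; the parameter names mirror the configuration keys and are otherwise assumptions, not the actual factory code:
```java
import java.net.InetSocketAddress;
import java.net.Proxy;
import org.springframework.http.client.SimpleClientHttpRequestFactory;
import org.springframework.web.client.RestTemplate;

final class RestTemplateSketch {

    /** Builds a RestTemplate honoring the remote.* timeout and proxy settings. */
    static RestTemplate build(int connectTimeoutMillis, int readTimeoutMillis,
                              String proxyHost, Integer proxyPort) {
        SimpleClientHttpRequestFactory factory = new SimpleClientHttpRequestFactory();
        factory.setConnectTimeout(connectTimeoutMillis); // remote.connect-timeout-millis
        factory.setReadTimeout(readTimeoutMillis);       // remote.read-timeout-millis
        if (proxyHost != null && proxyPort != null) {    // remote.proxy.*
            factory.setProxy(new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyHost, proxyPort)));
        }
        return new RestTemplate(factory);
    }
}
```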
## Configuration Reference
```yaml
external-sso:
  enabled: true
  system-code: example-partner
  token:
    secret: "shared-secret"
    algorithm: AES
    allowed-clock-skew-seconds: 60
    max-age-seconds: 300
    require-nonce: false
    replay-protection-enabled: false
  remote:
    base-url: http://10.1.7.110
    user-info-path: /api/sso/user
    method: POST
    headers:
      Authorization: "Bearer ${token}"
    query-params: {}
    body:
      userId: "${externalUserId}"
    code-field: code
    success-code: "0"
    message-field: message
    data-field: data
    user-id-field: data.userId
    username-field: data.username
    nickname-field: data.nickname
    email-field: data.email
    mobile-field: data.mobile
    tenant-id-field: data.tenantId
    connect-timeout-millis: 3000
    read-timeout-millis: 5000
    retry-count: 1
    proxy:
      enabled: false
  mapping:
    order:
      - EXTERNAL_ID
      - USERNAME
      - MOBILE
    ignore-case: true
    update-profile-on-login: false
    custom:
      entries:
        partnerUser001: 10001
  cors:
    allowed-origins:
      - https://partner.example.com
    allowed-methods: ["OPTIONS", "POST"]
    allowed-headers: ["Authorization", "Content-Type"]
    allow-credentials: true
    max-age: 1800
```
| Config path | Description |
| --- | --- |
| `enabled` | Master switch; when off, the endpoint returns `EXTERNAL_SSO_DISABLED` directly |
| `system-code` | Default source-system identifier, used as the fallback `sourceSystem` and as a log tag |
| `token.*` | If external token decryption/validation is still needed, use these in a custom `ExternalSsoClient`; the default implementation just passes the token through |
| `remote.*` | External HTTP call parameters, field mapping, and timeouts; template placeholders support `externalUserId`, `shareToken` (`token`), `xToken`, `targetUri`, `sourceSystem` |
| `mapping.order` | Local account matching priority; supports `EXTERNAL_ID`, `USERNAME`, `MOBILE` |
| `mapping.custom.entries` | Static mapping from external user identifiers to local user IDs |
| `cors.*` | CORS whitelist for exposing `/system/sso/verify` |
## Error Codes and Logs
- Error codes:
  - `1_002_000_050`: feature disabled.
  - `1_002_000_051`: token missing.
  - `1_002_000_055`: external API error; the specific cause fills the placeholder.
  - `1_002_000_056`: no matching local user.
  - `1_002_000_057`: local user disabled.
  - `1_002_000_058`: source system not supported; a matching strategy implementation must be configured.
- Login log: success/failure is recorded with `LoginLogTypeEnum.LOGIN_EXTERNAL_SSO`.
- Operation log: type `EXTERNAL_SSO/VERIFY`, including the external user ID, mapped account, target address, source system, response digest, and other metadata.
## Notes
- All accounts must have their mappings maintained in advance; the system never auto-creates users or falls back to email matching.
- `targetUri` is normalized on the frontend to avoid open-redirect risks; with no valid target, the user lands on the home page.
- Make sure the JSON fields returned by the external API match the configuration; point `remote.data-field` at the right node if necessary.
- If the external API is slow or flaky, raise `retry-count`, increase the timeouts, or provide a custom client implementation.
- To record more audit information, inject custom fields via `ExternalSsoUserInfo#addAttribute`; the audit log keeps them automatically.
## Extension and Testing Suggestions
- Provide new `ExternalSsoStrategy` or `ExternalSsoClient` beans to support additional source systems.
- Write integration tests for the main failure scenarios: missing token, missing mapping, unsupported source system, external API timeout, disabled account, and so on.
- Before wiring up the real callback, the external system can call `/system/sso/verify` directly to validate the configuration.


@@ -0,0 +1,205 @@
# Databus Module API Features and Third-Party Invocation Guide
> Scope: `zt-module-databus` (server side) + `zt-module-databus-api` (interface definitions). Based on the 2025-11-20 trunk branch.
## 1. Module Positioning and Capabilities
- **Goal**: expose a unified data/business orchestration gateway, letting administrators configure APIs, steps, transforms, and rate-limit policies in a visual UI and publish them to the runtime instantly.
- **Core features**
  1. Full API lifecycle management (definitions, versions, rollback, publish cache refresh).
  2. An orchestration engine dynamically assembled on Spring Integration, supporting Start/HTTP/RPC/Script/End steps and JSON transform chains.
  3. Multiple security layers: IP allow/deny lists, application credentials, timestamp + nonce, payload encryption/decryption, signatures, replay protection, tenant isolation, fixed anonymous users, and more.
  4. QoS capabilities: pluggable rate-limit policies (Redis fixed-window counting), audit logs, trace IDs & step-level results persisted to the database.
  5. Debug support: the admin endpoint `POST /databus/gateway/invoke` can inject arbitrary parameters to simulate real calls.
## 2. Runtime Architecture Overview
| Component | Location | Role |
| --- | --- | --- |
| `GatewaySecurityFilter` | `framework.integration.gateway.security` | Filters and validates every HTTP request under `databus.api-portal.base-path`: IP checks, payload decryption, signature, replay protection, anonymous-user injection, response encryption. |
| `ApiGatewayExecutionService` | `framework.integration.gateway.core` | Maps HTTP requests to `ApiInvocationContext`, dispatches the Integration Flow, and builds the unified response. |
| `IntegrationFlowManager` | `framework.integration.gateway.core` | Dynamically registers Spring Integration Flows by `apiCode + version`, with hot refresh and temporary debug flows. |
| `ApiFlowDispatcher` | same as above | Finds the input channel by apiCode/version, sends the request, and waits for the `ApiInvocationContext` to come back. |
| `PolicyAdvisorFactory` + `DefaultRateLimitPolicyEvaluator` | `framework.integration.gateway.core/policy` | Weaves rate-limit and other policies into the flow; the current default implementation supports a Redis fixed window. |
| `ApiGatewayAccessLogger` | `framework.integration.gateway.core` | Writes access logs to `databus_api_access_log`: trace, request/response, duration, step results, etc. |
| Admin REST controllers | `controller.admin.gateway.*` | Manage API definitions, versions, credentials, policies, access logs, etc. |
## 4. Admin REST Quick Reference
| Module | Method | Path | Description |
| --- | --- | --- | --- |
| API definitions | GET | `/databus/gateway/definition/page` | Paged query (filter by code/description). |
| | GET | `/databus/gateway/definition/{id}` | Details (with steps, transforms, rate-limit bindings). |
| | POST | `/databus/gateway/definition` | Create a definition; steps required (at least Start+End). |
| | PUT | `/databus/gateway/definition` | Update and auto-refresh the corresponding flow. |
| | DELETE | `/databus/gateway/definition/{id}` | Delete and unregister the flow. |
| API gateway | POST | `/databus/gateway/invoke` | Admin-side debug invocation. |
| | GET | `/databus/gateway/definitions` | Fetch currently online definitions (for canary/gateway caches). |
| | POST | `/databus/gateway/cache/refresh` | Force-refresh all flow caches. |
| API versions | GET | `/databus/gateway/version/get?id=` | Version details (snapshotData restored automatically). |
| | GET | `/databus/gateway/version/page` | Paged list. |
| | GET | `/databus/gateway/version/list?apiId=` | List all versions of an API. |
| | PUT | `/databus/gateway/version/rollback` | Roll back by `id + remark`. |
| | GET | `/databus/gateway/version/compare` | Diff comparison (sourceId/targetId). |
| Client credentials | GET | `/databus/gateway/credential/page` | Paged list. |
| | GET | `/databus/gateway/credential/get?id=` | Details (including anonymous config). |
| | POST | `/databus/gateway/credential/create` | Create a credential. |
| | PUT | `/databus/gateway/credential/update` | Update. |
| | DELETE | `/databus/gateway/credential/delete?id=` | Delete. |
| | GET | `/databus/gateway/credential/list-simple` | For dropdowns. |
| Rate-limit policies | GET | `/databus/gateway/policy/rate-limit/page` | Paged search. |
| | GET | `/databus/gateway/policy/rate-limit/{id}` | Details. |
| | GET | `/databus/gateway/policy/rate-limit/simple-list` | Simplified list. |
| | POST/PUT/DELETE | `/databus/gateway/policy/rate-limit` | Create/update/delete. |
| Access logs | GET | `/databus/gateway/access-log/page` | Paged (requires the `databus:gateway:access-log:query` permission). |
| | GET | `/databus/gateway/access-log/get?id=` | Single record (API description filled in automatically). |
> All endpoints return the `CommonResult` wrapper with fields `code/message/data`. Refer to the corresponding VOs (under `controller.admin.gateway.vo`) when needed.
## 5. API Lifecycle Management Essentials
1. **State machine**: `ApiStatusEnum` (draft / online / offline / deprecated). Integration Flows load only `ONLINE` definitions.
2. **Version snapshots**: every save writes to `databus_api_version`; `snapshotData` enables one-click restore (the `rollback` endpoint).
3. **Transform validation**: saving validates that a `TransformPhaseEnum` is not duplicated at the same level and that Start/End are unique and at the head/tail.
4. **Cache refresh**
   - Per API: create/update/delete automatically calls `IntegrationFlowManager.refresh(apiCode, version)`.
   - Full: administrators can call `/databus/gateway/cache/refresh` as a fallback.
## 6. Gateway Request Path and Response Format
- **Default base path**: `/admin-api/databus/api/portal` (overridable via `databus.api-portal.base-path`; the legacy `/databus/api/portal` is still accepted).
- **Final path**: `{basePath}/{apiCode}/{version}`, e.g. `/admin-api/databus/api/portal/order.create/v1`.
- **Supported methods**: GET/POST/PUT/DELETE/PATCH, all mapped to `ApiInvocationContext.httpMethod`.
- **Response envelope**:
```json
{
  "code": 200,
  "message": "OK",
  "response": { "bizField": "value" },
  "traceId": "c8a3d52f-..."
}
```
> `code` matches the HTTP status; `response` is the business body after the API transforms; all errors use the same envelope (when response encryption is enabled, a Base64 string is returned instead).
## 7. Key Configuration (`application.yml`)
```yaml
databus:
  api-portal:
    base-path: /admin-api/databus/api/portal
    allowed-ips: [10.0.0.0/24] # empty means allow all
    denied-ips: []
    enable-tenant-header: true
    tenant-header: ZT-Tenant-Id
    enable-audit: true
    enable-rate-limit: true
    security:
      enabled: true
      signature-type: MD5 # or SHA256
      encryption-type: AES # or DES
      allowed-clock-skew-seconds: 300
      nonce-ttl-seconds: 600
      require-body-encryption: true
      encrypt-response: true
```
> `GatewaySecurityFilter` registers itself at highest precedence +10, ensuring requests under this path go through the security checks first.
## 8. Third-Party Invocation Walkthrough
### 8.1 Prerequisites
1. **Request a credential**: create an `API client credential` in the admin UI to obtain:
   - `appId` (for the `ZT-App-Id` header)
   - `encryptionKey` (for AES/DES symmetric encryption; the server decrypts with `CryptoSignatureUtils.decrypt`)
   - `encryptionType` and `signatureType`
   - with `allowAnonymous` = true, a fixed system user must be selected (the server issues an internal JWT automatically)
2. **Identify the API**: note the `apiCode`, `version`, request method, and the input/transform contract.
3. **Network allowlist**: add the third party's egress IP to `allowed-ips`, otherwise 403 is returned immediately.
4. **Redis requirement**: Redis must be available (used for nonces, replay protection, and rate-limit counters).
### 8.2 Building the Request
| Step | Action | Notes |
| --- | --- | --- |
| 1 | Generate a timestamp | `timestamp = System.currentTimeMillis()`, within ≤ 300s of server time. |
| 2 | Generate a nonce | `nonce` length ≥ 8; e.g. `UUID.randomUUID().toString().replace("-", "")`. |
| 3 | Prepare the plaintext body | e.g. `{"orderNo":"SO20251120001"}`, referred to as `plainBody`. |
| 4 | Compute the signature | Put all signed fields into a map (see the next subsection) and follow the same rules as `CryptoSignatureUtils.verifySignature`: sort keys, skip the `signature` field, join `key=value` with `&`, then hash with `MD5/SHA256`; assign the result to `ZT-Signature`. *Note: the signature uses the plaintext body.* A Java sketch follows below. |
| 5 | Encrypt the request body | Symmetrically encrypt `plainBody` with the credential's `encryptionKey + encryptionType`; the Base64 result becomes the HTTP body (Content-Type may be `text/plain` or `application/json`). |
| 6 | Assemble the headers | `ZT-App-Id`, `ZT-Timestamp`, `ZT-Nonce`, `ZT-Signature`, `ZT-Tenant-Id` (optional), `X-Client-Id` (recommended; tied to rate limiting); set `Authorization` if you bring your own JWT. |
| 7 | Send the request | URL = `https://{host}{basePath}/{apiCode}/{version}`, with the method matching the API definition. |
#### Signature Field Example
```
appId=demo-app
&body={"orderNo":"SO20251120001"}
&nonce=0c5e2df9a1
&timestamp=1732070400000
```
- Query parameters are concatenated as `key=value` (multiple values joined with commas); the `signature` field is ignored automatically.
- If the request body is not JSON, it degrades to signing the whole string.
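A Java sketch of the signature computation under the rules above; the exact field set and the lowercase hex output are assumptions mirroring the described behavior of `CryptoSignatureUtils.verifySignature`, not its verified source:
```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.HexFormat;
import java.util.Map;
import java.util.TreeMap;

final class GatewaySignatureSketch {

    /** Builds the sorted key=value&... string and hashes it with MD5 or SHA-256. */
    static String sign(Map<String, String> fields, String algorithm) throws Exception {
        StringBuilder canonical = new StringBuilder();
        for (Map.Entry<String, String> e : new TreeMap<>(fields).entrySet()) {
            if ("signature".equals(e.getKey())) {
                continue; // the signature field itself never participates
            }
            if (canonical.length() > 0) {
                canonical.append('&');
            }
            canonical.append(e.getKey()).append('=').append(e.getValue());
        }
        MessageDigest md = MessageDigest.getInstance(algorithm); // "MD5" or "SHA-256"
        return HexFormat.of().formatHex(md.digest(canonical.toString().getBytes(StandardCharsets.UTF_8)));
    }

    public static void main(String[] args) throws Exception {
        String signature = sign(Map.of(
                "appId", "demo-app",
                "body", "{\"orderNo\":\"SO20251120001\"}", // plaintext body, per step 4
                "nonce", "0c5e2df9a1",
                "timestamp", "1732070400000"), "MD5");
        System.out.println(signature); // value for the ZT-Signature header
    }
}
```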
#### cURL Example
```bash
curl -X POST "https://gw.example.com/admin-api/databus/api/portal/order.create/v1" \
-H "ZT-App-Id: demo-app" \
-H "ZT-Timestamp: 1732070400000" \
-H "ZT-Nonce: 0c5e2df9a1" \
-H "ZT-Signature: 8e377..." \
-H "X-Client-Id: mall" \
-H "Content-Type: text/plain" \
-d "Q2hhcnNldGV4dC1CYXNlNjQgZW5jcnlwdGVkIGJvZHk="
```
> The actual `-d` content must be the AES/DES-encrypted Base64 string; a sketch of that encryption (and the matching response decryption) follows.
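A hedged sketch of the symmetric body encryption (step 5) and the matching response decryption (section 8.3), assuming AES in ECB/PKCS5Padding mode and a key of valid AES length (16/24/32 bytes); confirm the actual cipher mode with the gateway side:
```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

final class GatewayBodyCryptoSketch {

    /** Encrypts the plaintext body into the Base64 string sent as the HTTP body. */
    static String encrypt(String plainBody, String encryptionKey) throws Exception {
        Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); // mode/padding assumed
        cipher.init(Cipher.ENCRYPT_MODE,
                new SecretKeySpec(encryptionKey.getBytes(StandardCharsets.UTF_8), "AES"));
        return Base64.getEncoder().encodeToString(
                cipher.doFinal(plainBody.getBytes(StandardCharsets.UTF_8)));
    }

    /** Decrypts an encrypted response envelope with the same key. */
    static String decrypt(String base64Body, String encryptionKey) throws Exception {
        Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");
        cipher.init(Cipher.DECRYPT_MODE,
                new SecretKeySpec(encryptionKey.getBytes(StandardCharsets.UTF_8), "AES"));
        return new String(cipher.doFinal(Base64.getDecoder().decode(base64Body)), StandardCharsets.UTF_8);
    }
}
```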
### 8.3 Handling the Response
1. Read the HTTP status and `ApiGatewayResponse.code/message/traceId`.
2. If `security.encrypt-response=true`, the response body itself is an encrypted string; decrypt it with the same `encryptionKey/encryptionType` to get JSON, then parse the `response` field.
3. `traceId` can be correlated with backend logs and the access-log page for troubleshooting.
### 8.4 Errors and Retry Strategy
| Scenario | Symptom | Suggested handling |
| --- | --- | --- |
| Invalid timestamp/nonce | HTTP 401, `message` = `请求到达时间超出 300s`/`重复请求` | Sync server clocks; `nonce` must not repeat (Redis TTL defaults to 600s). |
| Signature failure | HTTP 401, `message` = `签名校验失败` | Check the signature string, character encoding, and letter case. |
| Missing key | HTTP 500, `message` = `应用未配置加密密钥` | Fill in the key and algorithm on the credential, or disable mandatory encryption. |
| Rate limited | HTTP 429, `message` = `请求触发限流策略` | Adjust per-`X-Client-Id` concurrency or raise the policy's `limit/windowSeconds`. |
| API not published | HTTP 404, `message` = `API 定义未发布或已下线` | Confirm `status=ONLINE` and refresh the cache. |
## 9. Rate-Limit Policy Configuration
- Stored in `ApiPolicyRateLimitDO.config` (JSON); structure example:
```json
{
  "limit": 1000,
  "windowSeconds": 60,
  "keyTemplate": "${apiCode}:${tenantId}:${header.X-Client-Id}" // reserved for future use
}
```
- The current default implementation reads `limit` (default 100) and `windowSeconds` (default 60).
- Redis key format: `databus:api:rl:{apiCode}:{version}:{X-Client-Id}`; the expiry is set when the counter first appears (see the sketch below).
- When the limit is hit, `API_RATE_LIMIT_EXCEEDED` is thrown and the access log is marked with `status=1/2`.
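A minimal sketch of the fixed-window counting described above, assuming Spring Data Redis's `StringRedisTemplate`; the real `DefaultRateLimitPolicyEvaluator` may differ in detail:
```java
import java.time.Duration;
import org.springframework.data.redis.core.StringRedisTemplate;

final class FixedWindowRateLimiterSketch {

    private final StringRedisTemplate redis;

    FixedWindowRateLimiterSketch(StringRedisTemplate redis) {
        this.redis = redis;
    }

    /** Fixed-window check over the documented key format; true = request allowed. */
    boolean allow(String apiCode, String version, String clientId, long limit, long windowSeconds) {
        String key = "databus:api:rl:" + apiCode + ":" + version + ":" + clientId;
        Long count = redis.opsForValue().increment(key);
        if (count != null && count == 1L) {
            // First hit in the window: start the expiry, as described above.
            redis.expire(key, Duration.ofSeconds(windowSeconds));
        }
        return count != null && count <= limit;
    }
}
```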
## 10. Access Log Field Reference
| Field | Description |
| --- | --- |
| `traceId` | From `TracerUtils`; searchable in logs and distributed tracing. |
| `requestHeaders`, `requestBody`, `responseBody` | Truncated to 4000 characters by default; stored as serialized JSON. |
| `status` | 0=success, 1=client error, 2=server error, 3=unknown. |
| `stepResults` | Serialized step execution list (see `ApiStepResult`), with `request/response/elapsed/error`. |
| `extra` | Extra variables/attributes for custom-context troubleshooting. |
> Use `/databus/gateway/access-log/page` with `traceId` or `apiCode` filters to locate third-party issues quickly.

pom.xml (38 lines changed)

@@ -18,9 +18,9 @@
<module>zt-module-infra</module>
<!-- <module>zt-module-bpm</module>-->
<module>zt-module-report</module>
<!-- <module>zt-module-mp</module>-->
<!--<module>zt-module-mp</module>-->
<!-- <module>zt-module-ai</module>-->
<module>zt-module-template</module>
<!-- <module>zt-module-template</module>-->
<!-- <module>zt-module-iot</module>-->
<module>zt-module-databus</module>
<!-- <module>zt-module-rule</module>-->
@@ -32,7 +32,7 @@
<url>https://github.com/YunaiV/ruoyi-vue-pro</url>
<properties>
<revision>3.0.43</revision>
<revision>3.0.45</revision>
<!-- Maven 相关 -->
<java.version>17</java.version>
<maven.compiler.source>${java.version}</maven.compiler.source>
@@ -205,8 +205,13 @@
<name>中铜 ZStack 私服</name>
<url>http://172.16.46.63:30708/repository/test/</url>
<releases>
<enabled>true</enabled>
<updatePolicy>always</updatePolicy>
<checksumPolicy>warn</checksumPolicy>
</releases>
<snapshots>
<enabled>true</enabled>
<updatePolicy>always</updatePolicy>
</snapshots>
</repository>
</repositories>
@@ -232,8 +237,8 @@
<config.server-addr>172.16.46.63:30848</config.server-addr>
<config.namespace>dev</config.namespace>
<config.group>DEFAULT_GROUP</config.group>
<config.username/>
<config.password/>
<config.username>nacos</config.username>
<config.password>P@ssword25</config.password>
<config.version>1.0.0</config.version>
</properties>
</profile>
@@ -245,8 +250,8 @@
<config.server-addr>172.16.46.63:30848</config.server-addr>
<config.namespace>prod</config.namespace>
<config.group>DEFAULT_GROUP</config.group>
<config.username/>
<config.password/>
<config.username>nacos</config.username>
<config.password>P@ssword25</config.password>
<config.version>1.0.0</config.version>
</properties>
</profile>
@@ -258,8 +263,8 @@
<config.server-addr>172.16.46.63:30848</config.server-addr>
<config.namespace>local</config.namespace>
<config.group>DEFAULT_GROUP</config.group>
<config.username/>
<config.password/>
<config.username>nacos</config.username>
<config.password>P@ssword25</config.password>
<config.version>1.0.0</config.version>
</properties>
</profile>
@@ -269,6 +274,19 @@
<config.namespace>chenbowen</config.namespace>
</properties>
</profile>
<profile>
<id>qsj</id>
<properties>
<env.name>dev</env.name>
<!--Nacos 配置-->
<config.server-addr>172.16.46.63:30848</config.server-addr>
<config.namespace>qsj</config.namespace>
<config.group>DEFAULT_GROUP</config.group>
<config.username>nacos</config.username>
<config.password>P@ssword25</config.password>
<config.version>1.0.0</config.version>
</properties>
</profile>
</profiles>
</project>

sql/dm/bpm.sql Normal file (581 lines)

File diff suppressed because one or more lines are too long


@@ -0,0 +1,15 @@
-- DM8 dictionary import button permission script
-- Idempotent: remove the old import permission button, then re-insert it
DELETE FROM system_role_menu WHERE menu_id = 103001;
DELETE FROM system_menu WHERE id = 103001;
INSERT INTO system_menu (
id, name, permission, type, sort, parent_id, path, icon, component, component_name,
status, visible, keep_alive, always_show, creator, create_time, updater, update_time, deleted
) VALUES (
103001, '字典导入', 'system:dict:import', 3, 6, 105, '#', '#', '', NULL,
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'
);
-- To grant it to specific roles, manually insert the mappings into system_role_menu


@@ -0,0 +1,44 @@
-- Process/operation page menu and permission initialization script (DM8)
-- Mounted under the base data directory by default (parent_id = 6200); change parent_id if needed.
DELETE FROM system_menu
WHERE id IN (6207, 620701, 620702, 620703, 620704, 620705);
INSERT INTO system_menu (
id, name, permission, type, sort, parent_id,
path, icon, component, component_name,
status, visible, keep_alive, always_show,
creator, create_time, updater, update_time, deleted
) VALUES (
6207, '工艺工序', 'base:processing-infomation-operation:query', 2, 70, 6200,
'processing-infomation-operation', 'ep:operation', 'base/processinginfomationoperation/index', 'ProcessingInfomationOperation',
0, '1', '0', '1',
'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'
);
INSERT INTO system_menu (
id, name, permission, type, sort, parent_id,
path, icon, component, component_name,
status, visible, keep_alive, always_show,
creator, create_time, updater, update_time, deleted
) VALUES
(620701, '工艺工序查询', 'base:processing-infomation-operation:query', 3, 1, 6207,
'', '', '', '',
0, '1', '1', '1',
'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
(620702, '工艺工序创建', 'base:processing-infomation-operation:create', 3, 2, 6207,
'', '', '', '',
0, '1', '1', '1',
'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
(620703, '工艺工序更新', 'base:processing-infomation-operation:update', 3, 3, 6207,
'', '', '', '',
0, '1', '1', '1',
'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
(620704, '工艺工序删除', 'base:processing-infomation-operation:delete', 3, 4, 6207,
'', '', '', '',
0, '1', '1', '1',
'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
(620705, '工艺工序导出', 'base:processing-infomation-operation:export', 3, 5, 6207,
'', '', '', '',
0, '1', '1', '1',
'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0');


@@ -0,0 +1,33 @@
-- =============================================
-- Databus API version history menu permissions (memento pattern)
-- Features:
-- 1. View version history
-- 2. View version details
-- 3. Roll back versions
-- 4. Compare versions
-- =============================================
-- Remove the old version management menu permissions
DELETE FROM system_menu WHERE id IN (650107, 650108, 650109, 650110);
-- Insert the new version history permissions
INSERT INTO system_menu (id, name, permission, type, sort, parent_id, path, icon, component, component_name,
status, visible, keep_alive, always_show, creator, create_time, updater, update_time, deleted)
VALUES
-- Query the version history list
(650107, 'API版本历史', 'databus:gateway:version:query', 3, 7, 6501, '', '', '', '',
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
-- View version details
(650108, 'API版本详情', 'databus:gateway:version:detail', 3, 8, 6501, '', '', '', '',
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
-- Roll back a version
(650109, 'API版本回滚', 'databus:gateway:version:rollback', 3, 9, 6501, '', '', '', '',
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0'),
-- Compare versions
(650110, 'API版本对比', 'databus:gateway:version:compare', 3, 10, 6501, '', '', '', '',
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0');
-- Notes
-- 1. No "create version" permission is needed; versions are created automatically
-- 2. No "delete version" permission is needed; version history cannot be deleted
-- 3. Only the four core functions are kept: query, details, rollback, compare


@@ -0,0 +1,52 @@
-- =============================================
-- Databus API version history table (memento pattern)
-- Features:
-- 1. A version record is created automatically every time an API config is saved
-- 2. Version numbers auto-increment (v1, v2, v3...)
-- 3. The full history chain is kept and cannot be deleted
-- 4. One-click rollback to any historical version
-- 5. Version comparison supported
-- =============================================
-- Drop the table if it already exists
DROP TABLE IF EXISTS databus_api_version;
-- Create the version history table (DM8 syntax)
CREATE TABLE databus_api_version (
id BIGINT NOT NULL,
api_id BIGINT NOT NULL,
version_number INTEGER NOT NULL,
snapshot_data CLOB NOT NULL,
description VARCHAR(500),
is_current NUMBER(1) DEFAULT 0 NOT NULL,
operator VARCHAR(64),
creator VARCHAR(64) DEFAULT '' NOT NULL,
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
updater VARCHAR(64) DEFAULT '' NOT NULL,
update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
deleted NUMBER(1) DEFAULT 0 NOT NULL,
tenant_id BIGINT DEFAULT 0 NOT NULL,
CONSTRAINT pk_databus_api_version PRIMARY KEY (id)
);
-- Create indexes
CREATE INDEX idx_databus_api_version_api_id ON databus_api_version (api_id);
CREATE INDEX idx_databus_api_version_version_number ON databus_api_version (api_id, version_number);
CREATE INDEX idx_databus_api_version_is_current ON databus_api_version (api_id, is_current);
CREATE INDEX idx_databus_api_version_create_time ON databus_api_version (create_time);
CREATE INDEX idx_databus_api_version_operator ON databus_api_version (operator);
COMMENT ON TABLE databus_api_version IS '数据总线API版本历史表采用备忘录模式每次保存API时自动创建版本快照支持完整的版本历史追溯和回滚';
COMMENT ON COLUMN databus_api_version.id IS '主键ID';
COMMENT ON COLUMN databus_api_version.api_id IS 'API定义ID关联databus_api_definition表';
COMMENT ON COLUMN databus_api_version.version_number IS '版本号同一API下自动递增1,2,3...';
COMMENT ON COLUMN databus_api_version.snapshot_data IS 'API完整配置快照JSON格式包含definition、steps、transforms等所有信息';
COMMENT ON COLUMN databus_api_version.description IS '变更说明,记录本次修改的内容';
COMMENT ON COLUMN databus_api_version.is_current IS '是否为当前版本1=是0=否同一API只有一个当前版本';
COMMENT ON COLUMN databus_api_version.operator IS '操作人,记录谁创建了这个版本';
COMMENT ON COLUMN databus_api_version.creator IS '创建者';
COMMENT ON COLUMN databus_api_version.create_time IS '创建时间(版本创建时间)';
COMMENT ON COLUMN databus_api_version.updater IS '更新者';
COMMENT ON COLUMN databus_api_version.update_time IS '更新时间';
COMMENT ON COLUMN databus_api_version.deleted IS '是否删除(逻辑删除,实际不删除版本历史)';
COMMENT ON COLUMN databus_api_version.tenant_id IS '租户ID';

View File

@@ -0,0 +1,13 @@
-- Databus API access-log menu permission initialization (DM8)
-- Creates the access-log page and its query-button permission; any existing rows are removed first, then re-inserted.
DELETE FROM system_menu WHERE id IN (6504, 650401);
INSERT INTO system_menu (id, name, permission, type, sort, parent_id, path, icon, component, component_name,
status, visible, keep_alive, always_show, creator, create_time, updater, update_time, deleted)
VALUES (6504, '访问日志', 'databus:gateway:access-log:query', 2, 40, 6500, 'access-log', 'ep:document', 'databus/accesslog/index', 'DatabusAccessLog',
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0');
INSERT INTO system_menu (id, name, permission, type, sort, parent_id, path, icon, component, component_name,
status, visible, keep_alive, always_show, creator, create_time, updater, update_time, deleted)
VALUES (650401, '访问日志查询', 'databus:gateway:access-log:query', 3, 1, 6504, '', '', '', '',
0, '1', '1', '1', 'admin', CURRENT_TIMESTAMP, 'admin', CURRENT_TIMESTAMP, '0');

View File

@@ -0,0 +1,74 @@
CREATE TABLE "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"
(
"ID" BIGINT NOT NULL,
"TRACE_ID" VARCHAR(64) DEFAULT NULL,
"API_CODE" VARCHAR(128) DEFAULT NULL,
"API_VERSION" VARCHAR(32) DEFAULT NULL,
"REQUEST_METHOD" VARCHAR(16) DEFAULT NULL,
"REQUEST_PATH" VARCHAR(512) DEFAULT NULL,
"REQUEST_QUERY" TEXT,
"REQUEST_HEADERS" TEXT,
"REQUEST_BODY" TEXT,
"RESPONSE_STATUS" INT DEFAULT NULL,
"RESPONSE_MESSAGE" VARCHAR(500) DEFAULT NULL,
"RESPONSE_BODY" TEXT,
"STATUS" SMALLINT DEFAULT 3 NOT NULL,
"ERROR_CODE" VARCHAR(100) DEFAULT NULL,
"ERROR_MESSAGE" VARCHAR(1000) DEFAULT NULL,
"EXCEPTION_STACK" TEXT,
"CLIENT_IP" VARCHAR(64) DEFAULT NULL,
"USER_AGENT" VARCHAR(512) DEFAULT NULL,
"DURATION" BIGINT DEFAULT NULL,
"REQUEST_TIME" DATETIME(6) DEFAULT CURRENT_TIMESTAMP NOT NULL,
"RESPONSE_TIME" DATETIME(6) DEFAULT NULL,
"STEP_RESULTS" TEXT,
"EXTRA" TEXT,
"CREATOR" VARCHAR(64) DEFAULT '' NOT NULL,
"CREATE_TIME" DATETIME(6) DEFAULT CURRENT_TIMESTAMP NOT NULL,
"UPDATER" VARCHAR(64) DEFAULT '' NOT NULL,
"UPDATE_TIME" DATETIME(6) DEFAULT CURRENT_TIMESTAMP NOT NULL,
"DELETED" BIT DEFAULT '0' NOT NULL,
"TENANT_ID" BIGINT DEFAULT 0 NOT NULL,
NOT CLUSTER PRIMARY KEY("ID")) STORAGE(ON "MAIN", CLUSTERBTR) ;
COMMENT ON TABLE "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG IS 'Databus API 访问日志表';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."API_CODE" IS 'API 编码';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."API_VERSION" IS 'API 版本';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."CLIENT_IP" IS '客户端 IP';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."CREATE_TIME" IS '创建时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."CREATOR" IS '创建者';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."DELETED" IS '是否删除';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."DURATION" IS '请求耗时(毫秒)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."ERROR_CODE" IS '业务错误码';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."ERROR_MESSAGE" IS '错误信息';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."EXCEPTION_STACK" IS '异常堆栈';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."EXTRA" IS '额外调试信息JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."ID" IS '日志主键';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_BODY" IS '请求体JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_HEADERS" IS '请求头JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_METHOD" IS '请求方法';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_PATH" IS '请求路径';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_QUERY" IS '请求查询参数JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_TIME" IS '请求时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_BODY" IS '响应体JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_MESSAGE" IS '响应提示信息';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_STATUS" IS '响应 HTTP 状态码';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_TIME" IS '响应时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."STATUS" IS '访问状态0-成功 1-客户端错误 2-服务端错误 3-未知';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."STEP_RESULTS" IS '执行步骤结果JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."TENANT_ID" IS '租户编号';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."TRACE_ID" IS '追踪 ID';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."UPDATER" IS '更新者';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."UPDATE_TIME" IS '更新时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."USER_AGENT" IS 'User-Agent';
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_TRACE" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("TRACE_ID" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_CODE" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("API_CODE" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_METHOD" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("REQUEST_METHOD" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_STATUS" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("STATUS" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_RESP_STATUS" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("RESPONSE_STATUS" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_REQUEST_TIME" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("REQUEST_TIME" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_CLIENT_IP" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("CLIENT_IP" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_TENANT" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("TENANT_ID" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;

View File

@@ -0,0 +1,52 @@
-- Permission-supervision button and API permission (DM8 only)
-- Make sure menu id 1068 is not already in use before running
DELETE FROM system_menu WHERE id = 1068;
INSERT INTO system_menu (
id,
name,
permission,
type,
sort,
parent_id,
path,
icon,
component,
component_name,
status,
visible,
keep_alive,
always_show,
creator,
create_time,
updater,
update_time,
deleted
)
SELECT
1068,
'权限监督',
'system:permission:user-permission-supervision',
3,
9,
101,
'',
'',
'',
NULL,
0,
'1',
'1',
'1',
'admin',
'2025-10-29 00:00:00',
'',
'2025-10-29 00:00:00',
'0'
FROM dual
WHERE NOT EXISTS (
SELECT 1
FROM system_menu
WHERE id = 1068
);

View File

@@ -0,0 +1,8 @@
-- ---------------------------------------------------------------------------
-- Script name : 组织物料状态字段补充_20251105.sql
-- Target DB   : DM8
-- Purpose     : Add the missing status column STS to bse_dept_mtrl so queries no longer fail on a non-existent column
-- Impact      : Only adds and initializes the STS column, default '1' (active)
-- Back up the affected data before running
-- ---------------------------------------------------------------------------
ALTER TABLE BSE_DEPT_MTRL ADD STS VARCHAR2(5) DEFAULT '1';

View File

@@ -26,7 +26,7 @@
<url>https://github.com/YunaiV/ruoyi-vue-pro</url>
<properties>
<revision>3.0.43</revision>
<revision>3.0.45</revision>
<flatten-maven-plugin.version>1.6.0</flatten-maven-plugin.version>
<!-- Unified dependency management -->
<spring.boot.version>3.4.5</spring.boot.version>
@@ -56,13 +56,14 @@
<!-- Service-assurance related -->
<lock4j.version>2.2.7</lock4j.version>
<!-- Monitoring related -->
<skywalking.version>9.0.0</skywalking.version>
<skywalking.version>9.5.0</skywalking.version>
<spring-boot-admin.version>3.4.5</spring-boot-admin.version>
<opentracing.version>0.33.0</opentracing.version>
<!-- Test related -->
<podam.version>8.0.2.RELEASE</podam.version>
<jedis-mock.version>1.1.4</jedis-mock.version>
<mockito-inline.version>5.2.0</mockito-inline.version>
<okhttp3.version>4.12.0</okhttp3.version>
<!-- BPM workflow related -->
<flowable.version>7.0.1</flowable.version>
<!-- Utility related -->
@@ -86,6 +87,8 @@
<netty.version>4.1.116.Final</netty.version>
<mqtt.version>1.2.5</mqtt.version>
<pf4j-spring.version>0.9.0</pf4j-spring.version>
<!-- Rule engine -->
<liteflow.version>2.15.1</liteflow.version>
<vertx.version>4.5.13</vertx.version>
<!-- Third-party cloud services -->
<commons-io.version>2.17.0</commons-io.version>
@@ -468,6 +472,12 @@
<version>${podam.version}</version>
</dependency>
<dependency>
<groupId>com.squareup.okhttp3</groupId>
<artifactId>okhttp</artifactId>
<version>${okhttp3.version}</version>
</dependency>
<!-- Workflow related -->
<dependency>
<groupId>org.flowable</groupId>
@@ -661,6 +671,13 @@
</exclusions>
</dependency>
<!-- Rule engine -->
<dependency>
<groupId>com.yomahub</groupId>
<artifactId>liteflow-spring-boot-starter</artifactId>
<version>${liteflow.version}</version>
</dependency>
<!-- PF4J -->
<dependency>
<groupId>org.pf4j</groupId>

View File

@@ -52,6 +52,12 @@
<scope>provided</scope> <!-- provided scope: only needed by utility classes -->
</dependency>
<dependency>
<groupId>org.springframework.data</groupId>
<artifactId>spring-data-redis</artifactId>
<scope>provided</scope> <!-- provided scope: only needed by utility classes -->
</dependency>
<dependency>
<groupId>jakarta.servlet</groupId>
<artifactId>jakarta.servlet-api</artifactId>
@@ -151,6 +157,12 @@
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,31 @@
package com.zt.plat.framework.common.annotation;
import java.lang.annotation.*;
/**
* Marks a field in a page result whose values should be summed.
* <p>
* When no column is specified explicitly, the database column mapped to the entity field is used.
* <p>
* {@link #exist()} can declare that the field does not exist in the table schema, equivalent to adding
* {@code @TableField(exist = false)}, so a DO can declare transient fields dedicated to summary results.
*/
@Documented
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface PageSum {
/**
* Custom database column name or expression to sum; defaults to the column mapped to the entity field when unset.
*/
String column() default "";
/**
* Whether the annotated field is declared as a real database column.
* <p>
* When set to {@code false}, the framework automatically gives the field the effect of {@code @TableField(exist = false)},
* which suits transient statistics fields that are only returned in page responses.
*/
boolean exist() default false;
}
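
A minimal usage sketch (the entity, table, and columns below are invented for illustration; the exist semantics follow the annotation's javadoc):

import com.baomidou.mybatisplus.annotation.TableName;
import com.zt.plat.framework.common.annotation.PageSum;
import java.math.BigDecimal;

// Hypothetical entity: "demo_order" and its columns are assumptions, not project code.
@TableName("demo_order")
public class DemoOrderDO {

    // Physical column: summed using the column mapped from the field name
    @PageSum(exist = true)
    private BigDecimal orderAmount;

    // Summary-only field: summed via a custom expression; exist defaults to false,
    // so it behaves like a field annotated with @TableField(exist = false)
    @PageSum(column = "order_amount * discount_rate")
    private BigDecimal discountedAmount;
}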

View File

@@ -11,7 +11,7 @@ public class CompanyDeptInfo {
/**
* Company ID
*/
private Long companyId;
private String companyId;
/**
* Company name
*/
@@ -19,7 +19,7 @@ public class CompanyDeptInfo {
/**
* Department ID
*/
private Long deptId;
private String deptId;
/**
* Department name
*/

View File

@@ -1,11 +1,18 @@
package com.zt.plat.framework.common.pojo;
import com.fasterxml.jackson.annotation.JsonAlias;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@Schema(description = "分页结果")
@Data
@@ -15,19 +22,31 @@ public final class PageResult<T> implements Serializable {
private List<T> list;
@Schema(description = "总量", requiredMode = Schema.RequiredMode.REQUIRED)
@JsonProperty("total")
@JsonAlias({"totalCount"})
private Long total;
@Schema(description = "汇总信息(字段需使用 @PageSum 标注)")
@JsonProperty("summary")
private Map<String, BigDecimal> summary;
public PageResult() {
this.list = new ArrayList<>();
this.summary = Collections.emptyMap();
}
public PageResult(List<T> list, Long total) {
this(list, total, null);
}
public PageResult(List<T> list, Long total, Map<String, BigDecimal> summary) {
this.list = list;
this.total = total;
setSummaryInternal(summary);
}
public PageResult(Long total) {
this.list = new ArrayList<>();
this.total = total;
this(new ArrayList<>(), total, null);
}
public static <T> PageResult<T> empty() {
@@ -38,4 +57,30 @@ public final class PageResult<T> implements Serializable {
return new PageResult<>(total);
}
public void setSummary(Map<String, BigDecimal> summary) {
setSummaryInternal(summary);
}
private void setSummaryInternal(Map<String, BigDecimal> summary) {
if (summary == null || summary.isEmpty()) {
this.summary = Collections.emptyMap();
return;
}
this.summary = new LinkedHashMap<>(summary);
}
public <R> PageResult<R> convert(List<R> newList) {
return new PageResult<>(newList, total, summary);
}
@JsonIgnore
public Long getTotalCount() {
return total;
}
@JsonIgnore
public void setTotalCount(Long totalCount) {
this.total = totalCount;
}
}
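
A short sketch of the new summary-aware API (the key and values are invented):

import com.zt.plat.framework.common.pojo.PageResult;
import java.math.BigDecimal;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PageResultSummaryDemo {
    public static void main(String[] args) {
        Map<String, BigDecimal> summary = new LinkedHashMap<>();
        summary.put("orderAmount", new BigDecimal("1234.50"));
        PageResult<String> page = new PageResult<>(List.of("a", "bb"), 2L, summary);
        // convert(...) swaps the element list while keeping total and summary
        PageResult<Integer> lengths = page.convert(List.of(1, 2));
        System.out.println(lengths.getTotal());   // 2
        System.out.println(lengths.getSummary()); // {orderAmount=1234.50}
    }
}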

View File

@@ -81,36 +81,40 @@ public class AsyncLatchUtils {
System.out.println("主流程开始,准备分发异步任务...");
System.out.println("主线程id:" + Thread.currentThread().getId());
// 2. Submit multiple async tasks
// Task 1: fetch user info
AsyncLatchUtils.submitTask(executorService, () -> {
try {
try {
System.out.println("任务一子线程id:" + Thread.currentThread().getId());
System.out.println("开始获取用户信息...");
Thread.sleep(1000); // simulate work
System.out.println("获取用户信息成功!");
} catch (InterruptedException e) {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
});
// Task 2: fetch order info
AsyncLatchUtils.submitTask(executorService, () -> {
try {
try {
System.out.println("任务二子线程id:" + Thread.currentThread().getId());
System.out.println("开始获取订单信息...");
Thread.sleep(1500); // simulate work
System.out.println("获取订单信息成功!");
} catch (InterruptedException e) {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
});
// Task 3: fetch product info
AsyncLatchUtils.submitTask(executorService, () -> {
try {
try {
System.out.println("任务三子线程id:" + Thread.currentThread().getId());
System.out.println("开始获取商品信息...");
Thread.sleep(500); // simulate work
System.out.println("获取商品信息成功!");
} catch (InterruptedException e) {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
});
@@ -118,12 +122,12 @@ public class AsyncLatchUtils {
System.out.println("所有异步任务已提交,主线程开始等待...");
// 3. Wait for all tasks to finish (at most 5 seconds)
boolean allTasksCompleted = AsyncLatchUtils.waitFor(5, TimeUnit.SECONDS);
boolean allTasksCompleted = AsyncLatchUtils.waitFor(5, TimeUnit.SECONDS);
// 4. Continue the main flow based on the wait result
if (allTasksCompleted) {
System.out.println("所有异步任务执行成功,主流程继续...");
} else {
} else {
System.err.println("有任务执行超时,主流程中断!");
}

View File

@@ -0,0 +1,112 @@
package com.zt.plat.framework.common.util.integration;
import cn.hutool.core.util.StrUtil;
import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import java.time.Duration;
/**
* Configuration controlling how requests to the ePlat share service behave.
*/
@Getter
@Setter
@Component
@ConfigurationProperties(prefix = "eplat.share")
public class ShareServiceProperties {
private static final String DEFAULT_TOKEN_ENDPOINT_PATH = "/eplat/oauth/token";
/**
* Base URL of the share service, e.g. https://example.com/share.
*/
private String urlPrefix;
/**
* OAuth client id.
*/
private String clientId;
/**
* OAuth client secret.
*/
private String clientSecret;
/**
* OAuth scope; defaults to read.
*/
private String scope = "read";
/**
* Redis cache key for the access token.
*/
private String tokenCacheKey = "eplat:cache:shareToken";
/**
* Redis cache key for the refresh token.
*/
private String refreshTokenCacheKey = "eplat:cache:shareRefreshToken";
/**
* Name of the request header that carries the token when calling the share service.
*/
private String tokenHeaderName = "Xplat-Token";
/**
* Token endpoint path; defaults to /eplat/oauth/token.
*/
private String tokenEndpointPath = DEFAULT_TOKEN_ENDPOINT_PATH;
/**
* Default access-token TTL (5000 seconds); keep it slightly shorter than the actual server-side expiry.
*/
private Duration tokenTtl = Duration.ofSeconds(5000);
/**
* Default refresh-token TTL; falls back to twice the access-token TTL when unset.
*/
private Duration refreshTokenTtl;
/**
* Builds the request URL for a specific service.
*
* @param serviceNo service number
* @return full request URL
*/
public String buildServiceUrl(String serviceNo) {
return normalizeBaseUrl(urlPrefix) + "/service/" + serviceNo;
}
/**
* Builds the token-request URL.
*
* @return token request URL
*/
public String buildTokenUrl() {
String base = normalizeBaseUrl(urlPrefix);
String path = StrUtil.prependIfMissing(tokenEndpointPath, "/");
return base + path;
}
/**
* Cache TTL for the refresh token.
*
* @return refresh-token TTL
*/
public Duration getRefreshTokenTtl() {
if (refreshTokenTtl != null) {
return refreshTokenTtl;
}
return tokenTtl.multipliedBy(2);
}
private static String normalizeBaseUrl(String url) {
if (StrUtil.isBlank(url)) {
throw new IllegalArgumentException("共享服务地址不能为空");
}
return StrUtil.removeSuffix(url.trim(), "/");
}
}

View File

@@ -0,0 +1,237 @@
package com.zt.plat.framework.common.util.integration;
import cn.hutool.core.util.StrUtil;
import com.zt.plat.framework.common.util.json.JsonUtils;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.core.ValueOperations;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.util.Assert;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestClientException;
import org.springframework.web.client.RestTemplate;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* Client helper for the ePlat share service: sends requests and refreshes the access token automatically.
*/
@Slf4j
public final class ShareServiceUtils {
private static final Duration MIN_CACHE_TTL = Duration.ofSeconds(1);
private static final ConcurrentMap<String, Lock> TOKEN_REFRESH_LOCKS = new ConcurrentHashMap<>();
private ShareServiceUtils() {
}
public static String callShareService(RestTemplate restTemplate,
StringRedisTemplate redisTemplate,
ShareServiceProperties properties,
String serviceNo,
String requestBody) {
return callShareService(restTemplate, redisTemplate, properties, serviceNo, (Object) requestBody);
}
public static String callShareService(RestTemplate restTemplate,
StringRedisTemplate redisTemplate,
ShareServiceProperties properties,
String serviceNo,
Object requestBody) {
Assert.notNull(restTemplate, "RestTemplate 不能为空");
Assert.notNull(redisTemplate, "StringRedisTemplate 不能为空");
Assert.notNull(properties, "ShareServiceProperties 不能为空");
Assert.hasText(serviceNo, "服务号不能为空");
String url = properties.buildServiceUrl(serviceNo);
String payload = convertRequestBody(requestBody);
log.info("共享服务调用地址:[{}],请求体:[{}]", url, payload);
String token = obtainAccessToken(restTemplate, redisTemplate, properties);
log.debug("共享服务服务号 [{}] 使用的 token 已获取", serviceNo);
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
headers.set(properties.getTokenHeaderName(), token);
HttpEntity<String> entity = new HttpEntity<>(payload, headers);
ResponseEntity<String> response = restTemplate.exchange(url, HttpMethod.POST, entity, String.class);
return Objects.requireNonNullElse(response.getBody(), "");
}
/**
* Obtains an access token for the share service; reusable for custom call scenarios.
*
* @param restTemplate {@link RestTemplate} used to call the share service
* @param redisTemplate {@link StringRedisTemplate} that caches the token
* @param properties share-service configuration
* @return access token for the share service
*/
public static String getAccessToken(RestTemplate restTemplate,
StringRedisTemplate redisTemplate,
ShareServiceProperties properties) {
Assert.notNull(restTemplate, "RestTemplate 不能为空");
Assert.notNull(redisTemplate, "StringRedisTemplate 不能为空");
Assert.notNull(properties, "ShareServiceProperties 不能为空");
return obtainAccessToken(restTemplate, redisTemplate, properties);
}
private static String convertRequestBody(Object requestBody) {
if (requestBody == null) {
return "";
}
if (requestBody instanceof String str) {
return str;
}
if (requestBody instanceof byte[] bytes) {
return new String(bytes, StandardCharsets.UTF_8);
}
return JsonUtils.toJsonString(requestBody);
}
private static String obtainAccessToken(RestTemplate restTemplate,
StringRedisTemplate redisTemplate,
ShareServiceProperties properties) {
// Reuse a token straight from Redis when one is cached
ValueOperations<String, String> valueOps = redisTemplate.opsForValue();
String token = valueOps.get(properties.getTokenCacheKey());
if (StrUtil.isNotBlank(token)) {
return token;
}
// Fine-grained lock per cache key to avoid concurrent refreshes
Lock lock = TOKEN_REFRESH_LOCKS.computeIfAbsent(properties.getTokenCacheKey(), key -> new ReentrantLock());
lock.lock();
try {
token = valueOps.get(properties.getTokenCacheKey());
if (StrUtil.isNotBlank(token)) {
return token;
}
return refreshAccessToken(restTemplate, redisTemplate, properties, valueOps);
} finally {
lock.unlock();
}
}
private static String refreshAccessToken(RestTemplate restTemplate,
StringRedisTemplate redisTemplate,
ShareServiceProperties properties,
ValueOperations<String, String> valueOps) {
String refreshToken = valueOps.get(properties.getRefreshTokenCacheKey());
if (StrUtil.isNotBlank(refreshToken)) {
try {
return requestToken(restTemplate, redisTemplate, properties,
buildRefreshTokenParams(properties, refreshToken));
} catch (RuntimeException ex) {
log.warn("刷新共享服务 token 失败,准备回退为 client_credentials 模式", ex);
redisTemplate.delete(properties.getRefreshTokenCacheKey());
}
}
return requestToken(restTemplate, redisTemplate, properties,
buildClientCredentialsParams(properties));
}
private static MultiValueMap<String, String> buildClientCredentialsParams(ShareServiceProperties properties) {
MultiValueMap<String, String> params = baseTokenParams(properties);
params.add("grant_type", "client_credentials");
if (StrUtil.isNotBlank(properties.getScope())) {
params.add("scope", properties.getScope());
}
return params;
}
private static MultiValueMap<String, String> buildRefreshTokenParams(ShareServiceProperties properties,
String refreshToken) {
MultiValueMap<String, String> params = baseTokenParams(properties);
params.add("grant_type", "refresh_token");
params.add("refresh_token", refreshToken);
return params;
}
private static MultiValueMap<String, String> baseTokenParams(ShareServiceProperties properties) {
MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
Assert.hasText(properties.getClientId(), "clientId 不能为空");
Assert.hasText(properties.getClientSecret(), "clientSecret 不能为空");
params.add("client_id", properties.getClientId());
params.add("client_secret", properties.getClientSecret());
return params;
}
private static String requestToken(RestTemplate restTemplate,
StringRedisTemplate redisTemplate,
ShareServiceProperties properties,
MultiValueMap<String, String> body) {
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED);
HttpEntity<MultiValueMap<String, String>> entity = new HttpEntity<>(body, headers);
String tokenUrl = properties.buildTokenUrl();
log.info("共享服务获取 token 地址:[{}],授权方式:[{}]", tokenUrl, body.getFirst("grant_type"));
ResponseEntity<String> response;
try {
response = restTemplate.postForEntity(tokenUrl, entity, String.class);
} catch (RestClientException ex) {
throw new IllegalStateException("请求共享服务 token 失败", ex);
}
String responseBody = response.getBody();
if (StrUtil.isBlank(responseBody)) {
throw new IllegalStateException("共享服务返回的 token 内容为空");
}
TokenResponse tokenResponse = parseTokenResponse(responseBody);
cacheTokens(redisTemplate, properties, tokenResponse);
return tokenResponse.accessToken();
}
private static TokenResponse parseTokenResponse(String body) {
var node = JsonUtils.parseTree(body);
String accessToken = node.path("access_token").asText(null);
if (StrUtil.isBlank(accessToken)) {
throw new IllegalStateException("共享服务返回结果缺少 access_token 字段");
}
String refreshToken = node.path("refresh_token").asText(null);
long expiresIn = node.path("expires_in").asLong(-1);
long refreshExpiresIn = node.path("refresh_expires_in").asLong(-1);
return new TokenResponse(accessToken, refreshToken, expiresIn, refreshExpiresIn);
}
private static void cacheTokens(StringRedisTemplate redisTemplate,
ShareServiceProperties properties,
TokenResponse tokenResponse) {
// Write the latest access token and refresh token back to the cache
ValueOperations<String, String> valueOps = redisTemplate.opsForValue();
Duration tokenTtl = resolveTtl(tokenResponse.expiresIn(), properties.getTokenTtl());
valueOps.set(properties.getTokenCacheKey(), tokenResponse.accessToken(), tokenTtl);
if (StrUtil.isNotBlank(tokenResponse.refreshToken())) {
Duration refreshTtl = resolveTtl(tokenResponse.refreshExpiresIn(), properties.getRefreshTokenTtl());
valueOps.set(properties.getRefreshTokenCacheKey(), tokenResponse.refreshToken(), refreshTtl);
}
}
private static Duration resolveTtl(long expiresInSeconds, Duration fallback) {
Duration effectiveFallback = fallback;
if (effectiveFallback == null || effectiveFallback.compareTo(MIN_CACHE_TTL) < 0) {
effectiveFallback = Duration.ofMinutes(5);
}
if (expiresInSeconds > 0) {
Duration candidate = Duration.ofSeconds(expiresInSeconds);
if (candidate.compareTo(MIN_CACHE_TTL) < 0) {
candidate = MIN_CACHE_TTL;
}
return candidate.compareTo(effectiveFallback) < 0 ? candidate : effectiveFallback;
}
return effectiveFallback;
}
private record TokenResponse(String accessToken, String refreshToken, long expiresIn, long refreshExpiresIn) {
}
}
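
A caller sketch; the bean wiring and the service number "S0001" are assumptions, and the eplat.share.* properties must already be configured:

import com.zt.plat.framework.common.util.integration.ShareServiceProperties;
import com.zt.plat.framework.common.util.integration.ShareServiceUtils;
import jakarta.annotation.Resource;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestTemplate;
import java.util.Map;

@Service
public class DemoShareClient { // hypothetical caller, not project code

    @Resource
    private RestTemplate restTemplate;
    @Resource
    private StringRedisTemplate stringRedisTemplate;
    @Resource
    private ShareServiceProperties shareServiceProperties;

    public String queryDemo() {
        // Token acquisition, caching, refresh, and fallback all happen inside the utility
        return ShareServiceUtils.callShareService(restTemplate, stringRedisTemplate,
                shareServiceProperties, "S0001", Map.of("pageNo", 1, "pageSize", 10));
    }
}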

View File

@@ -0,0 +1,43 @@
package com.zt.plat.framework.common.util.json.databind;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.ser.std.StdSerializer;
import java.io.IOException;
/**
* Serializer for Long / long arrays that writes every element as a string to avoid JavaScript precision loss.
*/
public class LongArraySerializer extends StdSerializer<Object> {
public static final LongArraySerializer INSTANCE = new LongArraySerializer();
private LongArraySerializer() {
super(Object.class);
}
@Override
public void serialize(Object value, JsonGenerator gen, SerializerProvider provider) throws IOException {
gen.writeStartArray();
if (value instanceof long[]) {
long[] array = (long[]) value;
for (long element : array) {
// A primitive long always has a value; delegate straight to NumberSerializer
NumberSerializer.INSTANCE.serialize(element, gen, provider);
}
gen.writeEndArray();
return;
}
Long[] array = (Long[]) value;
for (Long element : array) {
if (element == null) {
provider.defaultSerializeNull(gen);
continue;
}
NumberSerializer.INSTANCE.serialize(element, gen, provider);
}
gen.writeEndArray();
}
}

View File

@@ -0,0 +1,37 @@
package com.zt.plat.framework.common.util.json.databind;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.ser.std.StdSerializer;
import com.fasterxml.jackson.databind.type.TypeFactory;
import java.io.IOException;
import java.util.Collection;
/**
* Serializes the Long elements of a {@link Collection} as strings to avoid JavaScript precision loss.
*/
public class LongCollectionSerializer extends StdSerializer<Collection<?>> {
public static final LongCollectionSerializer INSTANCE = new LongCollectionSerializer();
private LongCollectionSerializer() {
super(TypeFactory.defaultInstance().constructCollectionType(Collection.class, Object.class));
}
@Override
public void serialize(Collection<?> value, JsonGenerator gen, SerializerProvider provider) throws IOException {
// Pass the collection and its size so Jackson can estimate the array bounds sensibly
gen.writeStartArray(value, value.size());
for (Object element : value) {
if (element == null) {
// Allow null elements in the collection, keeping Jackson's default null serialization
provider.defaultSerializeNull(gen);
continue;
}
// Route every Long/long element through NumberSerializer to preserve front-end precision
NumberSerializer.INSTANCE.serialize((Number) element, gen, provider);
}
gen.writeEndArray();
}
}

View File

@@ -0,0 +1,52 @@
package com.zt.plat.framework.common.util.json.databind;
import com.fasterxml.jackson.databind.BeanDescription;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializationConfig;
import com.fasterxml.jackson.databind.ser.BeanSerializerModifier;
import com.fasterxml.jackson.databind.type.ArrayType;
import com.fasterxml.jackson.databind.type.CollectionType;
import com.fasterxml.jackson.databind.type.CollectionLikeType;
import com.fasterxml.jackson.databind.JavaType;
/**
* Serialization enhancement for Long-related collections and arrays, ensuring they all use the custom Long serialization logic.
*/
public class LongTypeSerializerModifier extends BeanSerializerModifier {
@Override
public JsonSerializer<?> modifyCollectionSerializer(SerializationConfig config, CollectionType valueType,
BeanDescription beanDesc, JsonSerializer<?> serializer) {
// Containers such as List and Set switch to LongCollectionSerializer when they hold Long
return needsLongCollectionSerializer(valueType.getContentType()) ? LongCollectionSerializer.INSTANCE : serializer;
}
@Override
public JsonSerializer<?> modifyCollectionLikeSerializer(SerializationConfig config, CollectionLikeType valueType,
BeanDescription beanDesc, JsonSerializer<?> serializer) {
// Handle Long elements inside collection-like types (e.g. Page, Optional)
return needsLongCollectionSerializer(valueType.getContentType()) ? LongCollectionSerializer.INSTANCE : serializer;
}
@Override
public JsonSerializer<?> modifyArraySerializer(SerializationConfig config, ArrayType valueType,
BeanDescription beanDesc, JsonSerializer<?> serializer) {
// Use the shared array serializer for both long[] and Long[]
Class<?> rawClass = valueType.getRawClass();
if (long[].class.equals(rawClass)) {
return LongArraySerializer.INSTANCE;
}
if (Long[].class.equals(rawClass)) {
return LongArraySerializer.INSTANCE;
}
return serializer;
}
private boolean needsLongCollectionSerializer(JavaType contentType) {
if (contentType == null) {
return false;
}
Class<?> rawClass = contentType.getRawClass();
return Long.class.equals(rawClass) || Long.TYPE.equals(rawClass);
}
}
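
A registration sketch: the modifier attaches to an ObjectMapper through a SimpleModule. The actual string output depends on the project's custom NumberSerializer, which this sketch assumes writes large longs as strings:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import java.util.List;

public class LongModifierDemo {
    public static class Payload {
        public List<Long> ids = List.of(9007199254740993L); // beyond 2^53
        public long[] raw = {9007199254740993L};
    }

    public static void main(String[] args) throws Exception {
        SimpleModule module = new SimpleModule();
        // Collections of Long and long[]/Long[] arrays now use the custom serializers
        module.setSerializerModifier(new LongTypeSerializerModifier());
        ObjectMapper mapper = new ObjectMapper().registerModule(module);
        System.out.println(mapper.writeValueAsString(new Payload()));
    }
}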

View File

@@ -1,6 +1,14 @@
package com.zt.plat.framework.common.util.monitor;
import jakarta.servlet.http.HttpServletRequest;
import org.apache.commons.lang3.StringUtils;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;
import org.slf4j.MDC;
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import java.util.UUID;
/**
* Distributed tracing utility
@@ -9,7 +17,33 @@ import org.apache.skywalking.apm.toolkit.trace.TraceContext;
*
* @author ZT
*/
public class TracerUtils {
public final class TracerUtils {
/**
* Default placeholder SkyWalking returns when no agent is attached
*/
private static final String SKY_WALKING_PLACEHOLDER = "N/A";
/**
* Placeholder SkyWalking returns when the trace is ignored
*/
private static final String SKY_WALKING_IGNORED = "Ignored_Trace";
private static final String MDC_TRACE_ID_KEY = "traceId";
private static final String REQUEST_ATTRIBUTE_KEY = TracerUtils.class.getName() + ".TRACE_ID";
private static final String[] HEADER_CANDIDATES = {
"trace-id",
"Trace-Id",
"x-trace-id",
"X-Trace-Id",
"x-request-id",
"X-Request-Id"
};
/**
* Fallback traceId that keeps requests traceable even when no tracing agent is attached
*/
private static final InheritableThreadLocal<String> FALLBACK_TRACE_ID = new InheritableThreadLocal<>();
/**
* Private constructor
@@ -18,13 +52,121 @@ public class TracerUtils {
}
/**
* Gets the trace id, returning SkyWalking's TraceId directly;
* an empty string when absent!!!
* Gets the trace id.
* <p>
* Prefers SkyWalking's TraceId. When the trace context is missing or SkyWalking is not attached, a TraceId from the request context is reused first;
* otherwise a new fallback TraceId is generated and cached in the current thread, the request context, and the logging MDC so downstream components can reuse it.
*
* @return trace id
*/
public static String getTraceId() {
return TraceContext.traceId();
String traceId = TraceContext.traceId();
if (isValidTraceId(traceId)) {
cacheTraceId(traceId);
return traceId;
}
String cached = resolveCachedTraceId();
if (StringUtils.isNotBlank(cached)) {
return cached;
}
String generated = generateFallbackTraceId();
cacheTraceId(generated);
return generated;
}
/**
* Manually binds an externally supplied TraceId, e.g. when consuming messages or handling async tasks.
*
* @param traceId trace id
*/
public static void bindTraceId(String traceId) {
if (StringUtils.isBlank(traceId)) {
return;
}
cacheTraceId(traceId.trim());
}
/**
* Clears the fallback traceId bound to the current thread to avoid polluting reused pool threads.
*/
public static void clear() {
FALLBACK_TRACE_ID.remove();
MDC.remove(MDC_TRACE_ID_KEY);
HttpServletRequest request = currentRequest();
if (request != null) {
request.removeAttribute(REQUEST_ATTRIBUTE_KEY);
}
}
private static boolean isValidTraceId(String traceId) {
if (StringUtils.isBlank(traceId)) {
return false;
}
if (StringUtils.equalsIgnoreCase(traceId, SKY_WALKING_PLACEHOLDER)) {
return false;
}
return !StringUtils.equalsIgnoreCase(traceId, SKY_WALKING_IGNORED);
}
private static String resolveCachedTraceId() {
String cached = FALLBACK_TRACE_ID.get();
if (StringUtils.isNotBlank(cached)) {
return cached;
}
HttpServletRequest request = currentRequest();
if (request != null) {
Object attribute = request.getAttribute(REQUEST_ATTRIBUTE_KEY);
if (attribute instanceof String attrValue && StringUtils.isNotBlank(attrValue)) {
cacheTraceId(attrValue);
return attrValue;
}
String headerValue = resolveTraceIdFromHeader(request);
if (StringUtils.isNotBlank(headerValue)) {
cacheTraceId(headerValue);
return headerValue;
}
}
String mdcTraceId = MDC.get(MDC_TRACE_ID_KEY);
if (StringUtils.isNotBlank(mdcTraceId)) {
cacheTraceId(mdcTraceId);
return mdcTraceId;
}
return null;
}
private static void cacheTraceId(String traceId) {
if (StringUtils.isBlank(traceId)) {
return;
}
String trimmed = traceId.trim();
FALLBACK_TRACE_ID.set(trimmed);
MDC.put(MDC_TRACE_ID_KEY, trimmed);
HttpServletRequest request = currentRequest();
if (request != null) {
request.setAttribute(REQUEST_ATTRIBUTE_KEY, trimmed);
}
}
private static HttpServletRequest currentRequest() {
RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
if (requestAttributes instanceof ServletRequestAttributes servletRequestAttributes) {
return servletRequestAttributes.getRequest();
}
return null;
}
private static String resolveTraceIdFromHeader(HttpServletRequest request) {
for (String header : HEADER_CANDIDATES) {
String value = request.getHeader(header);
if (StringUtils.isNotBlank(value)) {
return value.trim();
}
}
return null;
}
private static String generateFallbackTraceId() {
return StringUtils.replace(UUID.randomUUID().toString(), "-", "");
}
}
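
A propagation sketch for pooled threads, the scenario bindTraceId/clear are designed for:

import com.zt.plat.framework.common.util.monitor.TracerUtils;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class TraceIdPropagationDemo {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        String traceId = TracerUtils.getTraceId(); // from SkyWalking, header, MDC, or freshly generated
        executor.submit(() -> {
            try {
                TracerUtils.bindTraceId(traceId); // reuse the caller's traceId in the worker
                // ... business logic; %X{traceId} in the log pattern now matches ...
            } finally {
                TracerUtils.clear(); // required: pooled threads would otherwise leak the id
            }
        });
        executor.shutdown();
    }
}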

View File

@@ -4,6 +4,7 @@ import cn.hutool.core.bean.BeanUtil;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.common.util.collection.CollectionUtils;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
@@ -69,10 +70,13 @@ public class BeanUtils {
return null;
}
List<T> list = toBean(source.getList(), targetType);
if (list == null) {
list = Collections.emptyList();
}
if (peek != null) {
list.forEach(peek);
}
return new PageResult<>(list, source.getTotal());
return new PageResult<>(list, source.getTotal(), source.getSummary());
}
public static void copyProperties(Object source, Object target) {
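
A conversion sketch with invented DO/VO types; it assumes a toBean(PageResult, Class) overload delegating to the method above, and that BeanUtils lives in the util.object package:

import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.common.util.object.BeanUtils; // assumed package
import lombok.Data;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;

public class PageConvertDemo {
    @Data public static class DemoDO { private Long id; }
    @Data public static class DemoVO { private Long id; }

    public static void main(String[] args) {
        PageResult<DemoDO> source = new PageResult<>(List.of(new DemoDO()), 1L,
                Map.of("amount", BigDecimal.TEN));
        PageResult<DemoVO> target = BeanUtils.toBean(source, DemoVO.class);
        // total and the "amount" summary entry carry over to the converted page
        System.out.println(target.getSummary());
    }
}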

View File

@@ -0,0 +1,216 @@
package com.zt.plat.framework.common.util.tree;
import com.alibaba.fastjson.JSON;
import com.zt.plat.framework.common.util.object.ObjectUtils;
import lombok.Data;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collectors;
/**
* Utility methods for tree structures
*/
public class TreeUtil {
/**
* Assembles a flat list into a tree
* @param list the list to assemble
* @param rootCheck predicate identifying root nodes, e.g. x -> x.getPId() == -1L, x -> x.getParentId() == null, x -> x.getParentMenuId() == 0
* @param parentCheck predicate identifying a parent-child pair, e.g. (x, y) -> x.getId().equals(y.getPId())
* @param setSubChildren setter that attaches children, e.g. Menu::setSubMenus
* @param <E> entity type
* @return the assembled tree
*/
public static <E> List<E> makeTree(List<E> list, Predicate<E> rootCheck, BiFunction<E,E,Boolean> parentCheck, BiConsumer<E,List<E>> setSubChildren){
return list.stream().filter(rootCheck).peek(x->setSubChildren.accept(x,makeChildren(x,list,parentCheck,setSubChildren))).collect(Collectors.toList());
}
/**
* Flattens a tree back into a list
* @param tree the tree to flatten
* @param getSubChildren getter for children, e.g. Menu::getSubMenus
* @param setSubChildren consumer that clears children, e.g. x -> x.setSubMenus(null)
* @return the flattened list
* @param <E> entity type
*/
public static <E> List<E> flat(List<E> tree, Function<E,List<E>> getSubChildren, Consumer<E> setSubChildren){
List<E> res = new ArrayList<>();
forPostOrder(tree,item->{
setSubChildren.accept(item);
res.add(item);
},getSubChildren);
return res;
}
/**
* Pre-order traversal
*
* @param tree the tree to traverse
* @param consumer action applied to each element, e.g. x -> System.out.println(x) or System.out::println
* @param getSubChildren getter for children, e.g. Menu::getSubMenus
* @param <E> entity type
*/
public static <E> void forPreOrder(List<E> tree, Consumer<E> consumer, Function<E, List<E>> getSubChildren) {
for (E l : tree) {
consumer.accept(l);
List<E> es = getSubChildren.apply(l);
if (es != null && !es.isEmpty()) {
forPreOrder(es, consumer, getSubChildren);
}
}
}
/**
* Level-order traversal
*
* @param tree the tree to traverse
* @param consumer action applied to each element, e.g. x -> System.out.println(x) or System.out::println
* @param getSubChildren getter for children, e.g. Menu::getSubMenus
* @param <E> entity type
*/
public static <E> void forLevelOrder(List<E> tree, Consumer<E> consumer, Function<E, List<E>> getSubChildren) {
Queue<E> queue = new LinkedList<>(tree);
while (!queue.isEmpty()) {
E item = queue.poll();
consumer.accept(item);
List<E> childList = getSubChildren.apply(item);
if (childList != null && !childList.isEmpty()) {
queue.addAll(childList);
}
}
}
/**
* Post-order traversal
*
* @param tree the tree to traverse
* @param consumer action applied to each element, e.g. x -> System.out.println(x) or System.out::println
* @param getSubChildren getter for children, e.g. Menu::getSubMenus
* @param <E> entity type
*/
public static <E> void forPostOrder(List<E> tree, Consumer<E> consumer, Function<E, List<E>> getSubChildren) {
for (E item : tree) {
List<E> childList = getSubChildren.apply(item);
if (childList != null && !childList.isEmpty()) {
forPostOrder(childList, consumer, getSubChildren);
}
consumer.accept(item);
}
}
/**
* Sorts all children of the tree with the given comparator
*
* @param tree the tree to sort
* @param comparator ordering, e.g. Comparator.comparing(MenuVo::getRank) for ascending rank, (x, y) -> y.getRank().compareTo(x.getRank()) for descending
* @param getChildren getter for children, e.g. MenuVo::getSubMenus
* @return the sorted tree
* @param <E> entity type
*/
public static <E> List<E> sort(List<E> tree, Comparator<? super E> comparator, Function<E,List<E>> getChildren){
for(E item : tree){
List<E> childList = getChildren.apply(item);
if (childList != null && !childList.isEmpty()) {
sort(childList,comparator,getChildren);
}
}
tree.sort(comparator);
return tree;
}
private static <E> List<E> makeChildren(E parent,List<E> allData,BiFunction<E,E,Boolean> parentCheck,BiConsumer<E,List<E>> children){
return allData.stream().filter(x->parentCheck.apply(parent,x)).peek(x->children.accept(x,makeChildren(x,allData,parentCheck,children))).collect(Collectors.toList());
}
/**
* Usage example
* @param args main arguments
*/
public static void main(String[] args) {
MenuVo menu0 = new MenuVo(0L, -1L, "一级菜单", 0);
MenuVo menu1 = new MenuVo(1L, 0L, "二级菜单", 1);
MenuVo menu2 = new MenuVo(2L, 0L, "三级菜单", 2);
MenuVo menu3 = new MenuVo(3L, 1L, "四级菜单", 3);
MenuVo menu4 = new MenuVo(4L, 1L, "五级菜单", 4);
MenuVo menu5 = new MenuVo(5L, 2L, "六级菜单", 5);
MenuVo menu6 = new MenuVo(6L, 2L, "七级菜单", 6);
MenuVo menu7 = new MenuVo(7L, 3L, "八级菜单", 7);
MenuVo menu8 = new MenuVo(8L, 3L, "九级菜单", 8);
MenuVo menu9 = new MenuVo(9L, 4L, "十级菜单", 9);
// sample data
List<MenuVo> menuList = Arrays.asList(menu0, menu1, menu2, menu3, menu4, menu5, menu6, menu7, menu8, menu9);
// assemble the tree
/*
 * Argument 1: the List to assemble, menuList in this demo
 * Argument 2: Predicate rootCheck identifying roots; here pId == -1 marks a root
 * Argument 3: parentCheck identifying a parent-child pair; here id == pId
 * Argument 4: setSubChildren, the child setter, Menu::setSubMenus in this demo
 */
List<MenuVo> tree= TreeUtil.makeTree(menuList, x->x.getPId()==-1L,(x, y)->x.getId().equals(y.getPId()), MenuVo::setSubMenus);
System.out.println(JSON.toJSONString(tree));
// pre-order traversal
/*
 * Traversal arguments:
 * tree: the tree produced by makeTree() above
 * consumer: action applied to each element, e.g. x -> System.out.println(x) or preStr.append(x.getId().toString())
 * getSubChildren: getter for children, e.g. Menu::getSubMenus
 */
StringBuffer preStr = new StringBuffer();
TreeUtil.forPreOrder(tree,x-> preStr.append(x.getId().toString()),MenuVo::getSubMenus);
ObjectUtils.equalsAny("0123456789",preStr.toString());
// level-order traversal
StringBuffer levelStr=new StringBuffer();
TreeUtil.forLevelOrder(tree,x-> levelStr.append(x.getId().toString()),MenuVo::getSubMenus);
ObjectUtils.equalsAny("0123456789",levelStr.toString());
// post-order traversal
StringBuffer postOrder=new StringBuffer();
TreeUtil.forPostOrder(tree,x-> postOrder.append(x.getId().toString()),MenuVo::getSubMenus);
ObjectUtils.equalsAny("7839415620",postOrder.toString());
// flatten the tree
/*
 * flat() arguments:
 * tree: the tree produced by makeTree()
 * getSubChildren: getter for children, e.g. Menu::getSubMenus
 * setSubChildren: consumer that clears children, e.g. x -> x.setSubMenus(null)
 */
List<MenuVo> flat = TreeUtil.flat(tree, MenuVo::getSubMenus,x->x.setSubMenus(null));
ObjectUtils.equalsAny(flat.size(),menuList.size());
flat.forEach(x -> {
if (x.getSubMenus() != null) {
throw new RuntimeException("树平铺失败");
}
});
// ascending by rank
/*
 * sort() arguments:
 * tree: the tree produced by makeTree()
 * comparator: e.g. Comparator.comparing(MenuVo::getRank) for ascending rank, (x, y) -> y.getRank().compareTo(x.getRank()) for descending
 * getChildren: getter for children, e.g. MenuVo::getSubMenus
 */
List<MenuVo> sortTree= TreeUtil.sort(tree, Comparator.comparing(MenuVo::getRank), MenuVo::getSubMenus);
// descending by rank
List<MenuVo> sortTreeReverse = TreeUtil.sort(tree, (x,y)->y.getRank().compareTo(x.getRank()), MenuVo::getSubMenus);
}
@Data
static class MenuVo {
private Long id; // primary key
private Long pId; // parent id
private String name; // menu name
private Integer rank = 0; // sort rank
private List<MenuVo> subMenus = new ArrayList<>(); // child menus
public MenuVo(Long id, Long pId, String name, Integer rank) {
this.id = id;
this.pId = pId;
this.name = name;
this.rank = rank;
}
}
}

View File

@@ -31,6 +31,11 @@
<artifactId>zt-spring-boot-starter-biz-data-permission</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>com.zt.plat</groupId>
<artifactId>zt-spring-boot-starter-biz-tenant</artifactId>
<version>${revision}</version>
</dependency>
<!-- Test 测试相关 -->
<dependency>
<groupId>com.zt.plat</groupId>

View File

@@ -19,11 +19,11 @@ import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
public class ZtBusinessAutoConfiguration implements WebMvcConfigurer {
@Override
public void addInterceptors(InterceptorRegistry registry) {
// 拦截增删改和 set 相关的 url
// 拦截所有 url统一进行业务与文件上传请求头校验
registry.addInterceptor(new BusinessHeaderInterceptor())
.addPathPatterns("/**/add**", "/**/create**", "/**/update**", "/**/edit**", "/**/set**");
.addPathPatterns("/**");
registry.addInterceptor(new FileUploadHeaderInterceptor())
.addPathPatterns("/**/add**", "/**/create**", "/**/update**", "/**/edit**", "/**/set**");
.addPathPatterns("/**");
}
@Bean

View File

@@ -2,15 +2,20 @@ package com.zt.plat.framework.business.core.util;
import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import com.zt.plat.framework.common.pojo.CommonResult;
import com.zt.plat.framework.common.pojo.CompanyDeptInfo;
import com.zt.plat.framework.common.util.json.JsonUtils;
import com.zt.plat.framework.common.util.spring.SpringUtils;
import com.zt.plat.framework.security.core.LoginUser;
import com.zt.plat.framework.tenant.core.context.CompanyContextHolder;
import com.zt.plat.framework.web.core.util.WebFrameworkUtils;
import com.zt.plat.module.system.api.dept.DeptApi;
import com.zt.plat.module.system.api.dept.dto.CompanyDeptInfoRespDTO;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import lombok.extern.slf4j.Slf4j;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.*;
import java.util.stream.Collectors;
import static com.zt.plat.framework.common.util.collection.CollectionUtils.singleton;
@@ -19,46 +24,179 @@ import static com.zt.plat.framework.security.core.util.SecurityFrameworkUtils.ge
/**
* @author chenbowen
*/
@Slf4j
public class BusinessDeptHandleUtil {
private static final String CONTEXT_KEY_COMPANY_DEPT_INFOS = "companyDeptInfos";
public static Set<CompanyDeptInfo> getBelongCompanyAndDept(HttpServletRequest request, HttpServletResponse response) throws Exception {
response.setContentType("application/json;charset=UTF-8");
String companyId = request.getHeader("visit-company-id");
String deptId = request.getHeader("visit-dept-id");
LoginUser loginUser = Optional.ofNullable(getLoginUser()).orElse(new LoginUser().setInfo(new HashMap<>()));
Set<CompanyDeptInfo> companyDeptSet = JSONUtil.parseArray(loginUser.getInfo().getOrDefault(LoginUser.INFO_KEY_COMPANY_DEPT_SET, "[]")).stream()
.map(obj -> JSONUtil.toBean((JSONObject) obj, CompanyDeptInfo.class))
.collect(Collectors.toSet());
String companyIdHeader = request.getHeader(WebFrameworkUtils.HEADER_VISIT_COMPANY_ID);
String deptIdHeader = request.getHeader(WebFrameworkUtils.HEADER_VISIT_DEPT_ID);
LoginUser currentLoginUser = getLoginUser();
Map<String, String> extraInfo = Optional.ofNullable(currentLoginUser)
.map(LoginUser::getInfo)
.orElseGet(HashMap::new);
if (currentLoginUser != null && currentLoginUser.getInfo() == null) {
currentLoginUser.setInfo(extraInfo);
}
Set<CompanyDeptInfo> companyDeptSet = resolveCompanyDeptInfos(currentLoginUser, extraInfo);
// 1. A company id is supplied in the header
if (companyId != null && !companyId.isBlank()) {
if (companyIdHeader != null && !companyIdHeader.isBlank()) {
// Filter the user's company-department pairs down to the company id from the header
Set<CompanyDeptInfo> companyDeptSetByCompanyId = companyDeptSet.stream().filter(companyDeptInfo -> companyDeptInfo.getCompanyId().toString().equals(companyId)).collect(Collectors.toSet());
Set<CompanyDeptInfo> companyDeptSetByCompanyId = companyDeptSet.stream()
.filter(companyDeptInfo -> companyDeptInfo.getCompanyId().equals(companyIdHeader))
.collect(Collectors.toSet());
if (companyDeptSetByCompanyId.isEmpty()) {
// The user has no department under this company
CompanyDeptInfo data = new CompanyDeptInfo();
data.setCompanyId(Long.valueOf(companyId));
data.setDeptId(0L);
data.setCompanyId(companyIdHeader);
data.setDeptId("0");
return new HashSet<>(singleton(data));
}
// If a dept id is supplied, check that it belongs to this company
if (deptId != null) {
boolean valid = companyDeptSetByCompanyId.stream().anyMatch(info -> String.valueOf(info.getDeptId()).equals(deptId));
if (deptIdHeader != null) {
boolean valid = companyDeptSetByCompanyId.stream().anyMatch(info -> String.valueOf(info.getDeptId()).equals(deptIdHeader));
if (!valid) {
return null;
}else{
} else {
// Department matches, let the request through
return new HashSet<>();
}
}
if (companyDeptSetByCompanyId.size() == 1) {
CompanyDeptInfo singleCompanyDept = companyDeptSetByCompanyId.iterator().next();
if (applyAutoSelection(currentLoginUser, request, singleCompanyDept)) {
return Collections.emptySet();
}
}
return companyDeptSetByCompanyId;
}
// 2. No company header; try auto-selection based on uniqueness
// When the current user has exactly one company-department pair
if (companyDeptSet.size() == 1) {
CompanyDeptInfo companyDeptInfo = companyDeptSet.iterator().next();
if (applyAutoSelection(currentLoginUser, request, companyDeptInfo)) {
return Collections.emptySet();
}
return new HashSet<>(singleton(companyDeptInfo));
} else {
return companyDeptSet;
}
return companyDeptSet;
}
private static Set<CompanyDeptInfo> resolveCompanyDeptInfos(LoginUser loginUser, Map<String, String> extraInfo) {
if (loginUser == null) {
return Collections.emptySet();
}
Set<CompanyDeptInfo> cached = loginUser.getContext(CONTEXT_KEY_COMPANY_DEPT_INFOS, Set.class);
if (cached != null) {
return cached;
}
Set<CompanyDeptInfo> resolved = parseFromInfo(extraInfo);
if (resolved == null || resolved.isEmpty()) {
Set<CompanyDeptInfo> fetched = fetchCompanyDeptInfos(loginUser.getId());
if (!fetched.isEmpty()) {
resolved = fetched;
} else if (resolved == null) {
resolved = Collections.emptySet();
}
}
cacheCompanyDeptInfos(loginUser, extraInfo, resolved);
return resolved;
}
private static Set<CompanyDeptInfo> parseFromInfo(Map<String, String> extraInfo) {
if (extraInfo == null || !extraInfo.containsKey(LoginUser.INFO_KEY_COMPANY_DEPT_SET)) {
return null;
}
try {
return JSONUtil.parseArray(extraInfo.getOrDefault(LoginUser.INFO_KEY_COMPANY_DEPT_SET, "[]")).stream()
.map(obj -> JSONUtil.toBean((JSONObject) obj, CompanyDeptInfo.class))
.collect(Collectors.toCollection(LinkedHashSet::new));
} catch (Exception ex) {
log.warn("[parseFromInfo][解析公司部门信息失败] raw={}", extraInfo.get(LoginUser.INFO_KEY_COMPANY_DEPT_SET), ex);
return Collections.emptySet();
}
}
private static Set<CompanyDeptInfo> fetchCompanyDeptInfos(Long userId) {
if (userId == null) {
return Collections.emptySet();
}
try {
DeptApi deptApi = SpringUtils.getBean(DeptApi.class);
CommonResult<Set<CompanyDeptInfoRespDTO>> result = deptApi.getCompanyDeptInfoListByUserId(userId);
if (result == null || !result.isSuccess() || result.getData() == null) {
return Collections.emptySet();
}
return result.getData().stream()
.map(BusinessDeptHandleUtil::convert)
.collect(Collectors.toCollection(LinkedHashSet::new));
} catch (Exception ex) {
log.warn("[fetchCompanyDeptInfos][userId({}) 获取公司部门信息失败]", userId, ex);
return Collections.emptySet();
}
}
private static void cacheCompanyDeptInfos(LoginUser loginUser, Map<String, String> extraInfo, Set<CompanyDeptInfo> infos) {
if (infos == null) {
infos = Collections.emptySet();
}
loginUser.setContext(CONTEXT_KEY_COMPANY_DEPT_INFOS, infos);
if (extraInfo == null) {
return;
}
Set<String> companyIds = infos.stream()
.map(CompanyDeptInfo::getCompanyId)
.filter(Objects::nonNull)
.collect(Collectors.toCollection(LinkedHashSet::new));
Set<String> deptIds = infos.stream()
.map(CompanyDeptInfo::getDeptId)
.filter(Objects::nonNull)
.collect(Collectors.toCollection(LinkedHashSet::new));
extraInfo.put(LoginUser.INFO_KEY_COMPANY_DEPT_SET, JsonUtils.toJsonString(infos));
extraInfo.put(LoginUser.INFO_KEY_COMPANY_IDS, JsonUtils.toJsonString(companyIds));
extraInfo.put(LoginUser.INFO_KEY_DEPT_IDS, JsonUtils.toJsonString(deptIds));
}
private static CompanyDeptInfo convert(CompanyDeptInfoRespDTO dto) {
CompanyDeptInfo info = new CompanyDeptInfo();
info.setCompanyId(String.valueOf(dto.getCompanyId()));
info.setCompanyName(dto.getCompanyName());
info.setCompanyCode(dto.getCompanyCode());
info.setDeptId(String.valueOf(dto.getDeptId()));
info.setDeptName(dto.getDeptName());
info.setDeptCode(dto.getDeptCode());
return info;
}
private static boolean applyAutoSelection(LoginUser loginUser, HttpServletRequest request, CompanyDeptInfo info) {
if (info == null || info.getCompanyId() == null || "0".equals(info.getCompanyId())
|| info.getDeptId() == null || "0".equals(info.getDeptId())) {
return false;
}
if (loginUser != null) {
loginUser.setVisitCompanyId(Long.valueOf(info.getCompanyId()));
loginUser.setVisitCompanyName(info.getCompanyName());
loginUser.setVisitCompanyCode(info.getCompanyCode());
loginUser.setVisitDeptId(Long.valueOf(info.getDeptId()));
loginUser.setVisitDeptName(info.getDeptName());
loginUser.setVisitDeptCode(info.getDeptCode());
}
request.setAttribute(WebFrameworkUtils.HEADER_VISIT_COMPANY_ID, info.getCompanyId());
if (info.getCompanyName() != null) {
request.setAttribute(WebFrameworkUtils.HEADER_VISIT_COMPANY_NAME, info.getCompanyName());
}
request.setAttribute(WebFrameworkUtils.HEADER_VISIT_DEPT_ID, info.getDeptId());
if (info.getDeptName() != null) {
request.setAttribute(WebFrameworkUtils.HEADER_VISIT_DEPT_NAME, info.getDeptName());
}
CompanyContextHolder.setIgnore(false);
CompanyContextHolder.setCompanyId(Long.valueOf(info.getCompanyId()));
return true;
}
}

View File

@@ -107,11 +107,11 @@ class BusinessHeaderInterceptorTest {
// Build a loginUser with multiple company-department pairs
CompanyDeptInfo deptInfo1 = new CompanyDeptInfo();
deptInfo1.setCompanyId(1L);
deptInfo1.setDeptId(2L);
deptInfo1.setCompanyId(String.valueOf(1L));
deptInfo1.setDeptId(String.valueOf(2L));
CompanyDeptInfo deptInfo2 = new CompanyDeptInfo();
deptInfo2.setCompanyId(2L);
deptInfo2.setDeptId(3L);
deptInfo2.setCompanyId(String.valueOf(2L));
deptInfo2.setDeptId(String.valueOf(3L));
Set<CompanyDeptInfo> deptSet = new HashSet<>();
deptSet.add(deptInfo1);
deptSet.add(deptInfo2);
@@ -141,8 +141,8 @@ class BusinessHeaderInterceptorTest {
// Build a loginUser with a single company that has a single department
CompanyDeptInfo deptInfo = new CompanyDeptInfo();
deptInfo.setCompanyId(100L);
deptInfo.setDeptId(200L);
deptInfo.setCompanyId(String.valueOf(100L));
deptInfo.setDeptId(String.valueOf(200L));
Set<CompanyDeptInfo> deptSet = new HashSet<>();
deptSet.add(deptInfo);
LoginUser loginUser = randomPojo(LoginUser.class, o -> o.setId(1L)
@@ -155,9 +155,9 @@ class BusinessHeaderInterceptorTest {
setLoginUserForTest(loginUser);
boolean result = interceptor.preHandle(request, response, handlerMethod);
assertFalse(result);
// Optional: verify(request).setAttribute("visit-company-id", String.valueOf(deptInfo.getCompanyId()));
// Optional: verify(request).setAttribute("visit-dept-id", String.valueOf(deptInfo.getDeptId()));
assertTrue(result);
verify(request).setAttribute(eq("visit-company-id"), eq(deptInfo.getCompanyId()));
verify(request).setAttribute(eq("visit-dept-id"), eq(deptInfo.getDeptId()));
}
/**
@@ -172,11 +172,11 @@ class BusinessHeaderInterceptorTest {
// Build a loginUser with multiple company-department pairs
CompanyDeptInfo deptInfo1 = new CompanyDeptInfo();
deptInfo1.setCompanyId(1L);
deptInfo1.setDeptId(2L);
deptInfo1.setCompanyId(String.valueOf(1L));
deptInfo1.setDeptId(String.valueOf(2L));
CompanyDeptInfo deptInfo2 = new CompanyDeptInfo();
deptInfo2.setCompanyId(2L);
deptInfo2.setDeptId(3L);
deptInfo2.setCompanyId(String.valueOf(2L));
deptInfo2.setDeptId(String.valueOf(3L));
Set<CompanyDeptInfo> deptSet = new HashSet<>();
deptSet.add(deptInfo1);
deptSet.add(deptInfo2);
@@ -207,11 +207,11 @@ class BusinessHeaderInterceptorTest {
// Build a loginUser that only belongs to other companies' departments
CompanyDeptInfo deptInfo1 = new CompanyDeptInfo();
deptInfo1.setCompanyId(1L);
deptInfo1.setDeptId(2L);
deptInfo1.setCompanyId(String.valueOf(1L));
deptInfo1.setDeptId(String.valueOf(2L));
CompanyDeptInfo deptInfo2 = new CompanyDeptInfo();
deptInfo2.setCompanyId(2L);
deptInfo2.setDeptId(3L);
deptInfo2.setCompanyId(String.valueOf(2L));
deptInfo2.setDeptId(String.valueOf(3L));
Set<CompanyDeptInfo> deptSet = new HashSet<>();
deptSet.add(deptInfo1);
deptSet.add(deptInfo2);

View File

@@ -25,6 +25,7 @@ import com.zt.plat.framework.web.core.handler.GlobalExceptionHandler;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.TenantLineInnerInterceptor;
import jakarta.annotation.Resource;
import org.springframework.beans.factory.SmartInitializingSingleton;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
@@ -49,17 +50,30 @@ import org.springframework.web.util.pattern.PathPattern;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import static com.zt.plat.framework.common.util.collection.CollectionUtils.convertList;
@AutoConfiguration
@ConditionalOnProperty(prefix = "zt.tenant", value = "enable", matchIfMissing = true) // multi-tenancy can be disabled via zt.tenant.enable=false
@EnableConfigurationProperties(TenantProperties.class)
public class ZtTenantAutoConfiguration {
public class ZtTenantAutoConfiguration implements SmartInitializingSingleton {
@Resource
private ApplicationContext applicationContext;
@Resource
private TenantProperties tenantProperties;
/**
* URLs collected from @TenantIgnore annotations
*
* Why not store them in TenantProperties directly?
* Because TenantProperties is a @ConfigurationProperties bean that config centers such as Nacos may refresh, which would drop the programmatically added URLs.
*/
private final Set<String> globalIgnoreUrls = ConcurrentHashMap.newKeySet();
@Bean
public TenantFrameworkService tenantFrameworkService(TenantCommonApi tenantApi) {
// See https://gitee.com/zhijiantianya/zt-cloud/issues/IC6YZF
@@ -98,16 +112,18 @@ public class ZtTenantAutoConfiguration {
FilterRegistrationBean<TenantContextWebFilter> registrationBean = new FilterRegistrationBean<>();
registrationBean.setFilter(new TenantContextWebFilter());
registrationBean.setOrder(WebFilterOrderEnum.TENANT_CONTEXT_FILTER);
addIgnoreUrls(tenantProperties);
return registrationBean;
}
@Override
public void afterSingletonsInstantiated() {
addIgnoreUrls();
}
/**
* If a controller endpoint carries the {@link TenantIgnore} annotation, add its URL to the ignored set
*
* @param tenantProperties tenant configuration
*/
private void addIgnoreUrls(TenantProperties tenantProperties) {
private void addIgnoreUrls() {
// Collect the HandlerMethod of every endpoint
RequestMappingHandlerMapping requestMappingHandlerMapping = (RequestMappingHandlerMapping)
applicationContext.getBean("requestMappingHandlerMapping");
@@ -120,10 +136,10 @@ public class ZtTenantAutoConfiguration {
}
// Add the URLs to the ignored set
if (entry.getKey().getPatternsCondition() != null) {
tenantProperties.getIgnoreUrls().addAll(entry.getKey().getPatternsCondition().getPatterns());
globalIgnoreUrls.addAll(entry.getKey().getPatternsCondition().getPatterns());
}
if (entry.getKey().getPathPatternsCondition() != null) {
tenantProperties.getIgnoreUrls().addAll(
globalIgnoreUrls.addAll(
convertList(entry.getKey().getPathPatternsCondition().getPatterns(), PathPattern::getPatternString));
}
}
@@ -172,7 +188,7 @@ public class ZtTenantAutoConfiguration {
TenantFrameworkService tenantFrameworkService) {
FilterRegistrationBean<TenantSecurityWebFilter> registrationBean = new FilterRegistrationBean<>();
registrationBean.setFilter(new TenantSecurityWebFilter(tenantProperties, webProperties,
globalExceptionHandler, tenantFrameworkService));
globalExceptionHandler, tenantFrameworkService, globalIgnoreUrls));
registrationBean.setOrder(WebFilterOrderEnum.TENANT_SECURITY_FILTER);
return registrationBean;
}
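For context, a hedged sketch of how a controller opts out of tenant checks under this setup (the controller class and path are illustrative, not from the diff):

import com.zt.plat.framework.tenant.core.aop.TenantIgnore;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class DemoOpenController {

    @TenantIgnore // collected by afterSingletonsInstantiated() into globalIgnoreUrls
    @GetMapping("/admin-api/demo/open/ping")
    public String ping() {
        return "pong"; // reachable without a tenant header; both tenant filters skip this URL
    }
}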

View File

@@ -1,12 +1,12 @@
package com.zt.plat.framework.tenant.core.db;
import com.zt.plat.framework.tenant.config.TenantProperties;
import com.zt.plat.framework.tenant.core.aop.TenantIgnore;
import com.zt.plat.framework.tenant.core.context.TenantContextHolder;
import com.baomidou.mybatisplus.core.metadata.TableInfo;
import com.baomidou.mybatisplus.core.metadata.TableInfoHelper;
import com.baomidou.mybatisplus.extension.plugins.handler.TenantLineHandler;
import com.baomidou.mybatisplus.extension.toolkit.SqlParserUtils;
import com.zt.plat.framework.tenant.config.TenantProperties;
import com.zt.plat.framework.tenant.core.aop.TenantIgnore;
import com.zt.plat.framework.tenant.core.context.TenantContextHolder;
import net.sf.jsqlparser.expression.Expression;
import net.sf.jsqlparser.expression.LongValue;
@@ -69,7 +69,12 @@ public class TenantDatabaseInterceptor implements TenantLineHandler {
// a table we cannot resolve is not part of the zt project, so skip interception (ignore tenant)
TableInfo tableInfo = TableInfoHelper.getTableInfo(tableName);
if (tableInfo == null) {
// DM and other databases may report table names in upper case; retry the cache lookup in lower case
tableName = tableName.toLowerCase();
tableInfo = TableInfoHelper.getTableInfo(tableName);
}
// still unknown after the retry: not a zt-managed table, so ignore the tenant
if (tableInfo == null) {
return true;
}
// entities extending TenantBaseDO obviously must not ignore the tenant
if (TenantBaseDO.class.isAssignableFrom(tableInfo.getEntityType())) {
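A hedged illustration of why the lower-case retry matters (the table name is made up): MyBatis Plus keys its TableInfo cache by the name declared in @TableName, which is usually lower case, while DM reports upper-case identifiers in parsed SQL.

TableInfo info = TableInfoHelper.getTableInfo("SYSTEM_USERS");            // miss: cache key is "system_users"
if (info == null) {
    info = TableInfoHelper.getTableInfo("SYSTEM_USERS".toLowerCase());    // cache hit
}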

View File

@@ -21,6 +21,7 @@ import org.springframework.util.AntPathMatcher;
import java.io.IOException;
import java.util.Objects;
import java.util.Set;
/**
* 多租户 Security Web 过滤器
@@ -39,16 +40,19 @@ public class TenantSecurityWebFilter extends ApiRequestFilter {
private final GlobalExceptionHandler globalExceptionHandler;
private final TenantFrameworkService tenantFrameworkService;
private final Set<String> globalIgnoreUrls;
public TenantSecurityWebFilter(TenantProperties tenantProperties,
WebProperties webProperties,
GlobalExceptionHandler globalExceptionHandler,
TenantFrameworkService tenantFrameworkService) {
TenantFrameworkService tenantFrameworkService,
Set<String> globalIgnoreUrls) {
super(webProperties);
this.tenantProperties = tenantProperties;
this.pathMatcher = new AntPathMatcher();
this.globalExceptionHandler = globalExceptionHandler;
this.tenantFrameworkService = tenantFrameworkService;
this.globalIgnoreUrls = globalIgnoreUrls;
}
@Override
@@ -105,12 +109,20 @@ public class TenantSecurityWebFilter extends ApiRequestFilter {
if (CollUtil.contains(tenantProperties.getIgnoreUrls(), request.getRequestURI())) {
return true;
}
if (CollUtil.contains(globalIgnoreUrls, request.getRequestURI())) {
return true;
}
// 逐个 Ant 路径匹配
for (String url : tenantProperties.getIgnoreUrls()) {
if (pathMatcher.match(url, request.getRequestURI())) {
return true;
}
}
for (String url : globalIgnoreUrls) {
if (pathMatcher.match(url, request.getRequestURI())) {
return true;
}
}
return false;
}
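A quick hedged sanity check of the matching performed above (the pattern is illustrative):

import org.springframework.util.AntPathMatcher;

AntPathMatcher matcher = new AntPathMatcher();
// an entry such as "/admin-api/iwork/**" in globalIgnoreUrls matches like this:
boolean skip = matcher.match("/admin-api/iwork/**", "/admin-api/iwork/user/sync"); // true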

View File

@@ -20,30 +20,68 @@ public class CompanyVisitContextInterceptor implements HandlerInterceptor {
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
// parse headers and populate the visit-company context
LoginUser loginUser = SecurityFrameworkUtils.getLoginUser();
Long companyId = WebFrameworkUtils.getCompanyId(request);
// prefer company info from the request header; fall back to the request attribute, then to the visit company cached on the login user
if (companyId == null || companyId <= 0L) {
Long attrCompanyId = resolveLong(request.getAttribute(WebFrameworkUtils.HEADER_VISIT_COMPANY_ID));
if (attrCompanyId != null && attrCompanyId > 0L) {
companyId = attrCompanyId;
} else if (loginUser != null && loginUser.getVisitCompanyId() != null && loginUser.getVisitCompanyId() > 0L) {
companyId = loginUser.getVisitCompanyId();
}
}
String companyName = WebFrameworkUtils.getCompanyName(request);
if (companyId <= 0L) {
// ignore the request when no companyId is set
if (companyName == null || companyName.isEmpty()) {
Object attrCompanyName = request.getAttribute(WebFrameworkUtils.HEADER_VISIT_COMPANY_NAME);
if (attrCompanyName instanceof String) {
companyName = (String) attrCompanyName;
} else if (loginUser != null) {
companyName = loginUser.getVisitCompanyName();
}
}
Long deptId = WebFrameworkUtils.getDeptId(request);
// dept info follows the same fallback order: request header -> request attribute -> login-user cache
if (deptId == null || deptId <= 0L) {
Long attrDeptId = resolveLong(request.getAttribute(WebFrameworkUtils.HEADER_VISIT_DEPT_ID));
if (attrDeptId != null && attrDeptId > 0L) {
deptId = attrDeptId;
} else if (loginUser != null && loginUser.getVisitDeptId() != null && loginUser.getVisitDeptId() > 0L) {
deptId = loginUser.getVisitDeptId();
}
}
String deptName = WebFrameworkUtils.getDeptName(request);
if (deptName == null || deptName.isEmpty()) {
Object attrDeptName = request.getAttribute(WebFrameworkUtils.HEADER_VISIT_DEPT_NAME);
if (attrDeptName instanceof String) {
deptName = (String) attrDeptName;
} else if (loginUser != null) {
deptName = loginUser.getVisitDeptName();
}
}
if (companyId == null || companyId <= 0L) {
CompanyContextHolder.setIgnore(true);
return true;
}
Long deptId = WebFrameworkUtils.getDeptId(request);
String deptName = WebFrameworkUtils.getDeptName(request);
LoginUser loginUser = SecurityFrameworkUtils.getLoginUser();
CompanyContextHolder.setIgnore(false);
CompanyContextHolder.setCompanyId(companyId);
if (loginUser == null) {
return true;
}
if (deptId > 0L) {
// sync the latest visit company/dept onto the login user for later data-permission checks and context reads
loginUser.setVisitCompanyId(companyId);
loginUser.setVisitCompanyName(companyName);
if (deptId != null && deptId > 0L) {
loginUser.setVisitDeptId(deptId);
loginUser.setVisitDeptName(deptName);
}
// if (!securityFrameworkService.hasAnyPermissions(PERMISSION)) {
// throw exception0(GlobalErrorCodeConstants.FORBIDDEN.getCode(), "您无权切换部门");
// }
loginUser.setVisitCompanyId(companyId);
loginUser.setVisitCompanyName(companyName);
CompanyContextHolder.setCompanyId(companyId);
return true;
}
@@ -55,4 +93,18 @@ public class CompanyVisitContextInterceptor implements HandlerInterceptor {
loginUser.setVisitCompanyId(0L);
}
}
private Long resolveLong(Object value) {
if (value instanceof Number) {
return ((Number) value).longValue();
}
if (value instanceof String) {
try {
return Long.parseLong(((String) value).trim());
} catch (NumberFormatException ignored) {
return null;
}
}
return null;
}
}
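resolveLong keeps the interceptor tolerant of however the attribute was stored; a hedged recap of its contract:

resolveLong(42L);     // 42L  (any Number is narrowed via longValue())
resolveLong(" 42 ");  // 42L  (strings are trimmed, then parsed)
resolveLong("n/a");   // null (unparseable input returns null instead of throwing)
resolveLong(null);    // null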

View File

@@ -66,7 +66,7 @@
<dependency>
<groupId>com.zt.plat</groupId>
<artifactId>zt-spring-boot-starter-biz-ip</artifactId>
<optional>true</optional> <!-- 设置为 optional只有在 AreaConvert 的时候使用 -->
<!--<optional>true</optional>--> <!-- was optional (only needed for AreaConvert); now always on the runtime classpath -->
</dependency>
<!-- Test 测试相关 -->

View File

@@ -49,4 +49,11 @@ public class ExcelUtils {
.doReadAllSync();
}
public static <T> List<T> read(MultipartFile file, Class<T> head, int sheetNo) throws IOException {
return EasyExcel.read(file.getInputStream(), head, null)
.autoCloseStream(false)
.sheet(sheetNo)
.doReadSync();
}
}
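A hedged usage sketch for the new overload (the VO type is illustrative; sheet indexes are zero-based in EasyExcel):

// read only the second sheet of an uploaded workbook:
List<UserImportExcelVO> rows = ExcelUtils.read(file, UserImportExcelVO.class, 1);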

View File

@@ -24,10 +24,15 @@ public class TraceFilter extends OncePerRequestFilter {
@Override
protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain chain)
throws IOException, ServletException {
// set the response traceId
response.addHeader(HEADER_NAME_TRACE_ID, TracerUtils.getTraceId());
// continue the filter chain
chain.doFilter(request, response);
String traceId = TracerUtils.getTraceId();
try {
// expose the traceId on the response so clients can correlate requests
response.addHeader(HEADER_NAME_TRACE_ID, traceId);
// continue the filter chain
chain.doFilter(request, response);
} finally {
// worker threads are pooled: clear the trace context so the next request cannot see a stale traceId
TracerUtils.clear();
}
}
}

View File

@@ -107,6 +107,12 @@
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-openfeign</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@@ -1,9 +1,9 @@
package com.zt.plat.framework.mybatis.config;
import cn.hutool.core.util.StrUtil;
import com.zt.plat.framework.mybatis.core.handler.DefaultDBFieldHandler;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.autoconfigure.MybatisPlusAutoConfiguration;
import com.baomidou.mybatisplus.autoconfigure.MybatisPlusPropertiesCustomizer;
import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import com.baomidou.mybatisplus.core.incrementer.IKeyGenerator;
import com.baomidou.mybatisplus.extension.incrementer.*;
@@ -11,6 +11,8 @@ import com.baomidou.mybatisplus.extension.parser.JsqlParserGlobal;
import com.baomidou.mybatisplus.extension.parser.cache.JdkSerialCaffeineJsqlParseCache;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import com.zt.plat.framework.mybatis.core.handler.DefaultDBFieldHandler;
import com.zt.plat.framework.mybatis.core.sum.PageSumTableFieldAnnotationHandler;
import org.apache.ibatis.annotations.Mapper;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.autoconfigure.AutoConfiguration;
@@ -25,29 +27,40 @@ import java.util.concurrent.TimeUnit;
*
* @author ZT
*/
@AutoConfiguration(before = MybatisPlusAutoConfiguration.class) // 目的:先于 MyBatis Plus 自动配置,避免 @MapperScan 可能扫描不到 Mapper 打印 warn 日志
@AutoConfiguration(before = MybatisPlusAutoConfiguration.class) // run before the official auto-configuration so Mapper scanning has finished
@MapperScan(value = "${zt.info.base-package}", annotationClass = Mapper.class,
lazyInitialization = "${mybatis.lazy-initialization:false}") // Mapper 懒加载,目前仅用于单元测试
lazyInitialization = "${mybatis.lazy-initialization:false}") // lazy Mapper init; currently only unit tests need it
public class ZtMybatisAutoConfiguration {
static {
// 动态 SQL 智能优化支持本地缓存加速解析,更完善的租户复杂 XML 动态 SQL 支持,静态注入缓存
// use a local cache to speed up JsqlParser parsing; steadier performance for complex dynamic SQL
JsqlParserGlobal.setJsqlParseCache(new JdkSerialCaffeineJsqlParseCache(
(cache) -> cache.maximumSize(1024)
.expireAfterWrite(5, TimeUnit.SECONDS))
);
cache -> cache.maximumSize(1024).expireAfterWrite(5, TimeUnit.SECONDS)));
}
@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor mybatisPlusInterceptor = new MybatisPlusInterceptor();
mybatisPlusInterceptor.addInnerInterceptor(new PaginationInnerInterceptor()); // 分页插件
return mybatisPlusInterceptor;
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
interceptor.addInnerInterceptor(new PaginationInnerInterceptor()); // pagination plugin
return interceptor;
}
@Bean
public MetaObjectHandler defaultMetaObjectHandler() {
return new DefaultDBFieldHandler(); // 自动填充参数类
return new DefaultDBFieldHandler(); // unified population of common audit fields
}
@Bean
public MybatisPlusPropertiesCustomizer pageSumAnnotationCustomizer() {
// use the official extension point to auto-inject TableField(exist = false) for @PageSum fields
return properties -> {
var globalConfig = properties.getGlobalConfig();
if (globalConfig == null) {
return;
}
globalConfig.setAnnotationHandler(
new PageSumTableFieldAnnotationHandler(globalConfig.getAnnotationHandler()));
};
}
@Bean

View File

@@ -1,6 +1,13 @@
package com.zt.plat.framework.mybatis.core.mapper;
import cn.hutool.core.collection.CollUtil;
import com.zt.plat.framework.common.pojo.PageParam;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.common.pojo.SortablePageParam;
import com.zt.plat.framework.common.pojo.SortingField;
import com.zt.plat.framework.mybatis.core.sum.PageSumSupport;
import com.zt.plat.framework.mybatis.core.util.JdbcUtils;
import com.zt.plat.framework.mybatis.core.util.MyBatisUtils;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
@@ -43,14 +50,18 @@ public interface BaseMapperX<T> extends MPJBaseMapper<T> {
// 特殊:不分页,直接查询全部
if (PageParam.PAGE_SIZE_NONE.equals(pageParam.getPageSize())) {
List<T> list = selectList(queryWrapper);
return new PageResult<>(list, (long) list.size());
PageResult<T> pageResult = new PageResult<>(list, (long) list.size());
PageSumSupport.tryAttachSummary(this, queryWrapper, pageResult);
return pageResult;
}
// MyBatis Plus 查询
IPage<T> mpPage = MyBatisUtils.buildPage(pageParam, sortingFields);
selectPage(mpPage, queryWrapper);
// 转换返回
return new PageResult<>(mpPage.getRecords(), mpPage.getTotal());
PageResult<T> pageResult = new PageResult<>(mpPage.getRecords(), mpPage.getTotal());
PageSumSupport.tryAttachSummary(this, queryWrapper, pageResult);
return pageResult;
}
default <D> PageResult<D> selectJoinPage(PageParam pageParam, Class<D> clazz, MPJLambdaWrapper<T> lambdaWrapper) {
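Taken together with the ZtMybatisAutoConfiguration changes above, a hedged end-to-end sketch of the feature (DO, table, and mapper names are illustrative; @PageSum and PageResult#getSummary come from this diff, and a bare @PageSum is assumed to default to the entity's mapped column):

import com.baomidou.mybatisplus.annotation.TableName;
import com.zt.plat.framework.common.annotation.PageSum;
import com.zt.plat.framework.mybatis.core.mapper.BaseMapperX;
import java.math.BigDecimal;

@TableName("trade_order") // illustrative table
class TradeOrderDO {
    @PageSum // column resolved from the entity's TableInfo
    private BigDecimal amount;
    @PageSum(column = "fee", exist = false) // summary-only field with no real entity column
    private BigDecimal totalFee;
}

interface TradeOrderMapper extends BaseMapperX<TradeOrderDO> { }

// any selectPage(...) on TradeOrderMapper now also carries full-result-set sums:
//   PageResult<TradeOrderDO> page = mapper.selectPage(pageParam, wrapper);
//   BigDecimal amountTotal = page.getSummary().get("amount");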

View File

@@ -0,0 +1,76 @@
package com.zt.plat.framework.mybatis.core.sum;
import java.lang.reflect.Field;
import java.util.Objects;
/**
* Metadata describing a field participating in page-level SUM aggregation.
*/
final class PageSumFieldMeta {
private final String propertyName;
private final String columnExpression;
private final String selectAlias;
private final Class<?> fieldType;
PageSumFieldMeta(String propertyName, String columnExpression, String selectAlias, Class<?> fieldType) {
this.propertyName = propertyName;
this.columnExpression = columnExpression;
this.selectAlias = selectAlias;
this.fieldType = fieldType;
}
static PageSumFieldMeta of(Field field, String columnExpression) {
String property = field.getName();
return new PageSumFieldMeta(property, columnExpression, property, field.getType());
}
String getPropertyName() {
return propertyName;
}
String getColumnExpression() {
return columnExpression;
}
String getSelectAlias() {
return selectAlias;
}
Class<?> getFieldType() {
return fieldType;
}
String buildSelectSegment() {
return "SUM(" + columnExpression + ") AS " + selectAlias;
}
@Override
public int hashCode() {
return Objects.hash(propertyName, columnExpression, selectAlias, fieldType);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof PageSumFieldMeta other)) {
return false;
}
return Objects.equals(propertyName, other.propertyName)
&& Objects.equals(columnExpression, other.columnExpression)
&& Objects.equals(selectAlias, other.selectAlias)
&& Objects.equals(fieldType, other.fieldType);
}
@Override
public String toString() {
return "PageSumFieldMeta{" +
"propertyName='" + propertyName + '\'' +
", columnExpression='" + columnExpression + '\'' +
", selectAlias='" + selectAlias + '\'' +
", fieldType=" + fieldType +
'}';
}
}

View File

@@ -0,0 +1,341 @@
package com.zt.plat.framework.mybatis.core.sum;
import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.TableFieldInfo;
import com.baomidou.mybatisplus.core.metadata.TableInfo;
import com.baomidou.mybatisplus.core.metadata.TableInfoHelper;
import com.zt.plat.framework.common.annotation.PageSum;
import com.zt.plat.framework.common.pojo.PageResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* Utility that inspects {@link PageSum} annotations and attaches aggregated SUM results to {@link PageResult}.
*/
public final class PageSumSupport {
private static final Logger LOGGER = LoggerFactory.getLogger(PageSumSupport.class);
private static final ConcurrentMap<Class<?>, Optional<Class<?>>> ENTITY_CLASS_CACHE = new ConcurrentHashMap<>();
private static final ConcurrentMap<Class<?>, List<PageSumFieldMeta>> FIELD_META_CACHE = new ConcurrentHashMap<>();
private static final ConcurrentMap<Class<?>, Optional<Field>> SQL_SELECT_FIELD_CACHE = new ConcurrentHashMap<>();
private PageSumSupport() {
}
public static <T> void tryAttachSummary(Object mapperProxy, Wrapper<T> wrapper, PageResult<?> pageResult) {
if (mapperProxy == null || pageResult == null) {
return;
}
Class<?> entityClass = resolveEntityClass(mapperProxy.getClass());
if (entityClass == null) {
return;
}
List<PageSumFieldMeta> fieldMetas = resolveFieldMetas(entityClass);
if (fieldMetas.isEmpty()) {
return;
}
Map<String, BigDecimal> summary = executeSum((BaseMapper<T>) mapperProxy, wrapper, fieldMetas);
if (!summary.isEmpty()) {
pageResult.setSummary(summary);
}
}
private static Class<?> resolveEntityClass(Class<?> mapperProxyClass) {
return ENTITY_CLASS_CACHE.computeIfAbsent(mapperProxyClass, PageSumSupport::extractEntityClass)
.orElse(null);
}
private static Optional<Class<?>> extractEntityClass(Class<?> mapperProxyClass) {
Class<?>[] interfaces = mapperProxyClass.getInterfaces();
for (Class<?> iface : interfaces) {
Class<?> entityClass = extractEntityClassFromInterface(iface);
if (entityClass != null) {
return Optional.of(entityClass);
}
}
return Optional.empty();
}
private static Class<?> extractEntityClassFromInterface(Class<?> interfaceClass) {
if (interfaceClass == null || interfaceClass == Object.class) {
return null;
}
// inspect direct generic interfaces
for (Type type : interfaceClass.getGenericInterfaces()) {
Class<?> resolved = resolveFromType(type);
if (resolved != null) {
return resolved;
}
}
// fallback to parent interfaces recursively
for (Class<?> parent : interfaceClass.getInterfaces()) {
Class<?> resolved = extractEntityClassFromInterface(parent);
if (resolved != null) {
return resolved;
}
}
// handle generic super class (rare for interfaces but keep for completeness)
return resolveFromType(interfaceClass.getGenericSuperclass());
}
private static Class<?> resolveFromType(Type type) {
if (type == null) {
return null;
}
if (type instanceof ParameterizedType parameterizedType) {
Type raw = parameterizedType.getRawType();
if (raw instanceof Class<?> rawClass) {
if (BaseMapper.class.isAssignableFrom(rawClass)) {
Type[] actualTypes = parameterizedType.getActualTypeArguments();
if (actualTypes.length > 0) {
Type actual = actualTypes[0];
return toClass(actual);
}
}
Class<?> resolved = extractEntityClassFromInterface(rawClass);
if (resolved != null) {
return resolved;
}
}
for (Type actual : parameterizedType.getActualTypeArguments()) {
Class<?> resolved = resolveFromType(actual);
if (resolved != null) {
return resolved;
}
}
} else if (type instanceof Class<?> clazz) {
return extractEntityClassFromInterface(clazz);
}
return null;
}
private static Class<?> toClass(Type type) {
if (type instanceof Class<?> clazz) {
return clazz;
}
if (type instanceof ParameterizedType parameterizedType) {
Type raw = parameterizedType.getRawType();
if (raw instanceof Class<?>) {
return (Class<?>) raw;
}
}
return null;
}
private static List<PageSumFieldMeta> resolveFieldMetas(Class<?> entityClass) {
return FIELD_META_CACHE.computeIfAbsent(entityClass, PageSumSupport::scanFieldMetas);
}
private static List<PageSumFieldMeta> scanFieldMetas(Class<?> entityClass) {
TableInfo tableInfo = TableInfoHelper.getTableInfo(entityClass);
if (tableInfo == null) {
LOGGER.debug("No TableInfo found for entity {}, falling back to annotation provided column expressions.",
entityClass.getName());
}
Map<String, String> propertyColumnMap = tableInfo != null
? buildPropertyColumnMap(tableInfo)
: Collections.emptyMap();
List<PageSumFieldMeta> metas = new ArrayList<>();
Class<?> current = entityClass;
while (current != null && current != Object.class) {
Field[] fields = current.getDeclaredFields();
for (Field field : fields) {
PageSum annotation = field.getAnnotation(PageSum.class);
if (annotation == null) {
continue;
}
if (!isNumeric(field.getType())) {
LOGGER.warn("Field {}.{} annotated with @PageSum is not numeric and will be ignored.",
entityClass.getSimpleName(), field.getName());
continue;
}
String columnExpression = resolveColumnExpression(annotation, field, propertyColumnMap);
if (StrUtil.isBlank(columnExpression)) {
LOGGER.warn("Unable to resolve column for field {}.{} with @PageSum, skipping.",
entityClass.getSimpleName(), field.getName());
continue;
}
metas.add(PageSumFieldMeta.of(field, columnExpression));
}
current = current.getSuperclass();
}
return metas.isEmpty() ? Collections.emptyList() : Collections.unmodifiableList(metas);
}
private static Map<String, String> buildPropertyColumnMap(TableInfo tableInfo) {
Map<String, String> mapping = new LinkedHashMap<>();
if (StrUtil.isNotBlank(tableInfo.getKeyProperty()) && StrUtil.isNotBlank(tableInfo.getKeyColumn())) {
mapping.put(tableInfo.getKeyProperty(), tableInfo.getKeyColumn());
}
for (TableFieldInfo fieldInfo : tableInfo.getFieldList()) {
mapping.put(fieldInfo.getProperty(), fieldInfo.getColumn());
}
return mapping;
}
private static String resolveColumnExpression(PageSum annotation, Field field, Map<String, String> propertyColumnMap) {
if (StrUtil.isNotBlank(annotation.column())) {
return annotation.column();
}
return propertyColumnMap.get(field.getName());
}
private static boolean isNumeric(Class<?> type) {
if (type.isPrimitive()) {
return type == int.class || type == long.class || type == double.class
|| type == float.class || type == short.class || type == byte.class;
}
return Number.class.isAssignableFrom(type) || BigDecimal.class.isAssignableFrom(type)
|| BigInteger.class.isAssignableFrom(type);
}
private static <T> Map<String, BigDecimal> executeSum(BaseMapper<T> mapper, Wrapper<T> wrapper, List<PageSumFieldMeta> metas) {
if (metas.isEmpty()) {
return Collections.emptyMap();
}
Wrapper<T> workingWrapper = cloneWrapper(wrapper);
applySelect(workingWrapper, metas);
List<Map<String, Object>> rows = mapper.selectMaps(workingWrapper);
Map<String, BigDecimal> result = new LinkedHashMap<>(metas.size());
Map<String, Object> row = rows.isEmpty() ? Collections.emptyMap() : rows.get(0);
for (PageSumFieldMeta meta : metas) {
Object value = extractValue(row, meta.getSelectAlias());
result.put(meta.getPropertyName(), toBigDecimal(value));
}
return result;
}
private static <T> Wrapper<T> cloneWrapper(Wrapper<T> wrapper) {
if (wrapper == null) {
return new QueryWrapper<>();
}
if (wrapper instanceof com.baomidou.mybatisplus.core.conditions.AbstractWrapper<?, ?, ?> abstractWrapper) {
@SuppressWarnings("unchecked")
Wrapper<T> clone = (Wrapper<T>) abstractWrapper.clone();
return clone;
}
return wrapper;
}
private static void applySelect(Wrapper<?> wrapper, List<PageSumFieldMeta> metas) {
String selectSql = buildSelectSql(metas);
if (wrapper instanceof QueryWrapper<?> queryWrapper) {
queryWrapper.select(selectSql);
return;
}
if (wrapper instanceof LambdaQueryWrapper<?> lambdaQueryWrapper) {
setSqlSelect(lambdaQueryWrapper, selectSql);
return;
}
// attempt reflective fallback for other wrapper implementations extending LambdaQueryWrapper
setSqlSelect(wrapper, selectSql);
}
private static String buildSelectSql(List<PageSumFieldMeta> metas) {
StringBuilder builder = new StringBuilder();
for (int i = 0; i < metas.size(); i++) {
if (i > 0) {
builder.append(',');
}
builder.append(metas.get(i).buildSelectSegment());
}
return builder.toString();
}
private static void setSqlSelect(Object wrapper, String selectSql) {
Field field = SQL_SELECT_FIELD_CACHE.computeIfAbsent(wrapper.getClass(), PageSumSupport::locateSqlSelectField)
.orElse(null);
if (field == null) {
LOGGER.debug("Unable to locate sqlSelect field on wrapper {}, summary aggregation skipped.",
wrapper.getClass().getName());
return;
}
try {
com.baomidou.mybatisplus.core.conditions.SharedString shared = (com.baomidou.mybatisplus.core.conditions.SharedString) field.get(wrapper);
if (shared == null) {
shared = com.baomidou.mybatisplus.core.conditions.SharedString.emptyString();
field.set(wrapper, shared);
}
shared.setStringValue(selectSql);
} catch (IllegalAccessException ex) {
LOGGER.warn("Failed to set sqlSelect on wrapper {}: {}", wrapper.getClass().getName(), ex.getMessage());
}
}
private static Optional<Field> locateSqlSelectField(Class<?> wrapperClass) {
Class<?> current = wrapperClass;
while (current != null && current != Object.class) {
try {
Field field = current.getDeclaredField("sqlSelect");
field.setAccessible(true);
return Optional.of(field);
} catch (NoSuchFieldException ignored) {
current = current.getSuperclass();
}
}
return Optional.empty();
}
private static Object extractValue(Map<String, Object> row, String alias) {
if (row == null || row.isEmpty()) {
return null;
}
if (row.containsKey(alias)) {
return row.get(alias);
}
for (Map.Entry<String, Object> entry : row.entrySet()) {
if (alias.equalsIgnoreCase(entry.getKey())) {
return entry.getValue();
}
}
return null;
}
private static BigDecimal toBigDecimal(Object value) {
if (value == null) {
return BigDecimal.ZERO;
}
if (value instanceof BigDecimal decimal) {
return decimal;
}
if (value instanceof BigInteger bigInteger) {
return new BigDecimal(bigInteger);
}
if (value instanceof Number number) {
return new BigDecimal(number.toString());
}
if (value instanceof CharSequence sequence) {
String text = sequence.toString().trim();
if (text.isEmpty()) {
return BigDecimal.ZERO;
}
try {
return new BigDecimal(text);
} catch (NumberFormatException ex) {
LOGGER.warn("Unable to parse numeric summary value '{}': {}", text, ex.getMessage());
return BigDecimal.ZERO;
}
}
LOGGER.warn("Unsupported summary value type: {}", value.getClass().getName());
return BigDecimal.ZERO;
}
}
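For orientation, a hedged sketch of the query this helper ends up issuing (table and column names reuse the illustrative DO from the BaseMapperX note above; only the select list is replaced, the caller's where-clause survives):

// wrapper select list produced by buildSelectSql():
//   SELECT SUM(amount) AS amount, SUM(fee) AS totalFee FROM trade_order WHERE ...
// selectMaps() returns a single row; extractValue() matches aliases
// case-insensitively because Oracle/DM-flavoured databases upper-case column labels.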

View File

@@ -0,0 +1,79 @@
package com.zt.plat.framework.mybatis.core.sum;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.core.handlers.AnnotationHandler;
import com.zt.plat.framework.common.annotation.PageSum;
import org.springframework.core.annotation.AnnotationUtils;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.util.Collections;
import java.util.Map;
/**
* 让 {@link PageSum#exist()} 能够自动生成 {@link TableField#exist()} = false 的能力,
* 这样 DO 层无需重复编写 {@code @TableField(exist = false)}。
*/
public class PageSumTableFieldAnnotationHandler implements AnnotationHandler {
private static final AnnotationHandler DEFAULT_HANDLER = new AnnotationHandler() { };
/** 预构建 @TableField(exist = false) 的属性集合,避免重复创建 Map 对象 */
private static final Map<String, Object> TABLE_FIELD_EXIST_FALSE_ATTRIBUTES =
Collections.singletonMap("exist", Boolean.FALSE);
private final AnnotationHandler delegate;
public PageSumTableFieldAnnotationHandler(AnnotationHandler delegate) {
this.delegate = delegate != null ? delegate : DEFAULT_HANDLER;
}
@Override
public <T extends Annotation> T getAnnotation(Class<?> target, Class<T> annotationClass) {
return delegate.getAnnotation(target, annotationClass);
}
@Override
public <T extends Annotation> boolean isAnnotationPresent(Class<?> target, Class<T> annotationClass) {
return delegate.isAnnotationPresent(target, annotationClass);
}
@Override
public <T extends Annotation> T getAnnotation(java.lang.reflect.Method method, Class<T> annotationClass) {
return delegate.getAnnotation(method, annotationClass);
}
@Override
public <T extends Annotation> boolean isAnnotationPresent(java.lang.reflect.Method method, Class<T> annotationClass) {
return delegate.isAnnotationPresent(method, annotationClass);
}
@Override
public <T extends Annotation> T getAnnotation(Field field, Class<T> annotationClass) {
T annotation = delegate.getAnnotation(field, annotationClass);
if (annotation != null || annotationClass != TableField.class) {
return annotation;
}
PageSum pageSum = delegate.getAnnotation(field, PageSum.class);
if (pageSum != null && !pageSum.exist()) {
// 当字段只用于分页汇总时,动态合成一个 exist = false 的 TableField 注解
return annotationClass.cast(synthesizeTableField(field));
}
return null;
}
@Override
public <T extends Annotation> boolean isAnnotationPresent(Field field, Class<T> annotationClass) {
if (delegate.isAnnotationPresent(field, annotationClass)) {
return true;
}
if (annotationClass != TableField.class) {
return false;
}
PageSum pageSum = delegate.getAnnotation(field, PageSum.class);
return pageSum != null && !pageSum.exist();
}
private static TableField synthesizeTableField(Field field) {
return AnnotationUtils.synthesizeAnnotation(TABLE_FIELD_EXIST_FALSE_ATTRIBUTES, TableField.class, field);
}
}
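A hedged before/after of what the handler saves in DO code (field and column names are illustrative):

// without the handler, a summary-only field needs both annotations:
@TableField(exist = false)
@PageSum(column = "fee", exist = false)
private BigDecimal totalFee;

// with it, @TableField(exist = false) is synthesized on the fly:
@PageSum(column = "fee", exist = false)
private BigDecimal totalFee;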

View File

@@ -0,0 +1,68 @@
package com.zt.plat.framework.mybatis.core.sum;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.zt.plat.framework.common.annotation.PageSum;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.mybatis.core.mapper.BaseMapperX;
import org.junit.jupiter.api.Test;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
class PageSumSupportTest {
@Test
void shouldAttachSummaryWhenAnnotationPresent() {
TestMapper mapper = createMapperProxy();
PageResult<TestEntity> pageResult = new PageResult<>(Collections.emptyList(), 0L);
QueryWrapper<TestEntity> wrapper = new QueryWrapper<>();
PageSumSupport.tryAttachSummary(mapper, wrapper, pageResult);
assertFalse(pageResult.getSummary().isEmpty());
assertEquals(new BigDecimal("123.45"), pageResult.getSummary().get("amount"));
assertEquals(new BigDecimal("50"), pageResult.getSummary().get("virtualAmount"));
}
private TestMapper createMapperProxy() {
InvocationHandler handler = new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (method.getDeclaringClass() == Object.class) {
return method.invoke(this, args);
}
if ("selectMaps".equals(method.getName())) {
Map<String, Object> row = new HashMap<>();
row.put("amount", new BigDecimal("123.45"));
row.put("virtualAmount", new BigDecimal("50"));
return List.of(row);
}
return Collections.emptyList();
}
};
return (TestMapper) Proxy.newProxyInstance(
TestMapper.class.getClassLoader(),
new Class[]{TestMapper.class},
handler);
}
interface TestMapper extends BaseMapperX<TestEntity> {
}
static class TestEntity {
@PageSum(column = "amount")
private BigDecimal amount;
@PageSum(column = "virtual_column", exist = false)
private BigDecimal virtualAmount;
}
}

View File

@@ -73,9 +73,11 @@ public class LoginUser {
private Long visitCompanyId;
private String visitCompanyName;
private String visitCompanyCode;
private Long visitDeptId;
private String visitDeptName;
private String visitDeptCode;
public void setContext(String key, Object value) {
if (context == null) {

View File

@@ -1,5 +1,6 @@
package com.zt.plat.framework.swagger.config;
import com.zt.plat.framework.common.enums.RpcConstants;
import io.swagger.v3.oas.models.Components;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.info.Contact;
@@ -11,6 +12,7 @@ import io.swagger.v3.oas.models.parameters.Parameter;
import io.swagger.v3.oas.models.security.SecurityRequirement;
import io.swagger.v3.oas.models.security.SecurityScheme;
import org.springdoc.core.customizers.OpenApiBuilderCustomizer;
import org.springdoc.core.customizers.OpenApiCustomizer;
import org.springdoc.core.customizers.ServerBaseUrlCustomizer;
import org.springdoc.core.models.GroupedOpenApi;
import org.springdoc.core.properties.SpringDocConfigProperties;
@@ -123,12 +125,26 @@ public class ZtSwaggerAutoConfiguration {
return GroupedOpenApi.builder()
.group(group)
.pathsToMatch("/admin-api/" + path + "/**", "/app-api/" + path + "/**")
.pathsToExclude(RpcConstants.RPC_API_PREFIX + "/**")
.addOperationCustomizer((operation, handlerMethod) -> operation
.addParametersItem(buildTenantHeaderParameter())
.addParametersItem(buildSecurityHeaderParameter()))
.build();
}
@Bean
public OpenApiCustomizer rpcApiPathExclusionCustomiser() {
return openApi -> {
if (openApi == null || openApi.getPaths() == null) {
return;
}
openApi.getPaths().entrySet().removeIf(entry -> {
String path = entry.getKey();
return path != null && path.startsWith(RpcConstants.RPC_API_PREFIX);
});
};
}
/**
* 构建 Tenant 租户编号请求头参数
*

View File

@@ -1,6 +1,7 @@
## AdoptOpenJDK stopped publishing OpenJDK binaries; Eclipse Temurin is its continuation and offers better stability
FROM 172.16.46.66:10043/base-service/eclipse-temurin:21-jre
ARG BASE_IMAGE=172.16.46.66:10043/base-service/skywalking-agent-jre:9.7.0
FROM ${BASE_IMAGE}
## 创建目录,并使用它作为工作目录
RUN mkdir -p /zt-gateway
@@ -10,10 +11,15 @@ COPY ./target/zt-gateway.jar app.jar
## 设置 TZ 时区
## 设置 JAVA_OPTS 环境变量,可通过 docker run -e "JAVA_OPTS=" 进行覆盖
ENV TZ=Asia/Shanghai JAVA_OPTS="-Xms512m -Xmx512m"
ENV TZ=Asia/Shanghai
ENV JAVA_OPTS="-Xms512m -Xmx512m"
ENV SW_AGENT_HOME=/opt/skywalking/agent
ENV SW_AGENT_NAME=zt-gateway
ENV SW_AGENT_COLLECTOR_BACKEND_SERVICES=172.16.46.63:30201
ENV AGENT_JAVA_OPTS="-javaagent:${SW_AGENT_HOME}/skywalking-agent.jar -Dskywalking.agent.service_name=${SW_AGENT_NAME} -Dskywalking.collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES}"
## Expose the backend service port 48080
EXPOSE 48080
## 启动后端项目
CMD java ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -jar app.jar
CMD java ${AGENT_JAVA_OPTS} ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -jar app.jar

View File

@@ -2,6 +2,7 @@ package com.zt.plat.gateway.jackson;
import cn.hutool.core.collection.CollUtil;
import com.zt.plat.framework.common.util.json.JsonUtils;
import com.zt.plat.framework.common.util.json.databind.LongTypeSerializerModifier;
import com.zt.plat.framework.common.util.json.databind.NumberSerializer;
import com.zt.plat.framework.common.util.json.databind.TimestampLocalDateTimeDeserializer;
import com.zt.plat.framework.common.util.json.databind.TimestampLocalDateTimeSerializer;
@@ -39,6 +40,7 @@ public class JacksonAutoConfiguration {
// 新增 LocalDateTime 序列化、反序列化规则,使用 Long 时间戳
.addSerializer(LocalDateTime.class, TimestampLocalDateTimeSerializer.INSTANCE)
.addDeserializer(LocalDateTime.class, TimestampLocalDateTimeDeserializer.INSTANCE);
simpleModule.setSerializerModifier(new LongTypeSerializerModifier());
// 1.2 注册到 objectMapper
objectMappers.forEach(objectMapper -> objectMapper.registerModule(simpleModule));
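A hedged aside on why the gateway registers this modifier (its exact behavior is assumed from the name: serializing Long values as strings for JavaScript clients):

// JS numbers are IEEE-754 doubles, exact only up to 2^53 - 1:
long id = 1234567890123456789L;
// as a JSON number, a JS client stores 1234567890123456768 (the nearest double)
// as a JSON string, "1234567890123456789" survives intact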

View File

@@ -4,8 +4,8 @@ spring:
cloud:
nacos:
server-addr: 172.16.46.63:30848 # Nacos 服务器地址
username: # Nacos 账号
password: # Nacos 密码
username: ${config.username} # Nacos account; substituted via Maven profile resource filtering
password: ${config.password} # Nacos password; substituted the same way
discovery: # service registry settings
namespace: ${config.namespace} # namespace; substituted via Maven profile resource filtering
group: DEFAULT_GROUP # Nacos group to use, DEFAULT_GROUP by default

View File

@@ -272,6 +272,13 @@ spring:
- Path=/admin-api/databus/**
filters:
- RewritePath=/admin-api/databus/v3/api-docs, /v3/api-docs # 配置,保证转发到 /v3/api-docs
## rule-server service
- id: rule-admin-api # route id
uri: grayLb://rule-server
predicates: # predicates deciding whether the route matches; maps to the RouteDefinition array
- Path=/admin-api/rule/**
filters:
- RewritePath=/admin-api/rule/v3/api-docs, /v3/api-docs # keep /v3/api-docs reachable after the rewrite
x-forwarded:
prefix-enabled: false # 避免 Swagger 重复带上额外的 /admin-api/system 前缀

View File

@@ -5,6 +5,10 @@
<springProperty scope="context" name="zt.info.base-package" source="zt.info.base-package"/>
<!-- Output pattern: %d date, %X{tid} SkyWalking trace id, %thread thread name, %-5level level padded to 5 chars, %msg message, %n newline -->
<property name="PATTERN_DEFAULT" value="%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} | %highlight(${LOG_LEVEL_PATTERN:-%5p} ${PID:- }) | %boldYellow(%thread [%tid]) %boldGreen(%-40.40logger{39}) | %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<!--应用名称-->
<springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
<!-- 日志输出路径 -->
<property name="LOG_DIR" value="${user.home}/logs/${spring.application.name}"/>
<!-- 控制台 Appender -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">     
@@ -31,7 +35,7 @@
<!-- 启动服务时,是否清理历史日志,一般不建议清理 -->
<cleanHistoryOnStart>${LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START:-false}</cleanHistoryOnStart>
<!-- 日志文件,到达多少容量,进行滚动 -->
<maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-10MB}</maxFileSize>
<maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-50MB}</maxFileSize>
<!-- 日志文件的总大小0 表示不限制 -->
<totalSizeCap>${LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP:-0}</totalSizeCap>
<!-- 日志文件的保留天数 -->
@@ -56,18 +60,39 @@
</encoder>
</appender>
<!-- ERROR-level log file -->
<appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_DIR}-error.log</file>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_DIR}-error.%d{yyyy-MM-dd}.log</fileNamePattern>
<maxHistory>30</maxHistory> <!-- 保留30天的日志 -->
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- logback log levels: FATAL > ERROR > WARN > INFO > DEBUG -->
<!-- local / dev environments -->
<springProfile name="local">
<root level="INFO">
<springProfile name="local,dev">
<root level="WARN">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ERROR"/>
<appender-ref ref="GRPC"/> <!-- 本地环境下,如果不想接入 SkyWalking 日志服务,可以注释掉本行 -->
<appender-ref ref="ASYNC"/> <!-- 本地环境下,如果不想打印日志,可以注释掉本行 -->
</root>
</springProfile>
<!-- other environments -->
<springProfile name="test,stage,prod,default"> <!-- dev uses the block above; listing it here too would define two roots for one profile -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ERROR"/>
<appender-ref ref="ASYNC"/>
<appender-ref ref="GRPC"/>
</root>

View File

@@ -1,6 +1,7 @@
## AdoptOpenJDK stopped publishing OpenJDK binaries; Eclipse Temurin is its continuation and offers better stability
FROM 172.16.46.66:10043/base-service/eclipse-temurin:21-jre
ARG BASE_IMAGE=172.16.46.66:10043/base-service/skywalking-agent-jre:9.7.0
FROM ${BASE_IMAGE}
## 创建目录,并使用它作为工作目录
RUN mkdir -p /zt-module-ai-server
@@ -10,10 +11,15 @@ COPY ./target/zt-module-ai-server.jar app.jar
## 设置 TZ 时区
## 设置 JAVA_OPTS 环境变量,可通过 docker run -e "JAVA_OPTS=" 进行覆盖
ENV TZ=Asia/Shanghai JAVA_OPTS="-Xms512m -Xmx512m"
ENV TZ=Asia/Shanghai
ENV JAVA_OPTS="-Xms512m -Xmx512m"
ENV SW_AGENT_HOME=/opt/skywalking/agent
ENV SW_AGENT_NAME=zt-module-ai-server
ENV SW_AGENT_COLLECTOR_BACKEND_SERVICES=172.16.46.63:30201
ENV AGENT_JAVA_OPTS="-javaagent:${SW_AGENT_HOME}/skywalking-agent.jar -Dskywalking.agent.service_name=${SW_AGENT_NAME} -Dskywalking.collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES}"
## Expose the backend service port 48090
EXPOSE 48090
## 启动后端项目
CMD java ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -jar app.jar
CMD java ${AGENT_JAVA_OPTS} ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -jar app.jar

View File

@@ -1,6 +1,7 @@
## AdoptOpenJDK stopped publishing OpenJDK binaries; Eclipse Temurin is its continuation and offers better stability
FROM 172.16.46.66:10043/base-service/eclipse-temurin:21-jre
ARG BASE_IMAGE=172.16.46.66:10043/base-service/skywalking-agent-jre:9.7.0
FROM ${BASE_IMAGE}
## 创建目录,并使用它作为工作目录
RUN mkdir -p /zt-module-bpm-server
@@ -10,10 +11,15 @@ COPY ./target/zt-module-bpm-server.jar app.jar
## 设置 TZ 时区
## 设置 JAVA_OPTS 环境变量,可通过 docker run -e "JAVA_OPTS=" 进行覆盖
ENV TZ=Asia/Shanghai JAVA_OPTS="-Xms512m -Xmx512m"
ENV TZ=Asia/Shanghai
ENV JAVA_OPTS="-Xms512m -Xmx512m"
ENV SW_AGENT_HOME=/opt/skywalking/agent
ENV SW_AGENT_NAME=zt-module-bpm-server
ENV SW_AGENT_COLLECTOR_BACKEND_SERVICES=172.16.46.63:30201
ENV AGENT_JAVA_OPTS="-javaagent:${SW_AGENT_HOME}/skywalking-agent.jar -Dskywalking.agent.service_name=${SW_AGENT_NAME} -Dskywalking.collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES}"
## Expose the backend service port 48083
EXPOSE 48083
## 启动后端项目
CMD java ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -jar app.jar
CMD java ${AGENT_JAVA_OPTS} ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -jar app.jar

View File

@@ -8,17 +8,25 @@ import com.zt.plat.module.bpm.framework.flowable.core.event.BpmProcessInstanceEv
import com.zt.plat.module.system.api.user.AdminUserApi;
import org.flowable.common.engine.api.delegate.FlowableFunctionDelegate;
import org.flowable.common.engine.api.delegate.event.FlowableEventListener;
import org.flowable.engine.ProcessEngineConfiguration;
import org.flowable.spring.SpringProcessEngineConfiguration;
import org.flowable.spring.boot.EngineConfigurationConfigurer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.AsyncListenableTaskExecutor;
import org.springframework.jdbc.datasource.DataSourceUtils;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import java.util.List;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
/**
* BPM 模块的 Flowable 配置类
@@ -28,6 +36,8 @@ import java.util.List;
@Configuration(proxyBeanMethods = false)
public class BpmFlowableConfiguration {
private static final Logger log = LoggerFactory.getLogger(BpmFlowableConfiguration.class);
/**
* 参考 {@link org.flowable.spring.boot.FlowableJobConfiguration} 类,创建对应的 AsyncListenableTaskExecutor Bean
*
@@ -69,6 +79,37 @@ public class BpmFlowableConfiguration {
};
}
@Bean
public EngineConfigurationConfigurer<SpringProcessEngineConfiguration> dmProcessEngineConfigurationConfigurer(DataSource dataSource) {
return configuration -> {
try {
configureDmCompatibility(configuration, dataSource);
} catch (SQLException ex) {
log.warn("Failed to inspect datasource for DM compatibility; Flowable will keep default settings", ex);
}
};
}
private void configureDmCompatibility(SpringProcessEngineConfiguration configuration, DataSource dataSource) throws SQLException {
Connection connection = null;
try {
connection = DataSourceUtils.getConnection(dataSource);
DatabaseMetaData metaData = connection.getMetaData();
String productName = metaData.getDatabaseProductName();
String jdbcUrl = metaData.getURL();
boolean dmProduct = productName != null && productName.toLowerCase().contains("dm");
boolean dmUrl = jdbcUrl != null && jdbcUrl.toLowerCase().startsWith("jdbc:dm");
if (!dmProduct && !dmUrl) {
return;
}
log.info("Detected DM database (product='{}'); enabling Flowable Oracle compatibility with automatic schema updates", productName);
configuration.setDatabaseSchemaUpdate(ProcessEngineConfiguration.DB_SCHEMA_UPDATE_TRUE);
configuration.setDatabaseType("oracle");
} finally {
DataSourceUtils.releaseConnection(connection, dataSource);
}
}
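To summarize what the configurer above does, a hedged recap (the product name string is an assumption; the JDBC URL mirrors the application config later in this diff):

// detection: either DatabaseMetaData#getDatabaseProductName() containing "dm"
// (e.g. "DM DBMS") or a URL starting with jdbc:dm, e.g. jdbc:dm://172.16.46.247:1050?schema=BPM
configuration.setDatabaseType("oracle"); // Flowable ships no DM dialect; Oracle is the closest match
configuration.setDatabaseSchemaUpdate(ProcessEngineConfiguration.DB_SCHEMA_UPDATE_TRUE); // let Flowable manage its tables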
// =========== approver-related beans ==========
@Bean

View File

@@ -5,6 +5,25 @@
package liquibase.database.core;
import java.lang.reflect.Method;
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import liquibase.CatalogAndSchema;
import liquibase.GlobalConfiguration;
import liquibase.Scope;
@@ -23,17 +42,15 @@ import liquibase.statement.UniqueConstraint;
import liquibase.statement.core.RawCallStatement;
import liquibase.statement.core.RawParameterizedSqlStatement;
import liquibase.structure.DatabaseObject;
import liquibase.structure.core.*;
import liquibase.structure.core.Catalog;
import liquibase.structure.core.Column;
import liquibase.structure.core.Index;
import liquibase.structure.core.PrimaryKey;
import liquibase.structure.core.Schema;
import liquibase.util.JdbcUtil;
import liquibase.util.StringUtil;
import org.apache.commons.lang3.StringUtils;
import java.lang.reflect.Method;
import java.sql.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class DmDatabase extends AbstractJdbcDatabase {
private static final String PROXY_USER_REGEX = ".*(?:thin|oci)\\:(.+)/@.*";
public static final Pattern PROXY_USER_PATTERN = Pattern.compile(".*(?:thin|oci)\\:(.+)/@.*");
@@ -98,6 +115,7 @@ public class DmDatabase extends AbstractJdbcDatabase {
public void setConnection(DatabaseConnection conn) {
this.reservedWords.addAll(Arrays.asList("GROUP", "USER", "SESSION", "PASSWORD", "RESOURCE", "START", "SIZE", "UID", "DESC", "ORDER"));
Connection sqlConn = null;
boolean dmDatabase = false;
if (!(conn instanceof OfflineConnection)) {
try {
if (conn instanceof JdbcConnection) {
@@ -124,26 +142,42 @@ public class DmDatabase extends AbstractJdbcDatabase {
Scope.getCurrentScope().getLog(this.getClass()).info("Could not set remarks reporting on OracleDatabase: " + e.getMessage());
}
CallableStatement statement = null;
try {
statement = sqlConn.prepareCall("{call DBMS_UTILITY.DB_VERSION(?,?)}");
statement.registerOutParameter(1, 12);
statement.registerOutParameter(2, 12);
statement.execute();
String compatibleVersion = statement.getString(2);
if (compatibleVersion != null) {
Matcher majorVersionMatcher = VERSION_PATTERN.matcher(compatibleVersion);
if (majorVersionMatcher.matches()) {
this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1));
this.databaseMinorVersion = Integer.valueOf(majorVersionMatcher.group(2));
DatabaseMetaData metaData = sqlConn.getMetaData();
if (metaData != null) {
String productName = metaData.getDatabaseProductName();
dmDatabase = productName != null && PRODUCT_NAME.equalsIgnoreCase(productName);
if (dmDatabase) {
this.databaseMajorVersion = metaData.getDatabaseMajorVersion();
this.databaseMinorVersion = metaData.getDatabaseMinorVersion();
}
}
} catch (SQLException e) {
String message = "Cannot read from DBMS_UTILITY.DB_VERSION: " + e.getMessage();
Scope.getCurrentScope().getLog(this.getClass()).info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message);
} finally {
JdbcUtil.closeStatement(statement);
Scope.getCurrentScope().getLog(this.getClass()).info("Unable to inspect database metadata for DM version detection: " + e.getMessage());
}
if (!dmDatabase) {
CallableStatement statement = null;
try {
statement = sqlConn.prepareCall("{call DBMS_UTILITY.DB_VERSION(?,?)}");
statement.registerOutParameter(1, 12);
statement.registerOutParameter(2, 12);
statement.execute();
String compatibleVersion = statement.getString(2);
if (compatibleVersion != null) {
Matcher majorVersionMatcher = VERSION_PATTERN.matcher(compatibleVersion);
if (majorVersionMatcher.matches()) {
this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1));
this.databaseMinorVersion = Integer.valueOf(majorVersionMatcher.group(2));
}
}
} catch (SQLException e) {
String message = "Cannot read from DBMS_UTILITY.DB_VERSION: " + e.getMessage();
Scope.getCurrentScope().getLog(this.getClass()).info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message);
} finally {
JdbcUtil.closeStatement(statement);
}
}
if (GlobalConfiguration.DDL_LOCK_TIMEOUT.getCurrentValue() != null) {
@@ -250,7 +284,15 @@ public class DmDatabase extends AbstractJdbcDatabase {
}
public boolean isCorrectDatabaseImplementation(DatabaseConnection conn) throws DatabaseException {
return "oracle".equalsIgnoreCase(conn.getDatabaseProductName());
String databaseProductName = conn == null ? null : conn.getDatabaseProductName();
if (databaseProductName == null) {
return false;
}
if (PRODUCT_NAME.equalsIgnoreCase(databaseProductName)) {
return true;
}
// Flowable has historically mapped DM onto Oracle metadata, so Oracle is accepted here as well for compatibility
return "oracle".equalsIgnoreCase(databaseProductName);
}
public String getDefaultDriver(String url) {

View File

@@ -0,0 +1,32 @@
package liquibase.datatype.core;
import liquibase.database.Database;
import liquibase.database.core.DmDatabase;
import liquibase.datatype.DataTypeInfo;
import liquibase.datatype.DatabaseDataType;
@DataTypeInfo(
name = "boolean",
aliases = {"java.sql.Types.BOOLEAN", "java.lang.Boolean", "bit", "bool"},
minParameters = 0,
maxParameters = 0,
priority = 2
)
public class DmBooleanType extends BooleanType {
@Override
public boolean supports(Database database) {
if (database instanceof DmDatabase) {
return true;
}
return super.supports(database);
}
@Override
public DatabaseDataType toDatabaseDataType(Database database) {
if (database instanceof DmDatabase) {
return new DatabaseDataType("NUMBER", 1);
}
return super.toDatabaseDataType(database);
}
}
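A hedged note on the effect (the changelog snippet is illustrative): with this type registered, boolean columns resolve to the Oracle-style NUMBER(1) on DM.

// changelog:  <column name="DELETED" type="boolean"/>
DatabaseDataType dmType = new DmBooleanType().toDatabaseDataType(new DmDatabase()); // -> NUMBER(1)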

View File

@@ -0,0 +1,354 @@
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.common.engine.impl.db;
import org.apache.ibatis.session.SqlSessionFactory;
import org.flowable.common.engine.api.FlowableException;
import org.flowable.common.engine.impl.context.Context;
import org.flowable.common.engine.impl.interceptor.CommandContext;
import org.flowable.common.engine.impl.interceptor.Session;
import org.flowable.common.engine.impl.interceptor.SessionFactory;
import org.flowable.common.engine.impl.persistence.cache.EntityCache;
import org.flowable.common.engine.impl.persistence.entity.Entity;
import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author Tom Baeyens
* @author Joram Barrez
*/
public class DbSqlSessionFactory implements SessionFactory {
protected Map<String, Map<String, String>> databaseSpecificStatements = new HashMap<>();
protected String databaseType;
protected String databaseTablePrefix = "";
protected boolean tablePrefixIsSchema;
protected String databaseCatalog;
protected String databaseSchema;
protected SqlSessionFactory sqlSessionFactory;
protected Map<String, String> statementMappings;
protected Map<Class<?>, String> insertStatements = new ConcurrentHashMap<>();
protected Map<Class<?>, String> updateStatements = new ConcurrentHashMap<>();
protected Map<Class<?>, String> deleteStatements = new ConcurrentHashMap<>();
protected Map<Class<?>, String> selectStatements = new ConcurrentHashMap<>();
protected List<Class<? extends Entity>> insertionOrder = new ArrayList<>();
protected List<Class<? extends Entity>> deletionOrder = new ArrayList<>();
protected boolean isDbHistoryUsed = true;
protected Set<Class<? extends Entity>> bulkInserteableEntityClasses = new HashSet<>();
protected Map<Class<?>, String> bulkInsertStatements = new ConcurrentHashMap<>();
protected int maxNrOfStatementsInBulkInsert = 100;
protected Map<String, Class<?>> logicalNameToClassMapping = new ConcurrentHashMap<>();
protected boolean usePrefixId;
public DbSqlSessionFactory(boolean usePrefixId) {
this.usePrefixId = usePrefixId;
}
@Override
public Class<?> getSessionType() {
return DbSqlSession.class;
}
@Override
public Session openSession(CommandContext commandContext) {
DbSqlSession dbSqlSession = createDbSqlSession();
// DM adaptation: when no schema is configured, read the current one from the connection
if (dbSqlSession.getSqlSession().getConnection() == null) {
throw new FlowableException("Invalid dbSqlSession: no active connection found");
}
try {
if (getDatabaseSchema() == null || getDatabaseSchema().isEmpty()) {
setDatabaseSchema(dbSqlSession.getSqlSession().getConnection().getSchema());
}
} catch (SQLException e) {
throw new FlowableException("Could not read schema from connection", e);
}
if (getDatabaseSchema() != null && !getDatabaseSchema().isEmpty()) {
try {
dbSqlSession.getSqlSession().getConnection().setSchema(getDatabaseSchema());
} catch (SQLException e) {
throw new FlowableException("Could not set database schema on connection", e);
}
}
if (getDatabaseCatalog() != null && !getDatabaseCatalog().isEmpty()) {
try {
dbSqlSession.getSqlSession().getConnection().setCatalog(getDatabaseCatalog());
} catch (SQLException e) {
throw new FlowableException("Could not set database catalog on connection", e);
}
}
return dbSqlSession;
}
protected DbSqlSession createDbSqlSession() {
return new DbSqlSession(this, Context.getCommandContext().getSession(EntityCache.class));
}
// insert, update and delete statements
// /////////////////////////////////////
public String getInsertStatement(Entity object) {
return getStatement(object.getClass(), insertStatements, "insert");
}
public String getInsertStatement(Class<? extends Entity> clazz) {
return getStatement(clazz, insertStatements, "insert");
}
public String getUpdateStatement(Entity object) {
return getStatement(object.getClass(), updateStatements, "update");
}
public String getDeleteStatement(Class<?> entityClass) {
return getStatement(entityClass, deleteStatements, "delete");
}
public String getSelectStatement(Class<?> entityClass) {
return getStatement(entityClass, selectStatements, "select");
}
protected String getStatement(Class<?> entityClass, Map<Class<?>, String> cachedStatements, String prefix) {
String statement = cachedStatements.get(entityClass);
if (statement != null) {
return statement;
}
statement = prefix + entityClass.getSimpleName();
if (statement.endsWith("Impl")) {
statement = statement.substring(0, statement.length() - 10); // removing 'entityImpl'
} else {
statement = statement.substring(0, statement.length() - 6); // removing 'entity'
}
cachedStatements.put(entityClass, statement);
return statement;
}
// db specific mappings
// /////////////////////////////////////////////////////
protected void addDatabaseSpecificStatement(String databaseType, String activitiStatement, String ibatisStatement) {
Map<String, String> specificStatements = databaseSpecificStatements.get(databaseType);
if (specificStatements == null) {
specificStatements = new HashMap<>();
databaseSpecificStatements.put(databaseType, specificStatements);
}
specificStatements.put(activitiStatement, ibatisStatement);
}
public String mapStatement(String statement) {
if (statementMappings == null) {
return statement;
}
String mappedStatement = statementMappings.get(statement);
return (mappedStatement != null ? mappedStatement : statement);
}
// customized getters and setters
// ///////////////////////////////////////////
public void setDatabaseType(String databaseType) {
this.databaseType = databaseType;
this.statementMappings = databaseSpecificStatements.get(databaseType);
}
public boolean isMysql() {
return "mysql".equals(getDatabaseType());
}
public boolean isOracle() {
return "oracle".equals(getDatabaseType());
}
public Boolean isBulkInsertable(Class<? extends Entity> entityClass) {
return bulkInserteableEntityClasses != null && bulkInserteableEntityClasses.contains(entityClass);
}
@SuppressWarnings("rawtypes")
public String getBulkInsertStatement(Class clazz) {
return getStatement(clazz, bulkInsertStatements, "bulkInsert");
}
public Set<Class<? extends Entity>> getBulkInserteableEntityClasses() {
return bulkInserteableEntityClasses;
}
public void setBulkInserteableEntityClasses(Set<Class<? extends Entity>> bulkInserteableEntityClasses) {
this.bulkInserteableEntityClasses = bulkInserteableEntityClasses;
}
public int getMaxNrOfStatementsInBulkInsert() {
return maxNrOfStatementsInBulkInsert;
}
public void setMaxNrOfStatementsInBulkInsert(int maxNrOfStatementsInBulkInsert) {
this.maxNrOfStatementsInBulkInsert = maxNrOfStatementsInBulkInsert;
}
public Map<Class<?>, String> getBulkInsertStatements() {
return bulkInsertStatements;
}
public void setBulkInsertStatements(Map<Class<?>, String> bulkInsertStatements) {
this.bulkInsertStatements = bulkInsertStatements;
}
// getters and setters //////////////////////////////////////////////////////
public SqlSessionFactory getSqlSessionFactory() {
return sqlSessionFactory;
}
public void setSqlSessionFactory(SqlSessionFactory sqlSessionFactory) {
this.sqlSessionFactory = sqlSessionFactory;
}
public String getDatabaseType() {
return databaseType;
}
public Map<String, Map<String, String>> getDatabaseSpecificStatements() {
return databaseSpecificStatements;
}
public void setDatabaseSpecificStatements(Map<String, Map<String, String>> databaseSpecificStatements) {
this.databaseSpecificStatements = databaseSpecificStatements;
}
public Map<String, String> getStatementMappings() {
return statementMappings;
}
public void setStatementMappings(Map<String, String> statementMappings) {
this.statementMappings = statementMappings;
}
public Map<Class<?>, String> getInsertStatements() {
return insertStatements;
}
public void setInsertStatements(Map<Class<?>, String> insertStatements) {
this.insertStatements = insertStatements;
}
public Map<Class<?>, String> getUpdateStatements() {
return updateStatements;
}
public void setUpdateStatements(Map<Class<?>, String> updateStatements) {
this.updateStatements = updateStatements;
}
public Map<Class<?>, String> getDeleteStatements() {
return deleteStatements;
}
public void setDeleteStatements(Map<Class<?>, String> deleteStatements) {
this.deleteStatements = deleteStatements;
}
public Map<Class<?>, String> getSelectStatements() {
return selectStatements;
}
public void setSelectStatements(Map<Class<?>, String> selectStatements) {
this.selectStatements = selectStatements;
}
public boolean isDbHistoryUsed() {
return isDbHistoryUsed;
}
public void setDbHistoryUsed(boolean isDbHistoryUsed) {
this.isDbHistoryUsed = isDbHistoryUsed;
}
public void setDatabaseTablePrefix(String databaseTablePrefix) {
this.databaseTablePrefix = databaseTablePrefix;
}
public String getDatabaseTablePrefix() {
return databaseTablePrefix;
}
public String getDatabaseCatalog() {
return databaseCatalog;
}
public void setDatabaseCatalog(String databaseCatalog) {
this.databaseCatalog = databaseCatalog;
}
public String getDatabaseSchema() {
return databaseSchema;
}
public void setDatabaseSchema(String databaseSchema) {
this.databaseSchema = databaseSchema;
}
public void setTablePrefixIsSchema(boolean tablePrefixIsSchema) {
this.tablePrefixIsSchema = tablePrefixIsSchema;
}
public boolean isTablePrefixIsSchema() {
return tablePrefixIsSchema;
}
public List<Class<? extends Entity>> getInsertionOrder() {
return insertionOrder;
}
public void setInsertionOrder(List<Class<? extends Entity>> insertionOrder) {
this.insertionOrder = insertionOrder;
}
public List<Class<? extends Entity>> getDeletionOrder() {
return deletionOrder;
}
public void setDeletionOrder(List<Class<? extends Entity>> deletionOrder) {
this.deletionOrder = deletionOrder;
}
public void addLogicalEntityClassMapping(String logicalName, Class<?> entityClass) {
logicalNameToClassMapping.put(logicalName, entityClass);
}
public Map<String, Class<?>> getLogicalNameToClassMapping() {
return logicalNameToClassMapping;
}
public void setLogicalNameToClassMapping(Map<String, Class<?>> logicalNameToClassMapping) {
this.logicalNameToClassMapping = logicalNameToClassMapping;
}
public boolean isUsePrefixId() {
return usePrefixId;
}
public void setUsePrefixId(boolean usePrefixId) {
this.usePrefixId = usePrefixId;
}
}

View File

@@ -13,6 +13,7 @@ liquibase.database.core.MariaDBDatabase
liquibase.database.core.MockDatabase
liquibase.database.core.MySQLDatabase
liquibase.database.core.OracleDatabase
liquibase.database.core.DmDatabase
liquibase.database.core.PostgresDatabase
liquibase.database.core.SQLiteDatabase
liquibase.database.core.SybaseASADatabase
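
This services list is how Liquibase's ServiceLocator discovers the new DmDatabase dialect. A minimal sketch of such a class (an assumption for illustration, not the repository's actual implementation) could piggyback on the Oracle dialect, since DM speaks an Oracle-compatible SQL:

package liquibase.database.core;

import liquibase.database.DatabaseConnection;
import liquibase.exception.DatabaseException;

public class DmDatabase extends OracleDatabase {

    @Override
    public String getShortName() {
        return "dm"; // used in changelog `dbms` filters
    }

    @Override
    public boolean isCorrectDatabaseImplementation(DatabaseConnection conn) throws DatabaseException {
        // assumption: the DM JDBC driver reports a product name containing "DM"
        String productName = conn.getDatabaseProductName();
        return productName != null && productName.toUpperCase().contains("DM");
    }

    @Override
    public String getDefaultDriver(String url) {
        return (url != null && url.startsWith("jdbc:dm:")) ? "dm.jdbc.driver.DmDriver" : null;
    }

    @Override
    public int getPriority() {
        return PRIORITY_DATABASE; // outrank the generic OracleDatabase when both match
    }
}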

View File

@@ -0,0 +1 @@
liquibase.datatype.core.DmBooleanType
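
The DmBooleanType registered here is picked up through the same services mechanism. A sketch of what it might do (assumed, not the actual class): map Liquibase's boolean type onto a DM-native column instead of the Oracle NUMBER(1) + CHECK pattern used by the Flowable scripts below:

package liquibase.datatype.core;

import liquibase.database.Database;
import liquibase.database.core.DmDatabase;
import liquibase.datatype.DataTypeInfo;
import liquibase.datatype.DatabaseDataType;
import liquibase.datatype.LiquibaseDataType;

@DataTypeInfo(name = "boolean", aliases = {"java.sql.Types.BOOLEAN", "java.lang.Boolean"},
        minParameters = 0, maxParameters = 0, priority = LiquibaseDataType.PRIORITY_DATABASE)
public class DmBooleanType extends BooleanType {

    @Override
    public DatabaseDataType toDatabaseDataType(Database database) {
        if (database instanceof DmDatabase) {
            return new DatabaseDataType("BIT"); // assumption: DM's native boolean-like type
        }
        return super.toDatabaseDataType(database);
    }
}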

View File

@@ -39,14 +39,14 @@ spring:
primary: master
datasource:
master:
url: jdbc:mysql://172.16.46.247:4787/ruoyi-vue-pro?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true&rewriteBatchedStatements=true # example MySQL Connector/J 8.X connection
username: jygk-test
password: Zgty@0527
url: jdbc:dm://172.16.46.247:1050?schema=BPM
username: SYSDBA
password: pgbsci6ddJ6Sqj@e
slave: # simulated slave datasource; adjust as needed
lazy: true # enable lazy initialization to keep startup fast
url: jdbc:mysql://172.16.46.247:4787/ruoyi-vue-pro?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true&rewriteBatchedStatements=true # example MySQL Connector/J 8.X connection
username: jygk-test
password: Zgty@0527
url: jdbc:dm://172.16.46.247:1050?schema=BPM
username: SYSDBA
password: pgbsci6ddJ6Sqj@e
# Redis configuration. Redisson's defaults are sufficient; tuning is rarely needed
data:
@@ -56,6 +56,11 @@ spring:
database: 0 # database index
# password: 123456 # password; recommended to enable in production
# Under DM, Flowable needs to identify the database as Oracle and auto-upgrade the table structures
flowable:
database-schema-update: true
database-type: oracle
--- #################### MQ message queue configuration ####################
--- #################### Scheduled task configuration ####################
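
The property-based approach above can equivalently be expressed in code; a sketch assuming the flowable-spring-boot starter is on the classpath (the class and bean names are illustrative):

import org.flowable.spring.SpringProcessEngineConfiguration;
import org.flowable.spring.boot.EngineConfigurationConfigurer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class FlowableDmConfiguration {

    // Force Flowable to treat the DM datasource as Oracle instead of probing
    // JDBC metadata, mirroring `flowable.database-type: oracle` above.
    @Bean
    public EngineConfigurationConfigurer<SpringProcessEngineConfiguration> dmAsOracleConfigurer() {
        return configuration -> configuration.setDatabaseType("oracle");
    }
}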

View File

@@ -0,0 +1,41 @@
create table FLW_RU_BATCH (
ID_ VARCHAR2(64) not null,
REV_ INTEGER,
TYPE_ VARCHAR2(64) not null,
SEARCH_KEY_ VARCHAR2(255),
SEARCH_KEY2_ VARCHAR2(255),
CREATE_TIME_ TIMESTAMP(6) not null,
COMPLETE_TIME_ TIMESTAMP(6),
STATUS_ VARCHAR2(255),
BATCH_DOC_ID_ VARCHAR2(64),
TENANT_ID_ VARCHAR2(255) default '',
primary key (ID_)
);
create table FLW_RU_BATCH_PART (
ID_ VARCHAR2(64) not null,
REV_ INTEGER,
BATCH_ID_ VARCHAR2(64),
TYPE_ VARCHAR2(64) not null,
SCOPE_ID_ VARCHAR2(64),
SUB_SCOPE_ID_ VARCHAR2(64),
SCOPE_TYPE_ VARCHAR2(64),
SEARCH_KEY_ VARCHAR2(255),
SEARCH_KEY2_ VARCHAR2(255),
CREATE_TIME_ TIMESTAMP(6) not null,
COMPLETE_TIME_ TIMESTAMP(6),
STATUS_ VARCHAR2(255),
RESULT_DOC_ID_ VARCHAR2(64),
TENANT_ID_ VARCHAR2(255) default '',
primary key (ID_)
);
create index FLW_IDX_BATCH_PART on FLW_RU_BATCH_PART(BATCH_ID_);
alter table FLW_RU_BATCH_PART
add constraint FLW_FK_BATCH_PART_PARENT
foreign key (BATCH_ID_)
references FLW_RU_BATCH (ID_);
insert into ACT_GE_PROPERTY values ('batch.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,4 @@
drop index FLW_IDX_BATCH_PART;
drop table FLW_RU_BATCH_PART;
drop table FLW_RU_BATCH;

View File

@@ -0,0 +1,23 @@
create table ACT_GE_PROPERTY (
NAME_ VARCHAR2(64),
VALUE_ VARCHAR2(300),
REV_ INTEGER,
primary key (NAME_)
);
create table ACT_GE_BYTEARRAY (
ID_ VARCHAR2(64),
REV_ INTEGER,
NAME_ VARCHAR2(255),
DEPLOYMENT_ID_ VARCHAR2(64),
BYTES_ BLOB,
GENERATED_ NUMBER(1) CHECK (GENERATED_ IN (1,0)),
primary key (ID_)
);
insert into ACT_GE_PROPERTY
values ('common.schema.version', '7.0.1.1', 1);
insert into ACT_GE_PROPERTY
values ('next.dbid', '1', 1);

View File

@@ -0,0 +1,2 @@
drop table ACT_GE_BYTEARRAY;
drop table ACT_GE_PROPERTY;

View File

@@ -0,0 +1,355 @@
create table ACT_RE_DEPLOYMENT (
ID_ VARCHAR2(64),
NAME_ VARCHAR2(255),
CATEGORY_ VARCHAR2(255),
KEY_ VARCHAR2(255),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
DEPLOY_TIME_ TIMESTAMP(6),
DERIVED_FROM_ VARCHAR2(64),
DERIVED_FROM_ROOT_ VARCHAR2(64),
PARENT_DEPLOYMENT_ID_ VARCHAR2(255),
ENGINE_VERSION_ VARCHAR2(255),
primary key (ID_)
);
create table ACT_RE_MODEL (
ID_ VARCHAR2(64) not null,
REV_ INTEGER,
NAME_ VARCHAR2(255),
KEY_ VARCHAR2(255),
CATEGORY_ VARCHAR2(255),
CREATE_TIME_ TIMESTAMP(6),
LAST_UPDATE_TIME_ TIMESTAMP(6),
VERSION_ INTEGER,
META_INFO_ VARCHAR2(2000),
DEPLOYMENT_ID_ VARCHAR2(64),
EDITOR_SOURCE_VALUE_ID_ VARCHAR2(64),
EDITOR_SOURCE_EXTRA_VALUE_ID_ VARCHAR2(64),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create table ACT_RU_EXECUTION (
ID_ VARCHAR2(64),
REV_ INTEGER,
PROC_INST_ID_ VARCHAR2(64),
BUSINESS_KEY_ VARCHAR2(255),
PARENT_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
SUPER_EXEC_ VARCHAR2(64),
ROOT_PROC_INST_ID_ VARCHAR2(64),
ACT_ID_ VARCHAR2(255),
IS_ACTIVE_ NUMBER(1) CHECK (IS_ACTIVE_ IN (1,0)),
IS_CONCURRENT_ NUMBER(1) CHECK (IS_CONCURRENT_ IN (1,0)),
IS_SCOPE_ NUMBER(1) CHECK (IS_SCOPE_ IN (1,0)),
IS_EVENT_SCOPE_ NUMBER(1) CHECK (IS_EVENT_SCOPE_ IN (1,0)),
IS_MI_ROOT_ NUMBER(1) CHECK (IS_MI_ROOT_ IN (1,0)),
SUSPENSION_STATE_ INTEGER,
CACHED_ENT_STATE_ INTEGER,
TENANT_ID_ VARCHAR2(255) DEFAULT '',
NAME_ VARCHAR2(255),
START_ACT_ID_ VARCHAR2(255),
START_TIME_ TIMESTAMP(6),
START_USER_ID_ VARCHAR2(255),
LOCK_TIME_ TIMESTAMP(6),
LOCK_OWNER_ VARCHAR2(255),
IS_COUNT_ENABLED_ NUMBER(1) CHECK (IS_COUNT_ENABLED_ IN (1,0)),
EVT_SUBSCR_COUNT_ INTEGER,
TASK_COUNT_ INTEGER,
JOB_COUNT_ INTEGER,
TIMER_JOB_COUNT_ INTEGER,
SUSP_JOB_COUNT_ INTEGER,
DEADLETTER_JOB_COUNT_ INTEGER,
EXTERNAL_WORKER_JOB_COUNT_ INTEGER,
VAR_COUNT_ INTEGER,
ID_LINK_COUNT_ INTEGER,
CALLBACK_ID_ VARCHAR2(255),
CALLBACK_TYPE_ VARCHAR2(255),
REFERENCE_ID_ VARCHAR2(255),
REFERENCE_TYPE_ VARCHAR2(255),
PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
BUSINESS_STATUS_ VARCHAR2(255),
primary key (ID_)
);
create table ACT_RE_PROCDEF (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
CATEGORY_ VARCHAR2(255),
NAME_ VARCHAR2(255),
KEY_ VARCHAR2(255) NOT NULL,
VERSION_ INTEGER NOT NULL,
DEPLOYMENT_ID_ VARCHAR2(64),
RESOURCE_NAME_ VARCHAR2(2000),
DGRM_RESOURCE_NAME_ VARCHAR2(4000),
DESCRIPTION_ VARCHAR2(2000),
HAS_START_FORM_KEY_ NUMBER(1) CHECK (HAS_START_FORM_KEY_ IN (1,0)),
HAS_GRAPHICAL_NOTATION_ NUMBER(1) CHECK (HAS_GRAPHICAL_NOTATION_ IN (1,0)),
SUSPENSION_STATE_ INTEGER,
TENANT_ID_ VARCHAR2(255) DEFAULT '',
DERIVED_FROM_ VARCHAR2(64),
DERIVED_FROM_ROOT_ VARCHAR2(64),
DERIVED_VERSION_ INTEGER DEFAULT 0 NOT NULL,
ENGINE_VERSION_ VARCHAR2(255),
primary key (ID_)
);
create table ACT_EVT_LOG (
LOG_NR_ NUMBER(19),
TYPE_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
EXECUTION_ID_ VARCHAR2(64),
TASK_ID_ VARCHAR2(64),
TIME_STAMP_ TIMESTAMP(6) not null,
USER_ID_ VARCHAR2(255),
DATA_ BLOB,
LOCK_OWNER_ VARCHAR2(255),
LOCK_TIME_ TIMESTAMP(6) null,
IS_PROCESSED_ NUMBER(3) default 0,
primary key (LOG_NR_)
);
create sequence act_evt_log_seq;
create table ACT_PROCDEF_INFO (
ID_ VARCHAR2(64) not null,
PROC_DEF_ID_ VARCHAR2(64) not null,
REV_ integer,
INFO_JSON_ID_ VARCHAR2(64),
primary key (ID_)
);
create table ACT_RU_ACTINST (
ID_ VARCHAR2(64) not null,
REV_ INTEGER default 1,
PROC_DEF_ID_ VARCHAR2(64) not null,
PROC_INST_ID_ VARCHAR2(64) not null,
EXECUTION_ID_ VARCHAR2(64) not null,
ACT_ID_ VARCHAR2(255) not null,
TASK_ID_ VARCHAR2(64),
CALL_PROC_INST_ID_ VARCHAR2(64),
ACT_NAME_ VARCHAR2(255),
ACT_TYPE_ VARCHAR2(255) not null,
ASSIGNEE_ VARCHAR2(255),
START_TIME_ TIMESTAMP(6) not null,
END_TIME_ TIMESTAMP(6),
DURATION_ NUMBER(19,0),
TRANSACTION_ORDER_ INTEGER,
DELETE_REASON_ VARCHAR2(2000),
TENANT_ID_ VARCHAR2(255) default '',
primary key (ID_)
);
create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_);
create index ACT_IDX_EXEC_ROOT on ACT_RU_EXECUTION(ROOT_PROC_INST_ID_);
create index ACT_IDX_EXEC_REF_ID_ on ACT_RU_EXECUTION(REFERENCE_ID_);
create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_);
create index ACT_IDX_RU_ACTI_START on ACT_RU_ACTINST(START_TIME_);
create index ACT_IDX_RU_ACTI_END on ACT_RU_ACTINST(END_TIME_);
create index ACT_IDX_RU_ACTI_PROC on ACT_RU_ACTINST(PROC_INST_ID_);
create index ACT_IDX_RU_ACTI_PROC_ACT on ACT_RU_ACTINST(PROC_INST_ID_, ACT_ID_);
create index ACT_IDX_RU_ACTI_EXEC on ACT_RU_ACTINST(EXECUTION_ID_);
create index ACT_IDX_RU_ACTI_EXEC_ACT on ACT_RU_ACTINST(EXECUTION_ID_, ACT_ID_);
create index ACT_IDX_RU_ACTI_TASK on ACT_RU_ACTINST(TASK_ID_);
create index ACT_IDX_BYTEAR_DEPL on ACT_GE_BYTEARRAY(DEPLOYMENT_ID_);
alter table ACT_GE_BYTEARRAY
add constraint ACT_FK_BYTEARR_DEPL
foreign key (DEPLOYMENT_ID_)
references ACT_RE_DEPLOYMENT (ID_);
alter table ACT_RE_PROCDEF
add constraint ACT_UNIQ_PROCDEF
unique (KEY_,VERSION_, DERIVED_VERSION_, TENANT_ID_);
create index ACT_IDX_EXE_PROCINST on ACT_RU_EXECUTION(PROC_INST_ID_);
alter table ACT_RU_EXECUTION
add constraint ACT_FK_EXE_PROCINST
foreign key (PROC_INST_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_EXE_PARENT on ACT_RU_EXECUTION(PARENT_ID_);
alter table ACT_RU_EXECUTION
add constraint ACT_FK_EXE_PARENT
foreign key (PARENT_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_EXE_SUPER on ACT_RU_EXECUTION(SUPER_EXEC_);
alter table ACT_RU_EXECUTION
add constraint ACT_FK_EXE_SUPER
foreign key (SUPER_EXEC_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_EXE_PROCDEF on ACT_RU_EXECUTION(PROC_DEF_ID_);
alter table ACT_RU_EXECUTION
add constraint ACT_FK_EXE_PROCDEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
create index ACT_IDX_TSKASS_TASK on ACT_RU_IDENTITYLINK(TASK_ID_);
alter table ACT_RU_IDENTITYLINK
add constraint ACT_FK_TSKASS_TASK
foreign key (TASK_ID_)
references ACT_RU_TASK (ID_);
create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_);
alter table ACT_RU_IDENTITYLINK
add constraint ACT_FK_ATHRZ_PROCEDEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
create index ACT_IDX_IDL_PROCINST on ACT_RU_IDENTITYLINK(PROC_INST_ID_);
alter table ACT_RU_IDENTITYLINK
add constraint ACT_FK_IDL_PROCINST
foreign key (PROC_INST_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_TASK_EXEC on ACT_RU_TASK(EXECUTION_ID_);
alter table ACT_RU_TASK
add constraint ACT_FK_TASK_EXE
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_TASK_PROCINST on ACT_RU_TASK(PROC_INST_ID_);
alter table ACT_RU_TASK
add constraint ACT_FK_TASK_PROCINST
foreign key (PROC_INST_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_TASK_PROCDEF on ACT_RU_TASK(PROC_DEF_ID_);
alter table ACT_RU_TASK
add constraint ACT_FK_TASK_PROCDEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
create index ACT_IDX_VAR_EXE on ACT_RU_VARIABLE(EXECUTION_ID_);
alter table ACT_RU_VARIABLE
add constraint ACT_FK_VAR_EXE
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_VAR_PROCINST on ACT_RU_VARIABLE(PROC_INST_ID_);
alter table ACT_RU_VARIABLE
add constraint ACT_FK_VAR_PROCINST
foreign key (PROC_INST_ID_)
references ACT_RU_EXECUTION(ID_);
create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_);
alter table ACT_RU_JOB
add constraint ACT_FK_JOB_EXECUTION
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_JOB_PROC_INST_ID on ACT_RU_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_JOB
add constraint ACT_FK_JOB_PROCESS_INSTANCE
foreign key (PROCESS_INSTANCE_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_JOB_PROC_DEF_ID on ACT_RU_JOB(PROC_DEF_ID_);
alter table ACT_RU_JOB
add constraint ACT_FK_JOB_PROC_DEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
create index ACT_IDX_TJOB_EXECUTION_ID on ACT_RU_TIMER_JOB(EXECUTION_ID_);
alter table ACT_RU_TIMER_JOB
add constraint ACT_FK_TJOB_EXECUTION
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_TJOB_PROC_INST_ID on ACT_RU_TIMER_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_TIMER_JOB
add constraint ACT_FK_TJOB_PROCESS_INSTANCE
foreign key (PROCESS_INSTANCE_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_TJOB_PROC_DEF_ID on ACT_RU_TIMER_JOB(PROC_DEF_ID_);
alter table ACT_RU_TIMER_JOB
add constraint ACT_FK_TJOB_PROC_DEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
create index ACT_IDX_SJOB_EXECUTION_ID on ACT_RU_SUSPENDED_JOB(EXECUTION_ID_);
alter table ACT_RU_SUSPENDED_JOB
add constraint ACT_FK_SJOB_EXECUTION
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_SJOB_PROC_INST_ID on ACT_RU_SUSPENDED_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_SUSPENDED_JOB
add constraint ACT_FK_SJOB_PROCESS_INSTANCE
foreign key (PROCESS_INSTANCE_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_SJOB_PROC_DEF_ID on ACT_RU_SUSPENDED_JOB(PROC_DEF_ID_);
alter table ACT_RU_SUSPENDED_JOB
add constraint ACT_FK_SJOB_PROC_DEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
create index ACT_IDX_DJOB_EXECUTION_ID on ACT_RU_DEADLETTER_JOB(EXECUTION_ID_);
alter table ACT_RU_DEADLETTER_JOB
add constraint ACT_FK_DJOB_EXECUTION
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_DJOB_PROC_INST_ID on ACT_RU_DEADLETTER_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_DEADLETTER_JOB
add constraint ACT_FK_DJOB_PROCESS_INSTANCE
foreign key (PROCESS_INSTANCE_ID_)
references ACT_RU_EXECUTION (ID_);
create index ACT_IDX_DJOB_PROC_DEF_ID on ACT_RU_DEADLETTER_JOB(PROC_DEF_ID_);
alter table ACT_RU_DEADLETTER_JOB
add constraint ACT_FK_DJOB_PROC_DEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
alter table ACT_RU_EVENT_SUBSCR
add constraint ACT_FK_EVENT_EXEC
foreign key (EXECUTION_ID_)
references ACT_RU_EXECUTION(ID_);
create index ACT_IDX_MODEL_SOURCE on ACT_RE_MODEL(EDITOR_SOURCE_VALUE_ID_);
alter table ACT_RE_MODEL
add constraint ACT_FK_MODEL_SOURCE
foreign key (EDITOR_SOURCE_VALUE_ID_)
references ACT_GE_BYTEARRAY (ID_);
create index ACT_IDX_MODEL_SOURCE_EXTRA on ACT_RE_MODEL(EDITOR_SOURCE_EXTRA_VALUE_ID_);
alter table ACT_RE_MODEL
add constraint ACT_FK_MODEL_SOURCE_EXTRA
foreign key (EDITOR_SOURCE_EXTRA_VALUE_ID_)
references ACT_GE_BYTEARRAY (ID_);
create index ACT_IDX_MODEL_DEPLOYMENT on ACT_RE_MODEL(DEPLOYMENT_ID_);
alter table ACT_RE_MODEL
add constraint ACT_FK_MODEL_DEPLOYMENT
foreign key (DEPLOYMENT_ID_)
references ACT_RE_DEPLOYMENT (ID_);
create index ACT_IDX_PROCDEF_INFO_JSON on ACT_PROCDEF_INFO(INFO_JSON_ID_);
alter table ACT_PROCDEF_INFO
add constraint ACT_FK_INFO_JSON_BA
foreign key (INFO_JSON_ID_)
references ACT_GE_BYTEARRAY (ID_);
create index ACT_IDX_PROCDEF_INFO_PROC on ACT_PROCDEF_INFO(PROC_DEF_ID_);
alter table ACT_PROCDEF_INFO
add constraint ACT_FK_INFO_PROCDEF
foreign key (PROC_DEF_ID_)
references ACT_RE_PROCDEF (ID_);
alter table ACT_PROCDEF_INFO
add constraint ACT_UNIQ_INFO_PROCDEF
unique (PROC_DEF_ID_);
insert into ACT_GE_PROPERTY
values ('schema.version', '7.0.1.1', 1);
insert into ACT_GE_PROPERTY
values ('schema.history', 'create(7.0.1.1)', 1);

View File

@@ -0,0 +1,114 @@
create table ACT_HI_PROCINST (
ID_ VARCHAR2(64) not null,
REV_ INTEGER default 1,
PROC_INST_ID_ VARCHAR2(64) not null,
BUSINESS_KEY_ VARCHAR2(255),
PROC_DEF_ID_ VARCHAR2(64) not null,
START_TIME_ TIMESTAMP(6) not null,
END_TIME_ TIMESTAMP(6),
DURATION_ NUMBER(19,0),
START_USER_ID_ VARCHAR2(255),
START_ACT_ID_ VARCHAR2(255),
END_ACT_ID_ VARCHAR2(255),
SUPER_PROCESS_INSTANCE_ID_ VARCHAR2(64),
DELETE_REASON_ VARCHAR2(2000),
TENANT_ID_ VARCHAR2(255) default '',
NAME_ VARCHAR2(255),
CALLBACK_ID_ VARCHAR2(255),
CALLBACK_TYPE_ VARCHAR2(255),
REFERENCE_ID_ VARCHAR2(255),
REFERENCE_TYPE_ VARCHAR2(255),
PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
BUSINESS_STATUS_ VARCHAR2(255),
primary key (ID_),
unique (PROC_INST_ID_)
);
create table ACT_HI_ACTINST (
ID_ VARCHAR2(64) not null,
REV_ INTEGER default 1,
PROC_DEF_ID_ VARCHAR2(64) not null,
PROC_INST_ID_ VARCHAR2(64) not null,
EXECUTION_ID_ VARCHAR2(64) not null,
ACT_ID_ VARCHAR2(255) not null,
TASK_ID_ VARCHAR2(64),
CALL_PROC_INST_ID_ VARCHAR2(64),
ACT_NAME_ VARCHAR2(255),
ACT_TYPE_ VARCHAR2(255) not null,
ASSIGNEE_ VARCHAR2(255),
START_TIME_ TIMESTAMP(6) not null,
END_TIME_ TIMESTAMP(6),
TRANSACTION_ORDER_ INTEGER,
DURATION_ NUMBER(19,0),
DELETE_REASON_ VARCHAR2(2000),
TENANT_ID_ VARCHAR2(255) default '',
primary key (ID_)
);
create table ACT_HI_DETAIL (
ID_ VARCHAR2(64) not null,
TYPE_ VARCHAR2(255) not null,
PROC_INST_ID_ VARCHAR2(64),
EXECUTION_ID_ VARCHAR2(64),
TASK_ID_ VARCHAR2(64),
ACT_INST_ID_ VARCHAR2(64),
NAME_ VARCHAR2(255) not null,
VAR_TYPE_ VARCHAR2(64),
REV_ INTEGER,
TIME_ TIMESTAMP(6) not null,
BYTEARRAY_ID_ VARCHAR2(64),
DOUBLE_ NUMBER(38,10),
LONG_ NUMBER(19,0),
TEXT_ VARCHAR2(2000),
TEXT2_ VARCHAR2(2000),
primary key (ID_)
);
create table ACT_HI_COMMENT (
ID_ VARCHAR2(64) not null,
TYPE_ VARCHAR2(255),
TIME_ TIMESTAMP(6) not null,
USER_ID_ VARCHAR2(255),
TASK_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
ACTION_ VARCHAR2(255),
MESSAGE_ VARCHAR2(2000),
FULL_MSG_ BLOB,
primary key (ID_)
);
create table ACT_HI_ATTACHMENT (
ID_ VARCHAR2(64) not null,
REV_ INTEGER,
USER_ID_ VARCHAR2(255),
NAME_ VARCHAR2(255),
DESCRIPTION_ VARCHAR2(2000),
TYPE_ VARCHAR2(255),
TASK_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
URL_ VARCHAR2(2000),
CONTENT_ID_ VARCHAR2(64),
TIME_ TIMESTAMP(6),
primary key (ID_)
);
create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_);
create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_);
create index ACT_IDX_HI_PRO_SUPER_PROCINST on ACT_HI_PROCINST(SUPER_PROCESS_INSTANCE_ID_);
create index ACT_IDX_HI_ACT_INST_START on ACT_HI_ACTINST(START_TIME_);
create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_);
create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_);
create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_);
create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_);
create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_);
create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_);
create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_);
create index ACT_IDX_HI_PROCVAR_TASK_ID on ACT_HI_VARINST(TASK_ID_);
create index ACT_IDX_HI_PROCVAR_EXE on ACT_HI_VARINST(EXECUTION_ID_);
create index ACT_IDX_HI_IDENT_LNK_TASK on ACT_HI_IDENTITYLINK(TASK_ID_);
create index ACT_IDX_HI_IDENT_LNK_PROCINST on ACT_HI_IDENTITYLINK(PROC_INST_ID_);
create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_);
create index ACT_IDX_HI_ACT_INST_EXEC on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_);
create index ACT_IDX_HI_TASK_INST_PROCINST on ACT_HI_TASKINST(PROC_INST_ID_);

View File

@@ -0,0 +1,148 @@
drop index ACT_IDX_BYTEAR_DEPL;
drop index ACT_IDX_EXE_PROCINST;
drop index ACT_IDX_EXE_PARENT;
drop index ACT_IDX_EXE_SUPER;
drop index ACT_IDX_TSKASS_TASK;
drop index ACT_IDX_TASK_EXEC;
drop index ACT_IDX_TASK_PROCINST;
drop index ACT_IDX_TASK_PROCDEF;
drop index ACT_IDX_VAR_EXE;
drop index ACT_IDX_VAR_PROCINST;
drop index ACT_IDX_JOB_EXECUTION_ID;
drop index ACT_IDX_JOB_PROC_INST_ID;
drop index ACT_IDX_JOB_PROC_DEF_ID;
drop index ACT_IDX_TJOB_EXECUTION_ID;
drop index ACT_IDX_TJOB_PROC_INST_ID;
drop index ACT_IDX_TJOB_PROC_DEF_ID;
drop index ACT_IDX_SJOB_EXECUTION_ID;
drop index ACT_IDX_SJOB_PROC_INST_ID;
drop index ACT_IDX_SJOB_PROC_DEF_ID;
drop index ACT_IDX_DJOB_EXECUTION_ID;
drop index ACT_IDX_DJOB_PROC_INST_ID;
drop index ACT_IDX_DJOB_PROC_DEF_ID;
drop index ACT_IDX_MODEL_SOURCE;
drop index ACT_IDX_MODEL_SOURCE_EXTRA;
drop index ACT_IDX_MODEL_DEPLOYMENT;
drop index ACT_IDX_PROCDEF_INFO_JSON;
drop index ACT_IDX_EXEC_BUSKEY;
drop index ACT_IDX_VARIABLE_TASK_ID;
drop index ACT_IDX_RU_ACTI_START;
drop index ACT_IDX_RU_ACTI_END;
drop index ACT_IDX_RU_ACTI_PROC;
drop index ACT_IDX_RU_ACTI_PROC_ACT;
drop index ACT_IDX_RU_ACTI_EXEC;
drop index ACT_IDX_RU_ACTI_EXEC_ACT;
alter table ACT_GE_BYTEARRAY
drop CONSTRAINT ACT_FK_BYTEARR_DEPL;
alter table ACT_RU_EXECUTION
drop CONSTRAINT ACT_FK_EXE_PROCINST;
alter table ACT_RU_EXECUTION
drop CONSTRAINT ACT_FK_EXE_PARENT;
alter table ACT_RU_EXECUTION
drop CONSTRAINT ACT_FK_EXE_SUPER;
alter table ACT_RU_EXECUTION
drop CONSTRAINT ACT_FK_EXE_PROCDEF;
alter table ACT_RU_IDENTITYLINK
drop CONSTRAINT ACT_FK_TSKASS_TASK;
alter table ACT_RU_IDENTITYLINK
drop CONSTRAINT ACT_FK_IDL_PROCINST;
alter table ACT_RU_IDENTITYLINK
drop CONSTRAINT ACT_FK_ATHRZ_PROCEDEF;
alter table ACT_RU_TASK
drop CONSTRAINT ACT_FK_TASK_EXE;
alter table ACT_RU_TASK
drop CONSTRAINT ACT_FK_TASK_PROCINST;
alter table ACT_RU_TASK
drop CONSTRAINT ACT_FK_TASK_PROCDEF;
alter table ACT_RU_VARIABLE
drop CONSTRAINT ACT_FK_VAR_EXE;
alter table ACT_RU_VARIABLE
drop CONSTRAINT ACT_FK_VAR_PROCINST;
alter table ACT_RU_JOB
drop CONSTRAINT ACT_FK_JOB_EXECUTION;
alter table ACT_RU_JOB
drop CONSTRAINT ACT_FK_JOB_PROCESS_INSTANCE;
alter table ACT_RU_JOB
drop CONSTRAINT ACT_FK_JOB_PROC_DEF;
alter table ACT_RU_TIMER_JOB
drop CONSTRAINT ACT_FK_TJOB_EXECUTION;
alter table ACT_RU_TIMER_JOB
drop CONSTRAINT ACT_FK_TJOB_PROCESS_INSTANCE;
alter table ACT_RU_TIMER_JOB
drop CONSTRAINT ACT_FK_TJOB_PROC_DEF;
alter table ACT_RU_SUSPENDED_JOB
drop CONSTRAINT ACT_FK_SJOB_EXECUTION;
alter table ACT_RU_SUSPENDED_JOB
drop CONSTRAINT ACT_FK_SJOB_PROCESS_INSTANCE;
alter table ACT_RU_SUSPENDED_JOB
drop CONSTRAINT ACT_FK_SJOB_PROC_DEF;
alter table ACT_RU_DEADLETTER_JOB
drop CONSTRAINT ACT_FK_DJOB_EXECUTION;
alter table ACT_RU_DEADLETTER_JOB
drop CONSTRAINT ACT_FK_DJOB_PROCESS_INSTANCE;
alter table ACT_RU_DEADLETTER_JOB
drop CONSTRAINT ACT_FK_DJOB_PROC_DEF;
alter table ACT_RU_EVENT_SUBSCR
drop CONSTRAINT ACT_FK_EVENT_EXEC;
alter table ACT_RE_PROCDEF
drop CONSTRAINT ACT_UNIQ_PROCDEF;
alter table ACT_RE_MODEL
drop CONSTRAINT ACT_FK_MODEL_SOURCE;
alter table ACT_RE_MODEL
drop CONSTRAINT ACT_FK_MODEL_SOURCE_EXTRA;
alter table ACT_RE_MODEL
drop CONSTRAINT ACT_FK_MODEL_DEPLOYMENT;
alter table ACT_PROCDEF_INFO
drop CONSTRAINT ACT_UNIQ_INFO_PROCDEF;
alter table ACT_PROCDEF_INFO
drop CONSTRAINT ACT_FK_INFO_JSON_BA;
alter table ACT_PROCDEF_INFO
drop CONSTRAINT ACT_FK_INFO_PROCDEF;
drop index ACT_IDX_ATHRZ_PROCEDEF;
drop index ACT_IDX_PROCDEF_INFO_PROC;
drop table ACT_RU_ACTINST;
drop table ACT_RE_DEPLOYMENT;
drop table ACT_RE_MODEL;
drop table ACT_RE_PROCDEF;
drop table ACT_RU_EXECUTION;
drop sequence act_evt_log_seq;
drop table ACT_EVT_LOG;
drop table ACT_PROCDEF_INFO;

View File

@@ -0,0 +1,23 @@
drop index ACT_IDX_HI_PRO_INST_END;
drop index ACT_IDX_HI_PRO_I_BUSKEY;
drop index ACT_IDX_HI_ACT_INST_START;
drop index ACT_IDX_HI_ACT_INST_END;
drop index ACT_IDX_HI_DETAIL_PROC_INST;
drop index ACT_IDX_HI_DETAIL_ACT_INST;
drop index ACT_IDX_HI_DETAIL_TIME;
drop index ACT_IDX_HI_DETAIL_NAME;
drop index ACT_IDX_HI_DETAIL_TASK_ID;
drop index ACT_IDX_HI_PROCVAR_PROC_INST;
drop index ACT_IDX_HI_PROCVAR_TASK_ID;
drop index ACT_IDX_HI_PROCVAR_EXE;
drop index ACT_IDX_HI_ACT_INST_PROCINST;
drop index ACT_IDX_HI_IDENT_LNK_TASK;
drop index ACT_IDX_HI_IDENT_LNK_PROCINST;
drop index ACT_IDX_HI_TASK_INST_PROCINST;
drop table ACT_HI_PROCINST;
drop table ACT_HI_ACTINST;
drop table ACT_HI_DETAIL;
drop table ACT_HI_COMMENT;
drop table ACT_HI_ATTACHMENT;

View File

@@ -0,0 +1,23 @@
create table ACT_HI_ENTITYLINK (
ID_ VARCHAR2(64),
LINK_TYPE_ VARCHAR2(255),
CREATE_TIME_ TIMESTAMP(6),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
PARENT_ELEMENT_ID_ VARCHAR2(255),
REF_SCOPE_ID_ VARCHAR2(255),
REF_SCOPE_TYPE_ VARCHAR2(255),
REF_SCOPE_DEFINITION_ID_ VARCHAR2(255),
ROOT_SCOPE_ID_ VARCHAR2(255),
ROOT_SCOPE_TYPE_ VARCHAR2(255),
HIERARCHY_TYPE_ VARCHAR2(255),
primary key (ID_)
);
create index ACT_IDX_HI_ENT_LNK_SCOPE on ACT_HI_ENTITYLINK(SCOPE_ID_, SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_HI_ENT_LNK_REF_SCOPE on ACT_HI_ENTITYLINK(REF_SCOPE_ID_, REF_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_HI_ENT_LNK_ROOT_SCOPE on ACT_HI_ENTITYLINK(ROOT_SCOPE_ID_, ROOT_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_HI_ENT_LNK_SCOPE_DEF on ACT_HI_ENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_, LINK_TYPE_);

View File

@@ -0,0 +1,26 @@
create table ACT_RU_ENTITYLINK (
ID_ VARCHAR2(64),
REV_ INTEGER,
CREATE_TIME_ TIMESTAMP(6),
LINK_TYPE_ VARCHAR2(255),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
PARENT_ELEMENT_ID_ VARCHAR2(255),
REF_SCOPE_ID_ VARCHAR2(255),
REF_SCOPE_TYPE_ VARCHAR2(255),
REF_SCOPE_DEFINITION_ID_ VARCHAR2(255),
ROOT_SCOPE_ID_ VARCHAR2(255),
ROOT_SCOPE_TYPE_ VARCHAR2(255),
HIERARCHY_TYPE_ VARCHAR2(255),
primary key (ID_)
);
create index ACT_IDX_ENT_LNK_SCOPE on ACT_RU_ENTITYLINK(SCOPE_ID_, SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_ENT_LNK_REF_SCOPE on ACT_RU_ENTITYLINK(REF_SCOPE_ID_, REF_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_ENT_LNK_ROOT_SCOPE on ACT_RU_ENTITYLINK(ROOT_SCOPE_ID_, ROOT_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_ENT_LNK_SCOPE_DEF on ACT_RU_ENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_, LINK_TYPE_);
insert into ACT_GE_PROPERTY values ('entitylink.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,4 @@
drop index ACT_IDX_HI_ENT_LNK_SCOPE;
drop index ACT_IDX_HI_ENT_LNK_SCOPE_DEF;
drop table ACT_HI_ENTITYLINK;

View File

@@ -0,0 +1,4 @@
drop index ACT_IDX_ENT_LNK_SCOPE;
drop index ACT_IDX_ENT_LNK_SCOPE_DEF;
drop table ACT_RU_ENTITYLINK;

View File

@@ -0,0 +1,28 @@
create table ACT_RU_EVENT_SUBSCR (
ID_ VARCHAR2(64) not null,
REV_ integer,
EVENT_TYPE_ VARCHAR2(255) not null,
EVENT_NAME_ VARCHAR2(255),
EXECUTION_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
ACTIVITY_ID_ VARCHAR2(64),
CONFIGURATION_ VARCHAR2(255),
CREATED_ TIMESTAMP(6) not null,
PROC_DEF_ID_ VARCHAR2(64),
SUB_SCOPE_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(64),
SCOPE_DEFINITION_ID_ VARCHAR2(64),
SCOPE_DEFINITION_KEY_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(64),
LOCK_TIME_ TIMESTAMP(6),
LOCK_OWNER_ VARCHAR2(255),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_);
create index ACT_IDX_EVENT_SUBSCR on ACT_RU_EVENT_SUBSCR(EXECUTION_ID_);
create index ACT_IDX_EVENT_SUBSCR_SCOPEREF_ on ACT_RU_EVENT_SUBSCR(SCOPE_ID_, SCOPE_TYPE_);
insert into ACT_GE_PROPERTY values ('eventsubscription.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,5 @@
drop index ACT_IDX_EVENT_SUBSCR_CONFIG_;
drop index ACT_IDX_EVENT_SUBSCR;
drop index ACT_IDX_EVENT_SUBSCR_SCOPEREF_;
drop table ACT_RU_EVENT_SUBSCR;

View File

@@ -0,0 +1,20 @@
create table ACT_HI_IDENTITYLINK (
ID_ VARCHAR2(64),
GROUP_ID_ VARCHAR2(255),
TYPE_ VARCHAR2(255),
USER_ID_ VARCHAR2(255),
TASK_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
PROC_INST_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
primary key (ID_)
);
create index ACT_IDX_HI_IDENT_LNK_USER on ACT_HI_IDENTITYLINK(USER_ID_);
create index ACT_IDX_HI_IDENT_LNK_SCOPE on ACT_HI_IDENTITYLINK(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_IDENT_LNK_SUB_SCOPE on ACT_HI_IDENTITYLINK(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_IDENT_LNK_SCOPE_DEF on ACT_HI_IDENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

View File

@@ -0,0 +1,24 @@
create table ACT_RU_IDENTITYLINK (
ID_ VARCHAR2(64),
REV_ INTEGER,
GROUP_ID_ VARCHAR2(255),
TYPE_ VARCHAR2(255),
USER_ID_ VARCHAR2(255),
TASK_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
primary key (ID_)
);
create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_);
create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_);
create index ACT_IDX_IDENT_LNK_SCOPE on ACT_RU_IDENTITYLINK(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_IDENT_LNK_SUB_SCOPE on ACT_RU_IDENTITYLINK(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_IDENT_LNK_SCOPE_DEF on ACT_RU_IDENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
insert into ACT_GE_PROPERTY values ('identitylink.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,6 @@
drop index ACT_IDX_HI_IDENT_LNK_USER;
drop index ACT_IDX_HI_IDENT_LNK_SCOPE;
drop index ACT_IDX_HI_IDENT_LNK_SUB_SCOPE;
drop index ACT_IDX_HI_IDENT_LNK_SCOPE_DEF;
drop table ACT_HI_IDENTITYLINK;

View File

@@ -0,0 +1,7 @@
drop index ACT_IDX_IDENT_LNK_USER;
drop index ACT_IDX_IDENT_LNK_GROUP;
drop index ACT_IDX_IDENT_LNK_SCOPE;
drop index ACT_IDX_IDENT_LNK_SUB_SCOPE;
drop index ACT_IDX_IDENT_LNK_SCOPE_DEF;
drop table ACT_RU_IDENTITYLINK;

View File

@@ -0,0 +1,108 @@
create table ACT_ID_PROPERTY (
NAME_ VARCHAR2(64),
VALUE_ VARCHAR2(300),
REV_ INTEGER,
primary key (NAME_)
);
insert into ACT_ID_PROPERTY
values ('schema.version', '7.0.1.1', 1);
create table ACT_ID_BYTEARRAY (
ID_ VARCHAR2(64),
REV_ INTEGER,
NAME_ VARCHAR2(255),
BYTES_ BLOB,
primary key (ID_)
);
create table ACT_ID_GROUP (
ID_ VARCHAR2(64),
REV_ INTEGER,
NAME_ VARCHAR2(255),
TYPE_ VARCHAR2(255),
primary key (ID_)
);
create table ACT_ID_MEMBERSHIP (
USER_ID_ VARCHAR2(64),
GROUP_ID_ VARCHAR2(64),
primary key (USER_ID_, GROUP_ID_)
);
create table ACT_ID_USER (
ID_ VARCHAR2(64),
REV_ INTEGER,
FIRST_ VARCHAR2(255),
LAST_ VARCHAR2(255),
DISPLAY_NAME_ VARCHAR2(255),
EMAIL_ VARCHAR2(255),
PWD_ VARCHAR2(255),
PICTURE_ID_ VARCHAR2(64),
TENANT_ID_ VARCHAR2(255) default '',
primary key (ID_)
);
create table ACT_ID_INFO (
ID_ VARCHAR2(64),
REV_ INTEGER,
USER_ID_ VARCHAR2(64),
TYPE_ VARCHAR2(64),
KEY_ VARCHAR2(255),
VALUE_ VARCHAR2(255),
PASSWORD_ BLOB,
PARENT_ID_ VARCHAR2(255),
primary key (ID_)
);
create table ACT_ID_TOKEN (
ID_ VARCHAR2(64) not null,
REV_ INTEGER,
TOKEN_VALUE_ VARCHAR2(255),
TOKEN_DATE_ TIMESTAMP(6),
IP_ADDRESS_ VARCHAR2(255),
USER_AGENT_ VARCHAR2(255),
USER_ID_ VARCHAR2(255),
TOKEN_DATA_ VARCHAR2(2000),
primary key (ID_)
);
create table ACT_ID_PRIV (
ID_ VARCHAR2(64) not null,
NAME_ VARCHAR2(255) not null,
primary key (ID_)
);
create table ACT_ID_PRIV_MAPPING (
ID_ VARCHAR2(64) not null,
PRIV_ID_ VARCHAR2(64) not null,
USER_ID_ VARCHAR2(255),
GROUP_ID_ VARCHAR2(255),
primary key (ID_)
);
create index ACT_IDX_MEMB_GROUP on ACT_ID_MEMBERSHIP(GROUP_ID_);
alter table ACT_ID_MEMBERSHIP
add constraint ACT_FK_MEMB_GROUP
foreign key (GROUP_ID_)
references ACT_ID_GROUP (ID_);
create index ACT_IDX_MEMB_USER on ACT_ID_MEMBERSHIP(USER_ID_);
alter table ACT_ID_MEMBERSHIP
add constraint ACT_FK_MEMB_USER
foreign key (USER_ID_)
references ACT_ID_USER (ID_);
create index ACT_IDX_PRIV_MAPPING on ACT_ID_PRIV_MAPPING(PRIV_ID_);
alter table ACT_ID_PRIV_MAPPING
add constraint ACT_FK_PRIV_MAPPING
foreign key (PRIV_ID_)
references ACT_ID_PRIV (ID_);
create index ACT_IDX_PRIV_USER on ACT_ID_PRIV_MAPPING(USER_ID_);
create index ACT_IDX_PRIV_GROUP on ACT_ID_PRIV_MAPPING(GROUP_ID_);
alter table ACT_ID_PRIV
add constraint ACT_UNIQ_PRIV_NAME
unique (NAME_);

View File

@@ -0,0 +1,22 @@
alter table ACT_ID_MEMBERSHIP
drop CONSTRAINT ACT_FK_MEMB_GROUP;
alter table ACT_ID_MEMBERSHIP
drop CONSTRAINT ACT_FK_MEMB_USER;
alter table ACT_ID_PRIV_MAPPING
drop CONSTRAINT ACT_FK_PRIV_MAPPING;
drop index ACT_IDX_MEMB_GROUP;
drop index ACT_IDX_MEMB_USER;
drop index ACT_IDX_PRIV_MAPPING;
drop table ACT_ID_PROPERTY;
drop table ACT_ID_BYTEARRAY;
drop table ACT_ID_INFO;
drop table ACT_ID_MEMBERSHIP;
drop table ACT_ID_GROUP;
drop table ACT_ID_USER;
drop table ACT_ID_TOKEN;
drop table ACT_ID_PRIV;
drop table ACT_ID_PRIV_MAPPING;

View File

@@ -0,0 +1,261 @@
create table ACT_RU_JOB (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
CATEGORY_ VARCHAR2(255),
TYPE_ VARCHAR2(255) NOT NULL,
LOCK_EXP_TIME_ TIMESTAMP(6),
LOCK_OWNER_ VARCHAR2(255),
EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
EXECUTION_ID_ VARCHAR2(64),
PROCESS_INSTANCE_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
ELEMENT_ID_ VARCHAR2(255),
ELEMENT_NAME_ VARCHAR2(255),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
CORRELATION_ID_ VARCHAR2(255),
RETRIES_ INTEGER,
EXCEPTION_STACK_ID_ VARCHAR2(64),
EXCEPTION_MSG_ VARCHAR2(2000),
DUEDATE_ TIMESTAMP(6),
REPEAT_ VARCHAR2(255),
HANDLER_TYPE_ VARCHAR2(255),
HANDLER_CFG_ VARCHAR2(2000),
CUSTOM_VALUES_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create table ACT_RU_TIMER_JOB (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
CATEGORY_ VARCHAR2(255),
TYPE_ VARCHAR2(255) NOT NULL,
LOCK_EXP_TIME_ TIMESTAMP(6),
LOCK_OWNER_ VARCHAR2(255),
EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
EXECUTION_ID_ VARCHAR2(64),
PROCESS_INSTANCE_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
ELEMENT_ID_ VARCHAR2(255),
ELEMENT_NAME_ VARCHAR2(255),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
CORRELATION_ID_ VARCHAR2(255),
RETRIES_ INTEGER,
EXCEPTION_STACK_ID_ VARCHAR2(64),
EXCEPTION_MSG_ VARCHAR2(2000),
DUEDATE_ TIMESTAMP(6),
REPEAT_ VARCHAR2(255),
HANDLER_TYPE_ VARCHAR2(255),
HANDLER_CFG_ VARCHAR2(2000),
CUSTOM_VALUES_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create table ACT_RU_SUSPENDED_JOB (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
CATEGORY_ VARCHAR2(255),
TYPE_ VARCHAR2(255) NOT NULL,
EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
EXECUTION_ID_ VARCHAR2(64),
PROCESS_INSTANCE_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
ELEMENT_ID_ VARCHAR2(255),
ELEMENT_NAME_ VARCHAR2(255),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
CORRELATION_ID_ VARCHAR2(255),
RETRIES_ INTEGER,
EXCEPTION_STACK_ID_ VARCHAR2(64),
EXCEPTION_MSG_ VARCHAR2(2000),
DUEDATE_ TIMESTAMP(6),
REPEAT_ VARCHAR2(255),
HANDLER_TYPE_ VARCHAR2(255),
HANDLER_CFG_ VARCHAR2(2000),
CUSTOM_VALUES_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create table ACT_RU_DEADLETTER_JOB (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
CATEGORY_ VARCHAR2(255),
TYPE_ VARCHAR2(255) NOT NULL,
EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
EXECUTION_ID_ VARCHAR2(64),
PROCESS_INSTANCE_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
ELEMENT_ID_ VARCHAR2(255),
ELEMENT_NAME_ VARCHAR2(255),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
CORRELATION_ID_ VARCHAR2(255),
EXCEPTION_STACK_ID_ VARCHAR2(64),
EXCEPTION_MSG_ VARCHAR2(2000),
DUEDATE_ TIMESTAMP(6),
REPEAT_ VARCHAR2(255),
HANDLER_TYPE_ VARCHAR2(255),
HANDLER_CFG_ VARCHAR2(2000),
CUSTOM_VALUES_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create table ACT_RU_HISTORY_JOB (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
LOCK_EXP_TIME_ TIMESTAMP(6),
LOCK_OWNER_ VARCHAR2(255),
RETRIES_ INTEGER,
EXCEPTION_STACK_ID_ VARCHAR2(64),
EXCEPTION_MSG_ VARCHAR2(2000),
HANDLER_TYPE_ VARCHAR2(255),
HANDLER_CFG_ VARCHAR2(2000),
CUSTOM_VALUES_ID_ VARCHAR2(64),
ADV_HANDLER_CFG_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
SCOPE_TYPE_ VARCHAR2(255),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create table ACT_RU_EXTERNAL_JOB (
ID_ VARCHAR2(64) NOT NULL,
REV_ INTEGER,
CATEGORY_ VARCHAR2(255),
TYPE_ VARCHAR2(255) NOT NULL,
LOCK_EXP_TIME_ TIMESTAMP(6),
LOCK_OWNER_ VARCHAR2(255),
EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
EXECUTION_ID_ VARCHAR2(64),
PROCESS_INSTANCE_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
ELEMENT_ID_ VARCHAR2(255),
ELEMENT_NAME_ VARCHAR2(255),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
CORRELATION_ID_ VARCHAR2(255),
RETRIES_ INTEGER,
EXCEPTION_STACK_ID_ VARCHAR2(64),
EXCEPTION_MSG_ VARCHAR2(2000),
DUEDATE_ TIMESTAMP(6),
REPEAT_ VARCHAR2(255),
HANDLER_TYPE_ VARCHAR2(255),
HANDLER_CFG_ VARCHAR2(2000),
CUSTOM_VALUES_ID_ VARCHAR2(64),
CREATE_TIME_ TIMESTAMP(6),
TENANT_ID_ VARCHAR2(255) DEFAULT '',
primary key (ID_)
);
create index ACT_IDX_JOB_EXCEPTION on ACT_RU_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_JOB_CUSTOM_VAL_ID on ACT_RU_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_JOB_CORRELATION_ID on ACT_RU_JOB(CORRELATION_ID_);
create index ACT_IDX_TJOB_EXCEPTION on ACT_RU_TIMER_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_TJOB_CUSTOM_VAL_ID on ACT_RU_TIMER_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_TJOB_CORRELATION_ID on ACT_RU_TIMER_JOB(CORRELATION_ID_);
create index ACT_IDX_TJOB_DUEDATE on ACT_RU_TIMER_JOB(DUEDATE_);
create index ACT_IDX_SJOB_EXCEPTION on ACT_RU_SUSPENDED_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_SJOB_CUSTOM_VAL_ID on ACT_RU_SUSPENDED_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_SJOB_CORRELATION_ID on ACT_RU_SUSPENDED_JOB(CORRELATION_ID_);
create index ACT_IDX_DJOB_EXCEPTION on ACT_RU_DEADLETTER_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_DJOB_CUSTOM_VAL_ID on ACT_RU_DEADLETTER_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_DJOB_CORRELATION_ID on ACT_RU_DEADLETTER_JOB(CORRELATION_ID_);
create index ACT_IDX_EJOB_EXCEPTION on ACT_RU_EXTERNAL_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_EJOB_CUSTOM_VAL_ID on ACT_RU_EXTERNAL_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_EJOB_CORRELATION_ID on ACT_RU_EXTERNAL_JOB(CORRELATION_ID_);
alter table ACT_RU_JOB
add constraint ACT_FK_JOB_EXCEPTION
foreign key (EXCEPTION_STACK_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_JOB
add constraint ACT_FK_JOB_CUSTOM_VAL
foreign key (CUSTOM_VALUES_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_TIMER_JOB
add constraint ACT_FK_TJOB_EXCEPTION
foreign key (EXCEPTION_STACK_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_TIMER_JOB
add constraint ACT_FK_TJOB_CUSTOM_VAL
foreign key (CUSTOM_VALUES_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_SUSPENDED_JOB
add constraint ACT_FK_SJOB_EXCEPTION
foreign key (EXCEPTION_STACK_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_SUSPENDED_JOB
add constraint ACT_FK_SJOB_CUSTOM_VAL
foreign key (CUSTOM_VALUES_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_DEADLETTER_JOB
add constraint ACT_FK_DJOB_EXCEPTION
foreign key (EXCEPTION_STACK_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_DEADLETTER_JOB
add constraint ACT_FK_DJOB_CUSTOM_VAL
foreign key (CUSTOM_VALUES_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_EXTERNAL_JOB
add constraint ACT_FK_EJOB_EXCEPTION
foreign key (EXCEPTION_STACK_ID_)
references ACT_GE_BYTEARRAY (ID_);
alter table ACT_RU_EXTERNAL_JOB
add constraint ACT_FK_EJOB_CUSTOM_VAL
foreign key (CUSTOM_VALUES_ID_)
references ACT_GE_BYTEARRAY (ID_);
create index ACT_IDX_JOB_SCOPE on ACT_RU_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_JOB_SUB_SCOPE on ACT_RU_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_JOB_SCOPE_DEF on ACT_RU_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
create index ACT_IDX_TJOB_SCOPE on ACT_RU_TIMER_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TJOB_SUB_SCOPE on ACT_RU_TIMER_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TJOB_SCOPE_DEF on ACT_RU_TIMER_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
create index ACT_IDX_SJOB_SCOPE on ACT_RU_SUSPENDED_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_SJOB_SUB_SCOPE on ACT_RU_SUSPENDED_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_SJOB_SCOPE_DEF on ACT_RU_SUSPENDED_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
create index ACT_IDX_DJOB_SCOPE on ACT_RU_DEADLETTER_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_DJOB_SUB_SCOPE on ACT_RU_DEADLETTER_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_DJOB_SCOPE_DEF on ACT_RU_DEADLETTER_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
create index ACT_IDX_EJOB_SCOPE on ACT_RU_EXTERNAL_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_EJOB_SUB_SCOPE on ACT_RU_EXTERNAL_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_EJOB_SCOPE_DEF on ACT_RU_EXTERNAL_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
insert into ACT_GE_PROPERTY values ('job.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,74 @@
drop index ACT_IDX_JOB_SCOPE;
drop index ACT_IDX_JOB_SUB_SCOPE;
drop index ACT_IDX_JOB_SCOPE_DEF;
drop index ACT_IDX_TJOB_SCOPE;
drop index ACT_IDX_TJOB_SUB_SCOPE;
drop index ACT_IDX_TJOB_SCOPE_DEF;
drop index ACT_IDX_SJOB_SCOPE;
drop index ACT_IDX_SJOB_SUB_SCOPE;
drop index ACT_IDX_SJOB_SCOPE_DEF;
drop index ACT_IDX_DJOB_SCOPE;
drop index ACT_IDX_DJOB_SUB_SCOPE;
drop index ACT_IDX_DJOB_SCOPE_DEF;
drop index ACT_IDX_EJOB_SCOPE;
drop index ACT_IDX_EJOB_SUB_SCOPE;
drop index ACT_IDX_EJOB_SCOPE_DEF;
drop index ACT_IDX_JOB_EXCEPTION;
drop index ACT_IDX_JOB_CUSTOM_VAL_ID;
drop index ACT_IDX_JOB_CORRELATION_ID;
drop index ACT_IDX_TJOB_EXCEPTION;
drop index ACT_IDX_TJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_TJOB_CORRELATION_ID;
drop index ACT_IDX_TJOB_DUEDATE;
drop index ACT_IDX_SJOB_EXCEPTION;
drop index ACT_IDX_SJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_SJOB_CORRELATION_ID;
drop index ACT_IDX_DJOB_EXCEPTION;
drop index ACT_IDX_DJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_DJOB_CORRELATION_ID;
drop index ACT_IDX_EJOB_EXCEPTION;
drop index ACT_IDX_EJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_EJOB_CORRELATION_ID;
alter table ACT_RU_JOB
drop CONSTRAINT ACT_FK_JOB_EXCEPTION;
alter table ACT_RU_JOB
drop CONSTRAINT ACT_FK_JOB_CUSTOM_VAL;
alter table ACT_RU_TIMER_JOB
drop CONSTRAINT ACT_FK_TJOB_EXCEPTION;
alter table ACT_RU_TIMER_JOB
drop CONSTRAINT ACT_FK_TJOB_CUSTOM_VAL;
alter table ACT_RU_SUSPENDED_JOB
drop CONSTRAINT ACT_FK_SJOB_EXCEPTION;
alter table ACT_RU_SUSPENDED_JOB
drop CONSTRAINT ACT_FK_SJOB_CUSTOM_VAL;
alter table ACT_RU_DEADLETTER_JOB
drop CONSTRAINT ACT_FK_DJOB_EXCEPTION;
alter table ACT_RU_DEADLETTER_JOB
drop CONSTRAINT ACT_FK_DJOB_CUSTOM_VAL;
alter table ACT_RU_EXTERNAL_JOB
drop CONSTRAINT ACT_FK_EJOB_EXCEPTION;
alter table ACT_RU_EXTERNAL_JOB
drop CONSTRAINT ACT_FK_EJOB_CUSTOM_VAL;
drop table ACT_RU_JOB;
drop table ACT_RU_TIMER_JOB;
drop table ACT_RU_SUSPENDED_JOB;
drop table ACT_RU_DEADLETTER_JOB;
drop table ACT_RU_HISTORY_JOB;
drop table ACT_RU_EXTERNAL_JOB;

View File

@@ -0,0 +1,64 @@
create table ACT_HI_TASKINST (
ID_ VARCHAR2(64) not null,
REV_ INTEGER default 1,
PROC_DEF_ID_ VARCHAR2(64),
TASK_DEF_ID_ VARCHAR2(64),
TASK_DEF_KEY_ VARCHAR2(255),
PROC_INST_ID_ VARCHAR2(64),
EXECUTION_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
PARENT_TASK_ID_ VARCHAR2(64),
STATE_ VARCHAR2(255),
NAME_ VARCHAR2(255),
DESCRIPTION_ VARCHAR2(2000),
OWNER_ VARCHAR2(255),
ASSIGNEE_ VARCHAR2(255),
START_TIME_ TIMESTAMP(6) not null,
IN_PROGRESS_TIME_ TIMESTAMP(6),
IN_PROGRESS_STARTED_BY_ VARCHAR2(255),
CLAIM_TIME_ TIMESTAMP(6),
CLAIMED_BY_ VARCHAR2(255),
SUSPENDED_TIME_ TIMESTAMP(6),
SUSPENDED_BY_ VARCHAR2(255),
END_TIME_ TIMESTAMP(6),
COMPLETED_BY_ VARCHAR2(255),
DURATION_ NUMBER(19,0),
DELETE_REASON_ VARCHAR2(2000),
PRIORITY_ INTEGER,
IN_PROGRESS_DUE_DATE_ TIMESTAMP(6),
DUE_DATE_ TIMESTAMP(6),
FORM_KEY_ VARCHAR2(255),
CATEGORY_ VARCHAR2(255),
TENANT_ID_ VARCHAR2(255) default '',
LAST_UPDATED_TIME_ TIMESTAMP(6),
primary key (ID_)
);
create table ACT_HI_TSK_LOG (
ID_ NUMBER(19),
TYPE_ VARCHAR2(64),
TASK_ID_ VARCHAR2(64) not null,
TIME_STAMP_ TIMESTAMP(6) not null,
USER_ID_ VARCHAR2(255),
DATA_ VARCHAR2(2000),
EXECUTION_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
TENANT_ID_ VARCHAR2(255) default '',
primary key (ID_)
);
create sequence act_hi_task_evt_log_seq start with 1 increment by 1;
create index ACT_IDX_HI_TASK_SCOPE on ACT_HI_TASKINST(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_TASK_SUB_SCOPE on ACT_HI_TASKINST(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_TASK_SCOPE_DEF on ACT_HI_TASKINST(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

View File

@@ -0,0 +1,48 @@
create table ACT_RU_TASK (
ID_ VARCHAR2(64),
REV_ INTEGER,
EXECUTION_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
PROC_DEF_ID_ VARCHAR2(64),
TASK_DEF_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
SCOPE_DEFINITION_ID_ VARCHAR2(255),
PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
STATE_ VARCHAR2(255),
NAME_ VARCHAR2(255),
PARENT_TASK_ID_ VARCHAR2(64),
DESCRIPTION_ VARCHAR2(2000),
TASK_DEF_KEY_ VARCHAR2(255),
OWNER_ VARCHAR2(255),
ASSIGNEE_ VARCHAR2(255),
DELEGATION_ VARCHAR2(64),
PRIORITY_ INTEGER,
CREATE_TIME_ TIMESTAMP(6),
IN_PROGRESS_TIME_ TIMESTAMP(6),
IN_PROGRESS_STARTED_BY_ VARCHAR2(255),
CLAIM_TIME_ TIMESTAMP(6),
CLAIMED_BY_ VARCHAR2(255),
SUSPENDED_TIME_ TIMESTAMP(6),
SUSPENDED_BY_ VARCHAR2(255),
IN_PROGRESS_DUE_DATE_ TIMESTAMP(6),
DUE_DATE_ TIMESTAMP(6),
CATEGORY_ VARCHAR2(255),
SUSPENSION_STATE_ INTEGER,
TENANT_ID_ VARCHAR2(255) DEFAULT '',
FORM_KEY_ VARCHAR2(255),
IS_COUNT_ENABLED_ NUMBER(1) CHECK (IS_COUNT_ENABLED_ IN (1,0)),
VAR_COUNT_ INTEGER,
ID_LINK_COUNT_ INTEGER,
SUB_TASK_COUNT_ INTEGER,
primary key (ID_)
);
create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_);
create index ACT_IDX_TASK_SCOPE on ACT_RU_TASK(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TASK_SUB_SCOPE on ACT_RU_TASK(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TASK_SCOPE_DEF on ACT_RU_TASK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);
insert into ACT_GE_PROPERTY values ('task.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,8 @@
drop index ACT_IDX_HI_TASK_SCOPE;
drop index ACT_IDX_HI_TASK_SUB_SCOPE;
drop index ACT_IDX_HI_TASK_SCOPE_DEF;
drop sequence act_hi_task_evt_log_seq;
drop table ACT_HI_TASKINST;
drop table ACT_HI_TSK_LOG;

View File

@@ -0,0 +1,6 @@
drop index ACT_IDX_TASK_CREATE;
drop index ACT_IDX_TASK_SCOPE;
drop index ACT_IDX_TASK_SUB_SCOPE;
drop index ACT_IDX_TASK_SCOPE_DEF;
drop table ACT_RU_TASK;

View File

@@ -0,0 +1,26 @@
create table ACT_HI_VARINST (
ID_ VARCHAR2(64) not null,
REV_ INTEGER default 1,
PROC_INST_ID_ VARCHAR2(64),
EXECUTION_ID_ VARCHAR2(64),
TASK_ID_ VARCHAR2(64),
NAME_ VARCHAR2(255) not null,
VAR_TYPE_ VARCHAR2(100),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
BYTEARRAY_ID_ VARCHAR2(64),
DOUBLE_ NUMBER(38,10),
LONG_ NUMBER(19,0),
TEXT_ VARCHAR2(2000),
TEXT2_ VARCHAR2(2000),
META_INFO_ VARCHAR2(2000),
CREATE_TIME_ TIMESTAMP(6),
LAST_UPDATED_TIME_ TIMESTAMP(6),
primary key (ID_)
);
create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_);
create index ACT_IDX_HI_VAR_SCOPE_ID_TYPE on ACT_HI_VARINST(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_VAR_SUB_ID_TYPE on ACT_HI_VARINST(SUB_SCOPE_ID_, SCOPE_TYPE_);

View File

@@ -0,0 +1,31 @@
create table ACT_RU_VARIABLE (
ID_ VARCHAR2(64) not null,
REV_ INTEGER,
TYPE_ VARCHAR2(255) not null,
NAME_ VARCHAR2(255) not null,
EXECUTION_ID_ VARCHAR2(64),
PROC_INST_ID_ VARCHAR2(64),
TASK_ID_ VARCHAR2(64),
SCOPE_ID_ VARCHAR2(255),
SUB_SCOPE_ID_ VARCHAR2(255),
SCOPE_TYPE_ VARCHAR2(255),
BYTEARRAY_ID_ VARCHAR2(64),
DOUBLE_ NUMBER(38,10),
LONG_ NUMBER(19,0),
TEXT_ VARCHAR2(2000),
TEXT2_ VARCHAR2(2000),
META_INFO_ VARCHAR2(2000),
primary key (ID_)
);
create index ACT_IDX_RU_VAR_SCOPE_ID_TYPE on ACT_RU_VARIABLE(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_RU_VAR_SUB_ID_TYPE on ACT_RU_VARIABLE(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_VAR_BYTEARRAY on ACT_RU_VARIABLE(BYTEARRAY_ID_);
alter table ACT_RU_VARIABLE
add constraint ACT_FK_VAR_BYTEARRAY
foreign key (BYTEARRAY_ID_)
references ACT_GE_BYTEARRAY (ID_);
insert into ACT_GE_PROPERTY values ('variable.schema.version', '7.0.1.1', 1);

View File

@@ -0,0 +1,6 @@
drop index ACT_IDX_HI_PROCVAR_NAME_TYPE;
drop index ACT_IDX_HI_VAR_SCOPE_ID_TYPE;
drop index ACT_IDX_HI_VAR_SUB_ID_TYPE;
drop table ACT_HI_VARINST;

View File

@@ -0,0 +1,9 @@
drop index ACT_IDX_VAR_BYTEARRAY;
drop index ACT_IDX_RU_VAR_SCOPE_ID_TYPE;
drop index ACT_IDX_RU_VAR_SUB_ID_TYPE;
alter table ACT_RU_VARIABLE
drop CONSTRAINT ACT_FK_VAR_BYTEARRAY;
drop table ACT_RU_VARIABLE;

View File

@@ -0,0 +1,108 @@
package com.zt.plat.module.databus.controller.admin.gateway;
import com.zt.plat.framework.common.pojo.CommonResult;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.module.databus.controller.admin.gateway.convert.ApiAccessLogConvert;
import com.zt.plat.module.databus.controller.admin.gateway.vo.accesslog.ApiAccessLogPageReqVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.accesslog.ApiAccessLogRespVO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiAccessLogDO;
import com.zt.plat.module.databus.service.gateway.ApiAccessLogService;
import com.zt.plat.module.databus.service.gateway.ApiDefinitionService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import jakarta.validation.Valid;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.zt.plat.framework.common.pojo.CommonResult.success;
/**
* Databus API access log controller.
*/
@Tag(name = "管理后台 - Databus API 访问日志")
@RestController
@RequestMapping("/databus/gateway/access-log")
@Validated
public class ApiAccessLogController {
@Resource
private ApiAccessLogService apiAccessLogService;
@Resource
private ApiDefinitionService apiDefinitionService;
@GetMapping("/get")
@Operation(summary = "获取访问日志详情")
@Parameter(name = "id", description = "日志编号", required = true, example = "1024")
@PreAuthorize("@ss.hasPermission('databus:gateway:access-log:query')")
public CommonResult<ApiAccessLogRespVO> get(@RequestParam("id") Long id) {
ApiAccessLogDO logDO = apiAccessLogService.get(id);
ApiAccessLogRespVO respVO = ApiAccessLogConvert.INSTANCE.convert(logDO);
enrichDefinitionInfo(respVO);
return success(respVO);
}
@GetMapping("/page")
@Operation(summary = "分页查询访问日志")
@PreAuthorize("@ss.hasPermission('databus:gateway:access-log:query')")
public CommonResult<PageResult<ApiAccessLogRespVO>> page(@Valid ApiAccessLogPageReqVO pageReqVO) {
PageResult<ApiAccessLogDO> pageResult = apiAccessLogService.getPage(pageReqVO);
PageResult<ApiAccessLogRespVO> result = ApiAccessLogConvert.INSTANCE.convertPage(pageResult);
enrichDefinitionInfo(result.getList());
return success(result);
}
private void enrichDefinitionInfo(List<ApiAccessLogRespVO> list) {
// Batch-fill the page result with API descriptions, using a local cache to cut duplicate lookups
if (CollectionUtils.isEmpty(list)) {
return;
}
Map<String, String> cache = new HashMap<>(list.size());
list.forEach(item -> {
if (item == null) {
return;
}
String cacheKey = buildCacheKey(item.getApiCode(), item.getApiVersion());
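// note: containsKey + put (rather than computeIfAbsent) also caches null results,
// so an API code with no matching definition is resolved only once per page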
if (!cache.containsKey(cacheKey)) {
cache.put(cacheKey, resolveApiDescription(item.getApiCode(), item.getApiVersion()));
}
item.setApiDescription(cache.get(cacheKey));
});
}
private void enrichDefinitionInfo(ApiAccessLogRespVO item) {
// A single record needs its description filled in as well
if (item == null) {
return;
}
item.setApiDescription(resolveApiDescription(item.getApiCode(), item.getApiVersion()));
}
private String resolveApiDescription(String apiCode, String apiVersion) {
if (!StringUtils.hasText(apiCode)) {
return null;
}
String normalizedVersion = StringUtils.hasText(apiVersion) ? apiVersion.trim() : apiVersion;
// Resolve the API description through the gateway definition service to improve page readability
return apiDefinitionService.findByCodeAndVersionIncludingInactive(apiCode, normalizedVersion)
.map(aggregate -> aggregate.getDefinition() != null ? aggregate.getDefinition().getDescription() : null)
.filter(StringUtils::hasText)
.orElse(null);
}
private String buildCacheKey(String apiCode, String apiVersion) {
// Build a composite key to avoid looking up the same API description repeatedly
return (apiCode == null ? "" : apiCode) + "#" + (apiVersion == null ? "" : apiVersion);
}
}

View File

@@ -0,0 +1,97 @@
package com.zt.plat.module.databus.controller.admin.gateway;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.zt.plat.framework.common.pojo.CommonResult;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.module.databus.controller.admin.gateway.convert.ApiVersionConvert;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.ApiDefinitionSaveReqVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionCompareRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionDetailRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionPageReqVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionRollbackReqVO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiVersionDO;
import com.zt.plat.module.databus.service.gateway.ApiVersionService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import jakarta.validation.Valid;
import lombok.extern.slf4j.Slf4j;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import static com.zt.plat.framework.common.pojo.CommonResult.success;
/**
* API version history controller.
*/
@Tag(name = "管理后台 - API 版本历史")
@RestController
@RequestMapping("/databus/gateway/version")
@Validated
@Slf4j
public class ApiVersionController {
@Resource
private ApiVersionService apiVersionService;
@Resource
private ObjectMapper objectMapper;
@GetMapping("/get")
@Operation(summary = "获取 API 版本详情")
@Parameter(name = "id", description = "版本编号", required = true, example = "1024")
@PreAuthorize("@ss.hasPermission('databus:gateway:version:query')")
public CommonResult<ApiVersionDetailRespVO> getVersion(@RequestParam("id") Long id) {
ApiVersionDO versionDO = apiVersionService.getVersion(id);
ApiVersionDetailRespVO respVO = ApiVersionConvert.INSTANCE.convertDetail(versionDO);
// Deserialize the snapshot data
if (versionDO.getSnapshotData() != null) {
try {
ApiDefinitionSaveReqVO snapshot = objectMapper.readValue(versionDO.getSnapshotData(), ApiDefinitionSaveReqVO.class);
respVO.setSnapshotData(snapshot);
} catch (JsonProcessingException ex) {
log.error("反序列化版本快照失败, versionId={}", id, ex);
}
}
return success(respVO);
}
@GetMapping("/page")
@Operation(summary = "分页查询 API 版本列表")
@PreAuthorize("@ss.hasPermission('databus:gateway:version:query')")
public CommonResult<PageResult<ApiVersionRespVO>> getVersionPage(@Valid ApiVersionPageReqVO pageReqVO) {
PageResult<ApiVersionDO> pageResult = apiVersionService.getVersionPage(pageReqVO);
return success(ApiVersionConvert.INSTANCE.convertPage(pageResult));
}
@GetMapping("/list")
@Operation(summary = "查询指定 API 的全部版本")
@PreAuthorize("@ss.hasPermission('databus:gateway:version:query')")
public CommonResult<java.util.List<ApiVersionRespVO>> getVersionList(@RequestParam("apiId") Long apiId) {
return success(ApiVersionConvert.INSTANCE.convertList(apiVersionService.getVersionListByApiId(apiId)));
}
@PutMapping("/rollback")
@Operation(summary = "回滚到指定版本")
@PreAuthorize("@ss.hasPermission('databus:gateway:version:rollback')")
public CommonResult<Boolean> rollbackToVersion(@Valid @RequestBody ApiVersionRollbackReqVO reqVO) {
apiVersionService.rollbackToVersion(reqVO.getId(), reqVO.getRemark());
return success(true);
}
@GetMapping("/compare")
@Operation(summary = "对比两个版本差异")
@PreAuthorize("@ss.hasPermission('databus:gateway:version:query')")
public CommonResult<ApiVersionCompareRespVO> compareVersions(
@RequestParam("sourceId") Long sourceId,
@RequestParam("targetId") Long targetId) {
return success(apiVersionService.compareVersions(sourceId, targetId));
}
}
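For orientation, a hedged client-side sketch of exercising the rollback endpoint above with the JDK's built-in HttpClient; the host, port, token, and request-body values are illustrative placeholders, not values taken from this repository:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RollbackCallSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // PUT /databus/gateway/version/rollback with an ApiVersionRollbackReqVO body
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/databus/gateway/version/rollback")) // placeholder host/port
                .header("Content-Type", "application/json")
                .header("Authorization", "Bearer <token>") // placeholder credential
                .PUT(HttpRequest.BodyPublishers.ofString("{\"id\":1024,\"remark\":\"rollback test\"}"))
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}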

View File

@@ -0,0 +1,55 @@
package com.zt.plat.module.databus.controller.admin.gateway.convert;

import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.module.databus.controller.admin.gateway.vo.accesslog.ApiAccessLogRespVO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiAccessLogDO;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.factory.Mappers;
import org.springframework.http.HttpStatus;

import java.util.List;

@Mapper
public interface ApiAccessLogConvert {

    ApiAccessLogConvert INSTANCE = Mappers.getMapper(ApiAccessLogConvert.class);

    @Mapping(target = "statusDesc", expression = "java(statusDesc(bean.getStatus()))")
    @Mapping(target = "responseStatusText", expression = "java(resolveHttpStatusText(bean.getResponseStatus()))")
    ApiAccessLogRespVO convert(ApiAccessLogDO bean);

    List<ApiAccessLogRespVO> convertList(List<ApiAccessLogDO> list);

    default PageResult<ApiAccessLogRespVO> convertPage(PageResult<ApiAccessLogDO> page) {
        if (page == null) {
            return PageResult.empty();
        }
        PageResult<ApiAccessLogRespVO> result = new PageResult<>();
        result.setList(convertList(page.getList()));
        result.setTotal(page.getTotal());
        return result;
    }

    default String statusDesc(Integer status) {
        // Map the numeric status code to a Chinese description so the frontend can display it directly
        if (status == null) {
            return "未知";
        }
        return switch (status) {
            case 0 -> "成功";
            case 1 -> "客户端错误";
            case 2 -> "服务端错误";
            default -> "未知";
        };
    }

    default String resolveHttpStatusText(Integer status) {
        // Resolve the standard reason phrase via Spring's HttpStatus
        if (status == null) {
            return null;
        }
        HttpStatus resolved = HttpStatus.resolve(status);
        return resolved != null ? resolved.getReasonPhrase() : null;
    }
}
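A small self-contained check of the HttpStatus.resolve behavior the converter relies on; the only assumption is spring-web on the classpath, which the imports above already imply:

import org.springframework.http.HttpStatus;

public class ReasonPhraseSketch {
    public static void main(String[] args) {
        // resolve(...) returns null for codes Spring does not define,
        // which is why resolveHttpStatusText falls back to null above
        System.out.println(HttpStatus.resolve(200).getReasonPhrase()); // OK
        System.out.println(HttpStatus.resolve(404).getReasonPhrase()); // Not Found
        System.out.println(HttpStatus.resolve(799)); // null
    }
}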

View File

@@ -3,12 +3,10 @@ package com.zt.plat.module.databus.controller.admin.gateway.convert;
import cn.hutool.core.collection.CollUtil;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.common.util.object.BeanUtils;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.ApiDefinitionDetailRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.ApiDefinitionPublicationRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.ApiDefinitionStepRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.ApiDefinitionSummaryRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.ApiDefinitionTransformRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.definition.*;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiDefinitionDO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiStepDO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiTransformDO;
import com.zt.plat.module.databus.framework.integration.gateway.domain.ApiDefinitionAggregate;
import com.zt.plat.module.databus.framework.integration.gateway.domain.ApiFlowPublication;
import com.zt.plat.module.databus.framework.integration.gateway.domain.ApiStepDefinition;
@@ -101,4 +99,29 @@ public interface ApiDefinitionConvert {
        return publication == null ? null : BeanUtils.toBean(publication, ApiDefinitionPublicationRespVO.class);
    }

    /**
     * Convert step list: DO -> SaveReqVO.
     */
    default List<ApiDefinitionStepSaveReqVO> convertStepList(List<ApiStepDO> steps) {
        if (CollUtil.isEmpty(steps)) {
            return new ArrayList<>();
        }
        return steps.stream()
                .sorted(Comparator.comparing(step -> step.getStepOrder() == null ? Integer.MAX_VALUE : step.getStepOrder()))
                .map(step -> BeanUtils.toBean(step, ApiDefinitionStepSaveReqVO.class))
                .collect(Collectors.toList());
    }

    /**
     * Convert transform list: DO -> SaveReqVO.
     */
    default List<ApiDefinitionTransformSaveReqVO> convertTransformList(List<ApiTransformDO> transforms) {
        if (CollUtil.isEmpty(transforms)) {
            return new ArrayList<>();
        }
        return transforms.stream()
                .map(transform -> BeanUtils.toBean(transform, ApiDefinitionTransformSaveReqVO.class))
                .collect(Collectors.toList());
    }
}
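The Integer.MAX_VALUE sentinel in convertStepList could equivalently be written with Comparator.nullsLast; a standalone sketch of that ordering on plain integers:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class StepOrderSketch {
    public static void main(String[] args) {
        List<Integer> orders = Arrays.asList(3, null, 1, 2);
        // nullsLast pushes missing stepOrder values to the end of the list,
        // matching the MAX_VALUE sentinel used in the converter above
        List<Integer> sorted = orders.stream()
                .sorted(Comparator.nullsLast(Comparator.naturalOrder()))
                .toList();
        System.out.println(sorted); // [1, 2, 3, null]
    }
}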

View File

@@ -0,0 +1,30 @@
package com.zt.plat.module.databus.controller.admin.gateway.convert;

import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionDetailRespVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.version.ApiVersionRespVO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiVersionDO;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.factory.Mappers;

import java.util.List;

/**
 * API version history converter.
 */
@Mapper
public interface ApiVersionConvert {

    ApiVersionConvert INSTANCE = Mappers.getMapper(ApiVersionConvert.class);

    ApiVersionRespVO convert(ApiVersionDO bean);

    PageResult<ApiVersionRespVO> convertPage(PageResult<ApiVersionDO> page);

    List<ApiVersionRespVO> convertList(List<ApiVersionDO> list);

    // snapshotData is raw JSON on the DO; the controller deserializes it and sets the field manually
    @Mapping(target = "snapshotData", ignore = true)
    ApiVersionDetailRespVO convertDetail(ApiVersionDO bean);
}
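A toy, self-contained example of the ignore-then-fill-manually MapStruct pattern that convertDetail uses; SourceDO, TargetVO, and SketchConvert are hypothetical types for illustration, and the MapStruct annotation processor is assumed to be configured:

import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.factory.Mappers;

public class IgnoreMappingSketch {

    public static class SourceDO {
        public String name;
        public String snapshotData; // raw JSON text
    }

    public static class TargetVO {
        public String name;
        public Object snapshotData; // structured object, filled manually by the caller
    }

    @Mapper
    public interface SketchConvert {
        SketchConvert INSTANCE = Mappers.getMapper(SketchConvert.class);

        // MapStruct copies name but leaves snapshotData null;
        // the caller deserializes the JSON and sets it afterwards
        @Mapping(target = "snapshotData", ignore = true)
        TargetVO convert(SourceDO source);
    }
}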

Some files were not shown because too many files have changed in this diff.