Compare commits: 5037c741a9...lims_dev — 141 commits
Commit SHA1s in this range (the author and date columns were not captured in the export):

7f7c4210ac, db3afb5b64, 542466270a, 03ebe21670, 64d0d4e55e, 22599bbc65, 240a531ee1, 00b2f6312d, 446b5ca7a4, 28a49ce45a,
4bd0402dde, 0ab550123f, cd21239ff2, 837e09941a, 256bf22a10, 76eabb6db0, 06909fafea, 00956030a4, 2dac28d3b3, dbb1d1905e,
08232eb3cb, 5de2801fc9, e9994a24c2, a10732119b, e7efddf976, 13ec805c20, 61e61d08b6, 5698c34185, 96058e29c2, 27d22de4e0,
f1242e74fc, 0c0d82f465, 12ba2cf756, b1bd193f50, 0b4b87845c, a2f2325119, 4c79ac8a6d, 0c0cb27c15, a263632e49, 2e2b7ac6fa,
9730573546, 299132943c, 76ba994b50, dd284728b4, 685ed6b504, f754b1c694, dc1db47d07, dd38e65972, 02e0c81446, 6c8c479984,
829229a355, 067f7226f4, b35df8493c, 518aa2a773, 4003388740, f16509c107, 565a625df7, adcea87bbf, a79806690d, 8689c5e844,
5be1b75be8, 06d9ae2688, 547b1d9afb, 2f9c28f166, 9a0e60ad84, c24ae5bad8, 64eb031486, 30b22698e8, 95fab27556, 2efb815d59,
3d5f07b7a5, 77b4e62def, e2dbaf12a4, f8d95218f5, e0d5c0221e, 59afa893b0, d4d80ce86a, 70868a77c0, eab968da72, e212ba4d2f,
4ddd38c3b6, 54f763c15e, 65f62fddd6, 65b99740c1, f7ee073617, 4b17c1d833, 77c46acf9e, 3d3808be48, 6b266e3211, 491f8c8b1f,
e57f649e72, ce39dc6d4b, 8dd7ccaf5f, 14c81350bd, 7e3baeba46, 4106cfced2, 985b429f6f, 0b646295da, 5374b0567b, 47f710648b,
4c9b1c616e, 88fb1652e6, 52a0b561f9, 74d2a1a236, 585dd3449f, 8dc3e81d4f, 6b3453e4ee, 633e430f46, 8b3d93dc17, 266eb45e00,
c4482af144, d5a1e5c157, 53a0293b7c, af7f103a38, 2280d29fb6, 71e63519ae, c8dca75943, 2eb09ff35d, c1f12dfe5e, 428c9a60b1,
4efa894c8f, f02745454d, d9fa921fda, b6951a4c6b, 3e5a0a4845, 49a54e2199, e00086c6e8, 9c99750dd8, bd56cb0405, c399bdf720,
1a34cbc678, 81fb8eea8f, 8423775582, aa159638b9, 2c4f46b6de, 03c76b071a, a6b87f01a7, 3312ed328d, 7d74ff7acc, b7f07ba8da,
3c19722cbd
**deployment.yaml** — 288 lines changed
```diff
@@ -44,7 +44,7 @@ spec:
               cpu: "500m"
               memory: "1024Mi"
             limits:
-              cpu: "2048m"
+              cpu: "1024m"
               memory: "2048Mi"
       terminationGracePeriodSeconds: 30
 ---
@@ -76,7 +76,7 @@ metadata:
     description: DESC_PLACEHOLDER
     rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
 spec:
-  replicas: 1
+  replicas: 2
   selector:
     matchLabels:
       app: zt-module-infra
@@ -111,7 +111,7 @@ spec:
               cpu: "500m"
               memory: "1024Mi"
             limits:
-              cpu: "2560m"
+              cpu: "1024m"
               memory: "2048Mi"
       terminationGracePeriodSeconds: 30
   strategy:
@@ -183,7 +183,7 @@ spec:
               cpu: "500m"
               memory: "1024Mi"
             limits:
-              cpu: "2048m"
+              cpu: "1024m"
               memory: "2048Mi"
       terminationGracePeriodSeconds: 30
   strategy:
@@ -208,148 +208,148 @@ spec:
       nodePort: 30091
 ---
 # zt-module-bpm
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   namespace: ns-d6a0e78ebd674c279614498e4c57b133
   name: zt-module-bpm
   labels:
     app: zt-module-bpm
   annotations:
     version: "VERSION_PLACEHOLDER"
     description: DESC_PLACEHOLDER
     rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
 spec:
   replicas: 1
   selector:
     matchLabels:
       app: zt-module-bpm
   template:
     metadata:
       labels:
         app: zt-module-bpm
     spec:
       containers:
         - name: zt-module-bpm
           image: 172.16.46.66:10043/zt/zt-module-bpm:VERSION_PLACEHOLDER
           imagePullPolicy: Always
           env:
             - name: TZ
               value: Asia/Shanghai
           readinessProbe:
             httpGet:
               path: /actuator/health
               port: 48083
             initialDelaySeconds: 50
             periodSeconds: 5
             failureThreshold: 3
           livenessProbe:
             httpGet:
               path: /actuator/health
               port: 48083
             initialDelaySeconds: 50
             periodSeconds: 10
             failureThreshold: 5
           resources:
             requests:
               cpu: "500m"
               memory: "1024Mi"
             limits:
               cpu: "2048m"
               memory: "2048Mi"
       terminationGracePeriodSeconds: 30
   strategy:
     type: RollingUpdate
     rollingUpdate:
       maxSurge: 1
       maxUnavailable: 0
 ---
 apiVersion: v1
 kind: Service
 metadata:
   namespace: ns-d6a0e78ebd674c279614498e4c57b133
   name: zt-module-bpm
 spec:
   type: NodePort
   selector:
     app: zt-module-bpm
   ports:
     - protocol: TCP
       port: 48083
       targetPort: 48083
       nodePort: 30093
 #apiVersion: apps/v1
 #kind: Deployment
 #metadata:
 #  namespace: ns-d6a0e78ebd674c279614498e4c57b133
 #  name: zt-module-bpm
 #  labels:
 #    app: zt-module-bpm
 #  annotations:
 #    version: "VERSION_PLACEHOLDER"
 #    description: DESC_PLACEHOLDER
 #    rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
 #spec:
 #  replicas: 1
 #  selector:
 #    matchLabels:
 #      app: zt-module-bpm
 #  template:
 #    metadata:
 #      labels:
 #        app: zt-module-bpm
 #    spec:
 #      containers:
 #        - name: zt-module-bpm
 #          image: 172.16.46.66:10043/zt/zt-module-bpm:VERSION_PLACEHOLDER
 #          imagePullPolicy: Always
 #          env:
 #            - name: TZ
 #              value: Asia/Shanghai
 #          readinessProbe:
 #            httpGet:
 #              path: /actuator/health
 #              port: 48083
 #            initialDelaySeconds: 50
 #            periodSeconds: 5
 #            failureThreshold: 3
 #          livenessProbe:
 #            httpGet:
 #              path: /actuator/health
 #              port: 48083
 #            initialDelaySeconds: 50
 #            periodSeconds: 10
 #            failureThreshold: 5
 #          resources:
 #            requests:
 #              cpu: "500m"
 #              memory: "1024Mi"
 #            limits:
 #              cpu: "2048m"
 #              memory: "2048Mi"
 #      terminationGracePeriodSeconds: 30
 #  strategy:
 #    type: RollingUpdate
 #    rollingUpdate:
 #      maxSurge: 1
 #      maxUnavailable: 0
 #---
 #apiVersion: v1
 #kind: Service
 #metadata:
 #  namespace: ns-d6a0e78ebd674c279614498e4c57b133
 #  name: zt-module-bpm
 #spec:
 #  type: NodePort
 #  selector:
 #    app: zt-module-bpm
 #  ports:
 #    - protocol: TCP
 #      port: 48083
 #      targetPort: 48083
 #      nodePort: 30093
 ---
 # zt-module-report
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   namespace: ns-d6a0e78ebd674c279614498e4c57b133
   name: zt-module-report
   labels:
     app: zt-module-report
   annotations:
     version: "VERSION_PLACEHOLDER"
     description: DESC_PLACEHOLDER
     rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
 spec:
   replicas: 1
   selector:
     matchLabels:
       app: zt-module-report
   template:
     metadata:
       labels:
         app: zt-module-report
     spec:
       containers:
         - name: zt-module-report
           image: 172.16.46.66:10043/zt/zt-module-report:VERSION_PLACEHOLDER
           imagePullPolicy: Always
           env:
             - name: TZ
               value: Asia/Shanghai
           readinessProbe:
             httpGet:
               path: /actuator/health
               port: 48084
             initialDelaySeconds: 50
             periodSeconds: 5
             failureThreshold: 3
           livenessProbe:
             httpGet:
               path: /actuator/health
               port: 48084
             initialDelaySeconds: 50
             periodSeconds: 10
             failureThreshold: 5
           resources:
             requests:
               cpu: "500m"
               memory: "1024Mi"
             limits:
               cpu: "2048m"
               memory: "2048Mi"
       terminationGracePeriodSeconds: 30
   strategy:
     type: RollingUpdate
     rollingUpdate:
       maxSurge: 1
       maxUnavailable: 0
 ---
 apiVersion: v1
 kind: Service
 metadata:
   namespace: ns-d6a0e78ebd674c279614498e4c57b133
   name: zt-module-report
 spec:
   type: NodePort
   selector:
     app: zt-module-report
   ports:
     - protocol: TCP
       port: 48084
       targetPort: 48084
       nodePort: 30094
 #apiVersion: apps/v1
 #kind: Deployment
 #metadata:
 #  namespace: ns-d6a0e78ebd674c279614498e4c57b133
 #  name: zt-module-report
 #  labels:
 #    app: zt-module-report
 #  annotations:
 #    version: "VERSION_PLACEHOLDER"
 #    description: DESC_PLACEHOLDER
 #    rollout.kubernetes.io/change-cause: "DESC_PLACEHOLDER:VERSION_PLACEHOLDER"
 #spec:
 #  replicas: 1
 #  selector:
 #    matchLabels:
 #      app: zt-module-report
 #  template:
 #    metadata:
 #      labels:
 #        app: zt-module-report
 #    spec:
 #      containers:
 #        - name: zt-module-report
 #          image: 172.16.46.66:10043/zt/zt-module-report:VERSION_PLACEHOLDER
 #          imagePullPolicy: Always
 #          env:
 #            - name: TZ
 #              value: Asia/Shanghai
 #          readinessProbe:
 #            httpGet:
 #              path: /actuator/health
 #              port: 48084
 #            initialDelaySeconds: 50
 #            periodSeconds: 5
 #            failureThreshold: 3
 #          livenessProbe:
 #            httpGet:
 #              path: /actuator/health
 #              port: 48084
 #            initialDelaySeconds: 50
 #            periodSeconds: 10
 #            failureThreshold: 5
 #          resources:
 #            requests:
 #              cpu: "500m"
 #              memory: "1024Mi"
 #            limits:
 #              cpu: "2048m"
 #              memory: "2048Mi"
 #      terminationGracePeriodSeconds: 30
 #  strategy:
 #    type: RollingUpdate
 #    rollingUpdate:
 #      maxSurge: 1
 #      maxUnavailable: 0
 #---
 #apiVersion: v1
 #kind: Service
 #metadata:
 #  namespace: ns-d6a0e78ebd674c279614498e4c57b133
 #  name: zt-module-report
 #spec:
 #  type: NodePort
 #  selector:
 #    app: zt-module-report
 #  ports:
 #    - protocol: TCP
 #      port: 48084
 #      targetPort: 48084
 #      nodePort: 30094
 ---
 # zt-module-databus
 apiVersion: apps/v1
```
**docs/iWork集成说明.md** — new file, 340 lines
# iWork Unified Integration Guide

This document explains how to use the unified iWork workflow-initiation capability implemented in the System module (controller + service + properties). It covers: configuration items, invocation styles (internal Java calls & external HTTP calls), request/response examples, error handling, cache and token lifecycle, and common problems with troubleshooting steps.

---

## Overview

The `system` module implements a unified, outward-facing iWork integration:

- Admin REST endpoints under the path prefix `/system/integration/iwork`.
- A service-layer bean, `IWorkIntegrationService`, that other modules can inject and call directly.
- `IWorkProperties` binds the `iwork` section of `application.yml`.
- Tokens/sessions are cached locally with Caffeine (one session per appId + operatorUserId) and refreshed ahead of expiry according to configuration.
- A single configured appId, public key, and default workflow id; there is no longer a need to maintain multiple credential sets.

---

## Configuration (YAML)

Add or adjust the following in `application.yml` (or a profile); the example is taken from `zt-server/src/main/resources/application.yaml`:

```yaml
iwork:
  base-url: https://iwork.example.com
  app-id: my-iwork-app # the fixed iWork application id
  client-public-key: MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A... # client public key agreed with iWork (Base64)
  user-id: system # default operator (used when a call omits operatorUserId)
  org:
    token-seed: 5936562a-d47c-4a29-9b74-b310e6c971b7
    paths:
      subcompany-page: /api/hrm/resful/getHrmsubcompanyWithPage
      department-page: /api/hrm/resful/getHrmdepartmentWithPage
      job-title-page: /api/hrm/resful/getJobtitleInfoWithPage
      user-page: /api/hrm/resful/getHrmUserInfoWithPage
      sync-subcompany: /api/hrm/resful/synSubcompany
      sync-department: /api/hrm/resful/synDepartment
      sync-job-title: /api/hrm/resful/synJobtitle
      sync-user: /api/hrm/resful/synHrmresource
  workflow-id: 54 # default workflow id when the caller passes none
  paths:
    register: /api/ec/dev/auth/regist
    apply-token: /api/ec/dev/auth/applytoken
    user-info: /api/workflow/paService/getUserInfo
    create-workflow: /api/workflow/paService/doCreateRequest
    void-workflow: /api/workflow/paService/doCancelRequest
  token:
    ttl-seconds: 3600 # token lifetime (seconds)
    refresh-ahead-seconds: 60 # refresh this many seconds before expiry
  client:
    connect-timeout: 5s
    response-timeout: 30s
```

Notes:

- `base-url` is the base address of the iWork gateway and must not be empty.
- `app-id` and `client-public-key` together form the credentials used for register/apply-token; they come from configuration only, so switching between multiple credential sets is no longer supported.
- `workflow-id` provides a global default workflow id; a single call can still override it via `workflowId`.
- Request header names are fixed (`app-id`, `client-public-key`, `secret`, `token`, `time`, `user-id`) and do not need to be declared in configuration.
- The `org.*` settings drive the iWork HR organization REST proxy: `token-seed` is the identifier agreed with iWork; the system concatenates it with a millisecond timestamp and computes an MD5 hash to produce the `key`, so no extra token needs to be passed — the sketch below illustrates the derivation.
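
A minimal sketch of that key + ts derivation, assuming only what the bullet above states (MD5 over the token-seed concatenated with a millisecond timestamp); the class and method names are illustrative, not the project's actual API:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

// Illustrative only: derives the {"key","ts"} token described above.
public final class OrgTokenSketch {

    public static String buildTokenJson(String tokenSeed) throws Exception {
        long ts = System.currentTimeMillis();
        String key = md5Hex(tokenSeed + ts); // seed + millisecond timestamp, hashed
        // Matches the documented form: {"key":"<md5>","ts":"<timestamp>"}
        return "{\"key\":\"" + key + "\",\"ts\":\"" + ts + "\"}";
    }

    private static String md5Hex(String input) throws Exception {
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest(input.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }
}
```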

---

## Typical Endpoints (Controller)

REST endpoints exposed by the controller:

- POST /system/integration/iwork/user/resolve
  - Purpose: look up the iWork user id (userId) from an external identifier.
  - Request: see the `Resolve User` example below.

- POST /system/integration/iwork/workflow/create
  - Purpose: start a workflow in iWork.
  - Request: see the `Create Workflow` example below.

- POST /system/integration/iwork/workflow/void
  - Purpose: void/intervene in a workflow.
  - Request: see the `Void Workflow` example below.

All responses are wrapped in the project's `CommonResult`; the actual business object is in the `data` field.

---

### HR Organization REST Endpoints (key + ts)

To integrate with the HR organization RESTful interfaces described in the PDF spec, the controller also exposes the following proxy endpoints, which talk to iWork using a token generated from `key + ts`:

- POST `/system/integration/iwork/hr/subcompany/page` — request body carries `params` (Map); maps to `getHrmsubcompanyWithPage`.
- POST `/system/integration/iwork/hr/department/page` — maps to `getHrmdepartmentWithPage`.
- POST `/system/integration/iwork/hr/job-title/page` — maps to `getJobtitleInfoWithPage`.
- POST `/system/integration/iwork/hr/user/page` — maps to `getHrmUserInfoWithPage`.
- POST `/system/integration/iwork/hr/subcompany/sync` — request body carries `data` (List<Map>); maps to `synSubcompany`.
- POST `/system/integration/iwork/hr/department/sync` — maps to `synDepartment`.
- POST `/system/integration/iwork/hr/job-title/sync` — maps to `synJobtitle`.
- POST `/system/integration/iwork/hr/user/sync` — maps to `synHrmresource`.

All of these requests are automatically encoded as `application/x-www-form-urlencoded`, and the `token` field is set to `{"key":"<md5>","ts":"<timestamp>"}`; callers do not need to compute it themselves.

---

## Request VOs (Key Fields)

- IWorkBaseReqVO (shared fields)
  - `appId` (String): kept for backward compatibility with older callers; the system always uses the configured `iwork.app-id`.
  - `operatorUserId` (String): the operator's user id inside iWork (optional; the framework falls back to `properties.userId`).
  - `forceRefreshToken` (Boolean): force a token refresh (e.g., after a token error).

- IWorkUserInfoReqVO (user resolution)
  - `identifierKey` (String): external identifier key (required, e.g., "loginid").
  - `identifierValue` (String): external identifier value (required, e.g., a username).
  - `payload` (Map): extra request payload, merged with the identifier before sending to iWork.
  - `queryParams` (Map): use this to pass extra information as query parameters.

- IWorkUserInfoRespVO (user resolution response)
  - `userId` (String): the user id parsed from the iWork response (when resolvable).
  - `payload` / `rawBody`: the raw response data.
  - `success` / `message`: success flag and message.

- IWorkWorkflowCreateReqVO (start a workflow)
  - `requestName` (String): workflow title.
  - `workflowId` (Long): workflow template id (optional; defaults to the configured value).
  - `mainFields` (`List<IWorkFormFieldVO>`): main-table fields.
  - `detailTables` (`List<IWorkDetailTableVO>`): detail tables (optional).
  - `otherParams` / `formExtras`: extra parameters; `formExtras` is appended as form data.

- IWorkWorkflowVoidReqVO (void)
  - `requestId` (String): workflow request id (required).
  - `reason`, `extraParams`, `formExtras`: carry the void reason or extra fields.

- IWorkFormFieldVO (form field)
  - `fieldName` (String): field name (required), matching the iWork form field key.
  - `fieldValue` (String): field value (required).

- IWorkDetailRecordVO (detail record)
  - `recordOrder` (Integer): optional record index (0-based), used for ordering iWork detail rows.
  - `fields` (List<IWorkFormFieldVO>): fields of this detail row (required).

- IWorkDetailTableVO (detail table)
  - `tableDBName` (String): iWork detail table name (required, e.g., `formtable_main_26_dt1`).
  - `records` (List<IWorkDetailRecordVO>): detail records (required).

---

## Java (Internal) Example

The project also provides the `IWorkIntegrationService` bean, which can be injected and called directly:

```java
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkDetailRecordVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkDetailTableVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkFormFieldVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkOperationRespVO;
import com.zt.plat.module.system.controller.admin.integration.iwork.vo.IWorkWorkflowCreateReqVO;
import com.zt.plat.module.system.service.integration.iwork.IWorkIntegrationService;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Component;

import java.util.List;

@RequiredArgsConstructor
@Component
public class MyService {
    private final IWorkIntegrationService iworkService;

    public void startFlow() {
        IWorkWorkflowCreateReqVO req = new IWorkWorkflowCreateReqVO();
        // uses the app-id configured in application.yml; no need to set it here
        req.setRequestName("测试-创建流程");
        // to override the configured default workflow, set workflowId explicitly
        // req.setWorkflowId(54L);

        // main-table fields
        IWorkFormFieldVO nameField = new IWorkFormFieldVO();
        nameField.setFieldName("name");
        nameField.setFieldValue("张三");

        IWorkFormFieldVO amountField = new IWorkFormFieldVO();
        amountField.setFieldName("amount");
        amountField.setFieldValue("1000");
        req.setMainFields(List.of(nameField, amountField));

        // detail table (optional)
        IWorkFormFieldVO detailField = new IWorkFormFieldVO();
        detailField.setFieldName("itemName");
        detailField.setFieldValue("办公用品");

        IWorkDetailRecordVO record = new IWorkDetailRecordVO();
        record.setRecordOrder(0);
        record.setFields(List.of(detailField));

        IWorkDetailTableVO detailTable = new IWorkDetailTableVO();
        detailTable.setTableDBName("formtable_main_26_dt1");
        detailTable.setRecords(List.of(record));
        req.setDetailTables(List.of(detailTable));

        IWorkOperationRespVO resp = iworkService.createWorkflow(req);
        if (resp.isSuccess()) {
            // success: e.g., record the requestId
        } else {
            // log or retry
        }
    }
}
```

Notes:

- To use a specific credential, call `req.setAppId("my-iwork-app")`.
- To override the default workflow template, call `req.setWorkflowId(123L)`.
- To start the workflow as a specific iWork operator, call `req.setOperatorUserId("1001")`.

---

## HTTP (External) Examples (cURL)

1. Resolve user

```bash
curl -X POST \
  -H "Content-Type: application/json" \
  -d '{
        "appId":"my-iwork-app",
        "identifierKey":"loginid",
        "identifierValue":"zhangsan"
      }' \
  https://your-zt-server/admin-api/system/integration/iwork/user/resolve
```

Successful response (wrapped in CommonResult):

```json
{
  "code": 0,
  "msg": "success",
  "data": {
    "userId": "1001",
    "success": true,
    "payload": { ... },
    "rawBody": "{...}"
  }
}
```

2. Create workflow

```bash
curl -X POST -H "Content-Type: application/json" -d '{
  "requestName":"测试REST创建流程",
  "workflowId":54,
  "mainFields":[{"fieldName":"name","fieldValue":"张三"}],
  "appId":"my-iwork-app"
}' https://your-zt-server/admin-api/system/integration/iwork/workflow/create
```

3. Void workflow

```bash
curl -X POST -H "Content-Type: application/json" -d '{
  "requestId":"REQ-001",
  "reason":"作废原因",
  "appId":"my-iwork-app"
}' https://your-zt-server/admin-api/system/integration/iwork/workflow/void
```

---

## Core Logic and Details

1. Base parameter resolution

   The system always uses the `app-id` and `client-public-key` configured in `application.yml` when talking to iWork. The `appId` field in request bodies is kept only for backward compatibility; the framework does not use it to switch credentials.

2. Workflow template resolution

   The `workflowId` in the request body takes precedence. If it is absent, the global `iwork.workflow-id` is used; if that is also empty, `IWORK_WORKFLOW_ID_MISSING` is thrown.

3. Register + RSA + token

   - On first use, or when a token expires, a session is obtained as follows:
     1. Call iWork's `register` endpoint (headers include appId and clientPublicKey).
     2. Read `secret` and `spk` (the server public key) from the register response and encrypt via RSA (the `spk` is used for the encryption), producing the encrypted secret and encryptedUserId.
     3. Apply for a token (apply-token) with the registered key material; the token is cached for `ttl-seconds`.
   - `IWorkIntegrationServiceImpl` keeps a Caffeine `sessionCache`; the cache key is `appId::operatorUserId`.
   - When a token is close to expiry (within `refresh-ahead-seconds`), the next request triggers a refresh — see the sketch below.
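
A hypothetical sketch of that session cache; the real `IWorkIntegrationServiceImpl` may differ in naming and refresh details, and the `Session` shape is an assumption:

```java
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

// Illustrative only: Caffeine cache keyed by appId::operatorUserId, with a
// refresh-ahead check performed on each lookup, as described above.
class SessionCacheSketch {

    record Session(String token, long expiresAtMillis) {}

    private final Cache<String, Session> sessionCache = Caffeine.newBuilder()
            .maximumSize(256) // matches the documented capacity
            .build();

    Session getSession(String appId, String operatorUserId, long refreshAheadSeconds) {
        String key = appId + "::" + operatorUserId;
        Session cached = sessionCache.getIfPresent(key);
        long refreshBoundary = System.currentTimeMillis() + refreshAheadSeconds * 1000;
        if (cached == null || cached.expiresAtMillis() <= refreshBoundary) {
            cached = registerAndApplyToken(appId, operatorUserId); // register -> apply-token
            sessionCache.put(key, cached);
        }
        return cached;
    }

    private Session registerAndApplyToken(String appId, String operatorUserId) {
        // placeholder for the register + apply-token round trip described above
        throw new UnsupportedOperationException("illustrative only");
    }
}
```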

4. Request construction

   - JSON requests use `application/json`; form requests (such as create/void workflow) use `application/x-www-form-urlencoded`.
   - Auth headers are controlled by the constants in `IWorkProperties.Headers`; the fixed names are `app-id`, `client-public-key`, `secret`, `token`, `time`, `user-id`.

5. Response parsing

   - Success detection is deliberately lenient: the implementation checks fields such as `code`, `status`, `success`, and `errno` (booleans and the strings '0'/'1'/'success' are supported) and reads the message from `msg|message|errmsg` — sketched below.
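
A hedged sketch of that lenient check; the exact set of fields and accepted values in the real implementation may differ:

```java
import java.util.List;
import java.util.Map;

// Illustrative only: lenient success/message extraction as described above.
final class ResponseParsingSketch {

    static boolean isSuccess(Map<String, Object> body) {
        for (String field : List.of("code", "status", "success", "errno")) {
            Object value = body.get(field);
            if (value == null) {
                continue; // try the next candidate field
            }
            if (value instanceof Boolean b) {
                return b;
            }
            String s = String.valueOf(value).trim();
            return "0".equals(s) || "1".equals(s) || "success".equalsIgnoreCase(s);
        }
        return false;
    }

    static String message(Map<String, Object> body) {
        for (String field : List.of("msg", "message", "errmsg")) {
            Object value = body.get(field);
            if (value != null) {
                return String.valueOf(value);
            }
        }
        return null;
    }
}
```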

---

## Common Errors and Troubleshooting

- Missing baseUrl (IWORK_BASE_URL_MISSING)
  - Fix: make sure `iwork.base-url` is set correctly.

- Missing configuration (IWORK_CONFIGURATION_INVALID)
  - When: key fields such as `app-id`, `client-public-key`, or `user-id` are unset or blank.
  - Fix: fill them in `application.yml` or the config center and make sure they match the iWork side.

- Missing workflow id (IWORK_WORKFLOW_ID_MISSING)
  - When: neither the request body, the credential, nor the global configuration provides a workflow template id.
  - Fix: pass `workflowId` in the request, or set `workflow-id` / the credential-level `default-workflow-id` in configuration.

- RSA encryption / register / apply-token failures (IWORK_REGISTER_FAILED / IWORK_APPLY_TOKEN_FAILED / IWORK_REMOTE_REQUEST_FAILED)
  - Fix: check the HTTP status and body returned by iWork in the logs, and verify that the headers/paths/parameters match what the iWork gateway expects.

- User resolution failures
  - Verify that `identifierKey`/`identifierValue` are filled in correctly and match iWork's query interface; enable `forceRefreshToken` to refresh the session and rule out token expiry.

---

## Advanced Topics

- Concurrency and caching
  - `sessionCache` holds at most 256 entries; under high concurrency or with many credentials/operators, the capacity may need tuning.

- Timeouts and the HTTP client
  - `IWorkProperties.client.response-timeout` sets the response timeout; the connect timeout is usually governed by the global Reactor Netty configuration.

- Unit tests
  - The project includes MockWebServer-based tests (`IWorkIntegrationServiceImplTest`) that mock iWork's register, apply-token, user query, and create/void workflow interactions.

---

## Summary and Recommendations

- Fill in the key configuration fields: `iwork.app-id`, `iwork.client-public-key`, `iwork.user-id`, `iwork.workflow-id`.
- Debug locally through the `IWorkIntegrationService` Java API first; expose the controller's REST endpoints externally only after that works.
- On failures, check the application logs (entries prefixed with `[iWork]`) and the body returned by the iWork gateway to determine whether register, apply-token, or the business call (user-info/create/void) failed.

This document is saved at `docs/iWork集成说明.md`.
**docs/主数据同步指南.md** — new file, empty (0 lines)
**docs/分页汇总功能使用说明.md** — new file, 124 lines
# Pagination Summary (SUM) Feature Guide

This document explains how to enable summary-row (SUM) statistics for pagination endpoints in the platform. The feature builds on the `PageResult` response body and the `@PageSum` annotation: during a paged query, the annotated fields are aggregated automatically and their totals are returned.

## When to Use

- You need totals (amounts, quantities, etc.) rendered at the bottom of a paged list.
- You want the backend to attach summary data instead of the frontend accumulating it manually.
- You already use `BaseMapperX` and its `selectPage` convenience methods.

## Overview

| Component | Location | Role |
| --- | --- | --- |
| `@PageSum` | `com.zt.plat.framework.common.annotation.PageSum` | Marks entity fields to include in the SUM aggregation |
| `PageResult.summary` | `com.zt.plat.framework.common.pojo.PageResult` | Carries the field -> `BigDecimal` summary results |
| `PageSumSupport` | `com.zt.plat.framework.mybatis.core.sum.PageSumSupport` | Scans annotations, clones the query condition, and runs the SUM query |
| `BaseMapperX.selectPage` | `com.zt.plat.framework.mybatis.core.mapper.BaseMapperX` | Attaches the summary after paged and non-paged queries |

## Integration Steps

### 1. Annotate entity fields with `@PageSum`

```java
@TableName("order_summary")
public class OrderSummaryDO {

    private Long id;

    @PageSum
    private BigDecimal amount;

    @PageSum(column = "tax_amount")
    private BigDecimal tax;

    @PageSum(column = "discount")
    private BigDecimal discountSummary;

    // other fields ...
}
```

- When `column` is omitted, the database column mapped by MyBatis-Plus for the entity field is used.
- For cross-table or function expressions (e.g., `sum(price * quantity)`), write the SQL fragment directly in `column`.
- The field must be numeric (`Number`, `BigDecimal`, or a primitive numeric type); non-numeric fields are ignored with a warning log.
- For summary-only fields that do not exist in the table, just declare `exist = false` on `@PageSum`; the framework injects the equivalent of `@TableField(exist = false)` automatically, so no extra `@TableField` annotation is needed.

### 2. Use the pagination support in `BaseMapperX`

```java
PageResult<OrderSummaryRespVO> page = orderSummaryMapper.selectPage(pageParam, wrapper);
```

- Only `BaseMapperX.selectPage` (including the variant with sort parameters) attaches the summary automatically.
- It also works in the `PageParam.PAGE_SIZE_NONE` (no pagination) case.
- `selectJoinPage` does not attach a summary yet; wrap it yourself if you need that.

> ⚠️ The summary enhancement relies on the default MyBatis-Plus pagination (single table / simple conditions) for aggregation. For complex joins or highly customized SQL, write a dedicated summary endpoint, or call `PageSumSupport.tryAttachSummary(...)` manually in your own logic, so the existing query statements remain untouched.

### 3. Expose the response

`PageResult` now contains two count-related properties:

- `total`: the total row count, still returned via the `total` field (deserialization of `totalCount` is kept for backward compatibility).
- `summary`: a Map keyed by entity field name, with `BigDecimal` totals as values.

Example response:

```json
{
  "code": 0,
  "data": {
    "list": [
      { "id": 1, "amount": 20.00, "tax": 1.20 },
      { "id": 2, "amount": 30.00, "tax": 1.80 }
    ],
    "total": 2,
    "summary": {
      "amount": 50.00,
      "tax": 3.00
    }
  }
}
```

The frontend can read `data.summary.amount` directly to render the summary row; no manual aggregation is needed.

## FAQ

### Empty summary

- Check that the entity fields are annotated with `@PageSum` and are numeric.
- Make sure the mapper's generic entity matches the query result entity; `PageSumSupport` resolves the entity type from the mapper generics.
- If the query condition overrides the `select` list (e.g., an explicit `select(...)` call), make sure the SUM statement can still run; `PageSumSupport` clones the wrapper and resets the `select` list, so hand-written SQL must stay compatible.

### Custom SQL & complex cases

- For complex aggregations (e.g., CASE WHEN), write a SQL expression in the `column` attribute:

```java
@PageSum(column = "SUM(CASE WHEN status = 'PAID' THEN amount ELSE 0 END)")
private BigDecimal paidAmount;
```

- The current implementation **only scans `@PageSum` annotations on the mapper's generic entity class**. If the endpoint ultimately returns a VO, annotate the entity first and then convert the data with `PageResult.convert(...)` or a similar mechanism; the `summary` contents are fully preserved across the conversion, as the snippet below shows.
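
A hedged illustration of that conversion; `OrderSummaryConvert.INSTANCE.convertList` stands in for whatever DO → VO mapping you already use and is not a real project API:

```java
PageResult<OrderSummaryDO> doPage = orderSummaryMapper.selectPage(pageParam, wrapper);
// convert(...) replaces the list but carries total and summary over unchanged
PageResult<OrderSummaryRespVO> voPage =
        doPage.convert(OrderSummaryConvert.INSTANCE.convertList(doPage.getList()));
```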

### Queries outside BaseMapperX

- Automatic aggregation currently applies only to `BaseMapperX.selectPage` and paged list queries.
- For custom XML SQL, call `PageSumSupport.tryAttachSummary(mapper, wrapper, pageResult)` manually in your logic, for example:
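
A hedged sketch of that manual call; the method name comes from this document, but the exact parameter list of `PageSumSupport.tryAttachSummary` may differ:

```java
// results obtained from a custom XML query
PageResult<OrderSummaryDO> pageResult = new PageResult<>(records, total);
// ask the support class to compute and attach the @PageSum aggregates
PageSumSupport.tryAttachSummary(orderSummaryMapper, wrapper, pageResult);
Map<String, BigDecimal> summary = pageResult.getSummary();
```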

## Debugging and Testing

- Unit test example: `com.zt.plat.framework.mybatis.core.sum.PageSumSupportTest`.
- Run `mvn -pl zt-framework/zt-spring-boot-starter-mybatis -am test` to verify the feature and guard against regressions.
- Warnings about number parsing or field configuration problems are logged, which helps locate issues.

## Compatibility

- `PageResult` still exposes the original pagination data via `list`/`total`, so old endpoints keep working.
- The new `summary` field is optional; frontends render it as needed.
- The `totalCount` setter/getter remain (marked `@JsonIgnore`) for old code paths.

To extend the feature (e.g., AVG or MAX aggregates), add new annotations and aggregation logic on top of `PageSumSupport` following the existing structure.
**docs/外部单点登录.md** — new file, 194 lines
# External Single Sign-On (External SSO) Integration Guide

## Overview

- External systems can redirect to this system with a one-time token to achieve password-less single sign-on.
- The legacy payload decryption, strict nonce validation, auto-provisioning, and email-matching logic have been removed; every account must already exist locally with a maintained mapping.
- A new `/system/sso/verify` endpoint (frontend and backend) returns standard `AuthLoginRespVO` token data and records audit and login logs.
- External calls are abstracted behind `ExternalSsoClient`; implement your own or reuse the default HTTP client wrapper.

## Key Components

### Backend

- `ExternalSsoServiceImpl`: the main SSO flow — parameter validation, external user lookup, local account matching, token issuance, and logging.
- `ExternalSsoStrategy`: a per-source-system strategy interface; each system can implement custom fetching and matching logic.
- `DefaultExternalSsoStrategy`: the default strategy, reusing the configurable HTTP client and matching order; custom strategies can override it by priority.
- `ExternalSsoClient`: the abstraction for fetching external user information.
- `DefaultExternalSsoClient`: a `RestTemplate`-based default implementation supporting header/query/body placeholder rendering, retries, response field mapping, and proxy configuration.
- `ExternalSsoClientConfiguration`: registers `DefaultExternalSsoClient` via `@Configuration` when no custom bean is supplied.
- `ExternalSsoProperties`: the `external-sso.*` configuration (switch, external endpoint, account mapping, CORS, etc.); sample configuration has been synchronized to `zt-module-system/zt-module-system-server/src/main/resources/application.yaml` and `zt-server/src/main/resources/application.yaml`.
- `ExternalSsoVerifyReqVO`: the payload of `POST /system/sso/verify`.
- `ExternalSsoUserInfo`: the normalized external-user model, with base fields plus a set of custom attributes.

### Frontend

- `src/router/modules/remaining.ts`: adds the hidden route `/externalsso` for the callback page.
- `src/views/Login/ExternalSsoCallback.vue`: parses URL parameters, calls the verify endpoint, persists the token, and redirects to the target page; on failure it shows a message and guides the user back to the login page.
- `src/api/login/index.ts`: adds `externalSsoVerify`, which calls `/system/sso/verify` and returns `TokenType`.

## Flow

1. After authenticating locally, the external system builds the URL `{this-system-domain}/#/externalsso?x-token={external token}&target={redirect}` and redirects; an optional `sourceSystem` selects the source system.
2. The Vue page `ExternalSsoCallback` parses the query parameters, preferring `x-token` (the legacy `token` parameter is also accepted), and aborts with an error if the token is missing.
3. The frontend calls `POST /system/sso/verify` with:

```json
{
  "token": "token issued by the external system",
  "targetUri": "/#/dashboard", // optional
  "sourceSystem": "partner-a" // optional
}
```

4. `ExternalSsoServiceImpl#verifyToken` performs the following steps:

   - Validates the feature switch and the token.
   - Selects the matching `ExternalSsoStrategy` by `sourceSystem`; with no match, it returns "source system unsupported" immediately.
   - Fetches the external user via the strategy's `ExternalSsoClient` (by default the configured HTTP endpoint).
   - Matches a local account in the strategy-defined order (default: external ID → username → mobile); a miss returns "no matching local user".
   - Checks that the account is enabled, issues an OAuth2 access token, and records a login log (type `LOGIN_EXTERNAL_SSO`).
   - Writes an operation audit log with the external-response digest, mapped account, target URI, source system, and more.

5. The frontend receives `AuthLoginRespVO`, persists it via `authUtil.setToken`/`setTenantId`, and redirects to the normalized `targetUri` (default `/`).
6. On failure (e.g., missing token, external call failure, missing account), the backend returns the corresponding error code; the frontend shows a message and clears local caches.

Sequence sketch:

```text
External system -> Browser -> Vue /externalsso -> POST /system/sso/verify -> ExternalSsoService -> ExternalSsoClient -> external user endpoint
                                                                                 \-> OAuth2TokenService -> login/audit logs
```

## Frontend Details

- `ExternalSsoCallback.vue` decodes and normalizes `target`/`targetUri`/`redirect`, supporting absolute URLs and hash routes while preventing open-redirect vulnerabilities.
- It parses `sourceSystem` from the URL (also accepting `source` and `systemCode`) and forwards it to the backend for multi-source strategy selection.
- On success it calls `router.replace`, so no history entry is created; on failure it guides the user to `/login` with the original target attached.
- `buildErrorMessage` normalizes the backend's `msg`, `Error` objects, and plain strings into a single error message.

## Backend Flow in Detail

- **Switch and parameter validation**:
  - A disabled switch or missing token raises `EXTERNAL_SSO_DISABLED` / `EXTERNAL_SSO_TOKEN_MISSING`.
- **External user fetch**:
  - The default client loads its request configuration from `external-sso.remote`, supporting GET/POST and other scenarios.
  - Placeholders: `${externalUserId}` (initially the token), `${shareToken}`/`${token}` (the shared-service access token), `${xToken}` (the original callback token), `${targetUri}`, `${sourceSystem}`.
  - It automatically obtains a shared-service access token via `ShareServiceUtils` and writes it to the header named by `ShareServiceProperties#tokenHeaderName`.
  - The request body is sent as JSON `{ "x-token": "token from the callback" }`, matching the upstream interface `S_BF_CS_01`.
  - `validateResponse` can check the business status via `codeField` and `successCode`; failures raise an `ExternalSsoClientException` carrying the details.
- **Local account matching**:
  - `mapping.order` controls the field priority; `custom.entries` holds the static "external ID → local user ID" map.
  - A missing or disabled account raises `EXTERNAL_SSO_USER_NOT_FOUND` / `EXTERNAL_SSO_USER_DISABLED` respectively, both also written to the login log.
- **Token issuance and logs**:
  - `OAuth2TokenService#createAccessToken` issues the local access token with the default client `CLIENT_ID_DEFAULT`.
  - `recordAuditLog` stores the SHA-256 digest of the raw response, the external attributes, the token digest, and more in the operation log for troubleshooting.
  - `recordLoginLog` records the login and, on success, updates the user's last login IP.

## Extending `ExternalSsoClient`

- The default `DefaultExternalSsoClient` is registered automatically by `ExternalSsoClientConfiguration`; to integrate another protocol, define your own bean in any configuration class:

```java
@Bean
public ExternalSsoClient customExternalSsoClient(...) {
    return new MyExternalSsoClient(...);
}
```

- Default implementation highlights:
  - Builds a `RestTemplate` from configuration, with connect/read timeouts, HTTP proxy, and retry count.
  - Parses the JSON response, maps the configured fields into `ExternalSsoUserInfo`, and keeps the raw data node in `attributes`.
  - Raises `ExternalSsoClientException` (with the raw response) on parse failures or unexpected status codes; a fuller sketch of a custom implementation follows.
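
For reference, a hypothetical custom implementation; the actual `ExternalSsoClient` interface is not shown in this document, so the method signature below is an assumption:

```java
// Hypothetical sketch: the real ExternalSsoClient interface may declare a
// different method signature; adjust to match the actual abstraction.
public class MyExternalSsoClient implements ExternalSsoClient {

    @Override
    public ExternalSsoUserInfo fetchUserInfo(String token, String sourceSystem) {
        ExternalSsoUserInfo info = new ExternalSsoUserInfo();
        // call the partner protocol here (e.g., gRPC, SOAP, LDAP) and map the
        // returned identity fields onto the normalized model
        info.addAttribute("rawToken", token); // addAttribute is documented above
        return info;
    }
}
```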

## Configuration Reference

```yaml
external-sso:
  enabled: true
  system-code: example-partner
  token:
    secret: "shared-secret"
    algorithm: AES
    allowed-clock-skew-seconds: 60
    max-age-seconds: 300
    require-nonce: false
    replay-protection-enabled: false
  remote:
    base-url: http://10.1.7.110
    user-info-path: /api/sso/user
    method: POST
    headers:
      Authorization: "Bearer ${token}"
    query-params: {}
    body:
      userId: "${externalUserId}"
    code-field: code
    success-code: "0"
    message-field: message
    data-field: data
    user-id-field: data.userId
    username-field: data.username
    nickname-field: data.nickname
    email-field: data.email
    mobile-field: data.mobile
    tenant-id-field: data.tenantId
    connect-timeout-millis: 3000
    read-timeout-millis: 5000
    retry-count: 1
    proxy:
      enabled: false
  mapping:
    order:
      - EXTERNAL_ID
      - USERNAME
      - MOBILE
    ignore-case: true
    update-profile-on-login: false
    custom:
      entries:
        partnerUser001: 10001
  cors:
    allowed-origins:
      - https://partner.example.com
    allowed-methods: ["OPTIONS", "POST"]
    allowed-headers: ["Authorization", "Content-Type"]
    allow-credentials: true
    max-age: 1800
```

| Configuration path | Description |
| --- | --- |
| `enabled` | Master switch; when off, the endpoint returns `EXTERNAL_SSO_DISABLED` directly |
| `system-code` | Default source-system identifier, used as the `sourceSystem` fallback and as a log tag |
| `token.*` | If you still need to decrypt/validate the external token, use these settings inside a custom `ExternalSsoClient`; the default implementation just passes the token through |
| `remote.*` | External HTTP call parameters, field mapping, and timeout control; template placeholders support `externalUserId`, `shareToken` (`token`), `xToken`, `targetUri`, `sourceSystem` |
| `mapping.order` | Local account matching priority; supports `EXTERNAL_ID`, `USERNAME`, `MOBILE` |
| `mapping.custom.entries` | Static map from external user identifiers to local user IDs |
| `cors.*` | CORS allow-list for opening up `/system/sso/verify` |

## Error Codes and Logs

- Error codes:
  - `1_002_000_050`: feature disabled.
  - `1_002_000_051`: token missing.
  - `1_002_000_055`: external interface error; the specific cause is filled into the message placeholder.
  - `1_002_000_056`: no matching local user.
  - `1_002_000_057`: local user disabled.
  - `1_002_000_058`: source system unsupported; a matching strategy implementation must be configured.
- Login log: success and failure are recorded with `LoginLogTypeEnum.LOGIN_EXTERNAL_SSO`.
- Operation log: type `EXTERNAL_SSO/VERIFY`, including the external user ID, mapped account, target URI, source system, response digest, and other metadata.

## Caveats

- All account mappings must be maintained in advance; the system neither creates users automatically nor falls back to email matching.
- `targetUri` is normalized on the frontend to avoid open-redirect risks; without a valid target, the user lands on the home page.
- Make sure the external interface's JSON fields match the configuration; point `remote.data-field` at the right node if needed.
- If the external interface is slow or flaky, raise `retry-count`, increase the timeouts, or supply a custom client implementation.
- To record more audit data, inject custom fields via `ExternalSsoUserInfo#addAttribute`; the audit log retains them automatically.

## Extension and Testing Suggestions

- Provide new `ExternalSsoStrategy` or `ExternalSsoClient` beans to support additional source systems.
- Write integration tests for the main failure cases: missing token, missing mapping, unsupported source system, external interface timeouts, disabled accounts, and so on.
- Before wiring the real flow, have the external system call `/system/sso/verify` directly to verify the configuration.
**docs/数据总线模块大致功能与调用介绍.md** — new file, 205 lines
# Databus Module API Features and Third-Party Integration Guide

> Scope: `zt-module-databus` (server side) + `zt-module-databus-api` (interface definitions). Based on the mainline branch code as of 2025-11-20.

## 1. Positioning and Capabilities

- **Goal**: expose a unified data/business orchestration gateway, letting admins configure APIs, steps, transforms, and rate-limit policies in a visual console and publish them to the runtime immediately.
- **Core features**:
  1. Full API lifecycle management (definition, versioning, rollback, publish-time cache refresh).
  2. An orchestration engine dynamically assembled on Spring Integration, supporting Start/HTTP/RPC/Script/End steps and JSON transform chains.
  3. Layered security: IP allow/deny lists, application credentials, timestamp + nonce, payload encryption/decryption, signatures, replay protection, tenant isolation, fixed anonymous users, and more.
  4. QoS: pluggable rate limiting (Redis fixed-window counters), audit logging, trace IDs, and step-level results persisted to the database.
  5. Debug support: the admin endpoint `POST /databus/gateway/invoke` can inject arbitrary parameters to simulate real calls.

## 2. Runtime Architecture

| Component | Location | Role |
| --- | --- | --- |
| `GatewaySecurityFilter` | `framework.integration.gateway.security` | Filters and validates all HTTP requests under `databus.api-portal.base-path`: IP checks, payload decryption, signatures, replay protection, anonymous-user injection, response encryption. |
| `ApiGatewayExecutionService` | `framework.integration.gateway.core` | Maps HTTP requests to `ApiInvocationContext`, dispatches the Integration Flow, and builds the unified response. |
| `IntegrationFlowManager` | `framework.integration.gateway.core` | Dynamically registers Spring Integration Flows per `apiCode + version`, with hot refresh and temporary debug flows. |
| `ApiFlowDispatcher` | same as above | Finds the input channel by apiCode/version, sends the request, and waits for the `ApiInvocationContext` round trip. |
| `PolicyAdvisorFactory` + `DefaultRateLimitPolicyEvaluator` | `framework.integration.gateway.core/policy` | Weaves policies such as rate limiting into the flow; the current default implementation is a Redis fixed window. |
| `ApiGatewayAccessLogger` | `framework.integration.gateway.core` | Writes access logs to `databus_api_access_log`: trace, request/response, latency, step results, etc. |
| Admin REST controllers | `controller.admin.gateway.*` | Manage API definitions, versions, credentials, policies, access logs, etc. |

## 4. Admin REST Quick Reference

| Area | Method | Path | Description |
| --- | --- | --- | --- |
| API definition | GET | `/databus/gateway/definition/page` | Paged query (filter by code/description). |
| | GET | `/databus/gateway/definition/{id}` | Details (including steps, transforms, rate-limit bindings). |
| | POST | `/databus/gateway/definition` | Create a definition; steps required (at least Start+End). |
| | PUT | `/databus/gateway/definition` | Update and auto-refresh the corresponding flow. |
| | DELETE | `/databus/gateway/definition/{id}` | Delete and deregister the flow. |
| API gateway | POST | `/databus/gateway/invoke` | Admin-side debug invocation. |
| | GET | `/databus/gateway/definitions` | Fetch currently online definitions (for canary/gateway caches). |
| | POST | `/databus/gateway/cache/refresh` | Force-refresh all flow caches. |
| API version | GET | `/databus/gateway/version/get?id=` | Version details (snapshotData restored automatically). |
| | GET | `/databus/gateway/version/page` | Paged. |
| | GET | `/databus/gateway/version/list?apiId=` | List all versions of an API. |
| | PUT | `/databus/gateway/version/rollback` | Roll back by `id + remark`. |
| | GET | `/databus/gateway/version/compare` | Diff two versions (sourceId/targetId). |
| Client credentials | GET | `/databus/gateway/credential/page` | Paged. |
| | GET | `/databus/gateway/credential/get?id=` | Details (including anonymous configuration). |
| | POST | `/databus/gateway/credential/create` | Create a credential. |
| | PUT | `/databus/gateway/credential/update` | Update. |
| | DELETE | `/databus/gateway/credential/delete?id=` | Delete. |
| | GET | `/databus/gateway/credential/list-simple` | For dropdowns. |
| Rate-limit policies | GET | `/databus/gateway/policy/rate-limit/page` | Paged search. |
| | GET | `/databus/gateway/policy/rate-limit/{id}` | Details. |
| | GET | `/databus/gateway/policy/rate-limit/simple-list` | Minimal list. |
| | POST/PUT/DELETE | `/databus/gateway/policy/rate-limit` | Create/update/delete. |
| Access logs | GET | `/databus/gateway/access-log/page` | Paged (requires the `databus:gateway:access-log:query` permission). |
| | GET | `/databus/gateway/access-log/get?id=` | Single entry (API description filled in automatically). |

> All endpoints return the `CommonResult` wrapper with `code/message/data` fields. See the corresponding VOs (under `controller.admin.gateway.vo`) when needed.

## 5. API Lifecycle Notes

1. **State machine**: `ApiStatusEnum` (draft/online/offline/deprecated). The Integration Flow only loads `ONLINE` definitions.
2. **Version snapshots**: every save writes to `databus_api_version`; `snapshotData` allows one-click restore via the `rollback` endpoint.
3. **Transform validation**: saving rejects duplicate `TransformPhaseEnum` values at the same level and ensures Start/End are unique and sit at the ends.
4. **Cache refresh**:
   - Per API: create/update/delete automatically calls `IntegrationFlowManager.refresh(apiCode, version)`.
   - Global: admins can call `/databus/gateway/cache/refresh` as a fallback.

## 6. Request Path and Response Format

- **Default base path**: `/admin-api/databus/api/portal` (overridable via `databus.api-portal.base-path`; the legacy `/databus/api/portal` is still accepted).
- **Final path**: `{basePath}/{apiCode}/{version}`, e.g., `/admin-api/databus/api/portal/order.create/v1`.
- **Supported methods**: GET/POST/PUT/DELETE/PATCH, all mapped to `ApiInvocationContext.httpMethod`.
- **Response envelope**:

```json
{
  "code": 200,
  "message": "OK",
  "response": { "bizField": "value" },
  "traceId": "c8a3d52f-..."
}
```

> `code` mirrors the HTTP status; `response` is the business body after the API transforms; errors use the same envelope (with response encryption enabled, the body is a Base64 string).

## 7. Key Configuration (`application.yml`)

```yaml
databus:
  api-portal:
    base-path: /admin-api/databus/api/portal
    allowed-ips: [10.0.0.0/24] # empty means allow all
    denied-ips: []
    enable-tenant-header: true
    tenant-header: ZT-Tenant-Id
    enable-audit: true
    enable-rate-limit: true
    security:
      enabled: true
      signature-type: MD5 # or SHA256
      encryption-type: AES # or DES
      allowed-clock-skew-seconds: 300
      nonce-ttl-seconds: 600
      require-body-encryption: true
      encrypt-response: true
```

> `GatewaySecurityFilter` registers itself at highest precedence +10, so requests on this path pass the security checks first.

## 8. Third-Party Call Flow

### 8.1 Prerequisites

1. **Request a credential**: create an API client credential in the admin console, yielding:
   - `appId` (the `ZT-App-Id` header)
   - `encryptionKey` (for AES/DES symmetric encryption; the server decrypts with `CryptoSignatureUtils.decrypt`)
   - `encryptionType`, `signatureType`
   - With `allowAnonymous` = true, choose a fixed system user (the server automatically issues an internal JWT).
2. **Identify the API**: note the `apiCode`, `version`, HTTP method, and the input/transform contract.
3. **Network allow-list**: add the third party's egress IPs to `allowed-ips`, otherwise requests get 403.
4. **Redis**: Redis must be available (used for nonces, replay protection, and rate-limit counters).

### 8.2 Building a Request

| # | Step | Notes |
| --- | --- | --- |
| 1 | Generate a timestamp | `timestamp = System.currentTimeMillis()`, within 300 s of server time. |
| 2 | Generate a nonce | `nonce` must be at least 8 chars; `UUID.randomUUID().toString().replace("-", "")` works. |
| 3 | Prepare the plaintext body | e.g., `{"orderNo":"SO20251120001"}`, referred to as `plainBody`. |
| 4 | Compute the signature | Put all signature fields into a map (see the next section) and follow the same rule as `CryptoSignatureUtils.verifySignature`: sort the keys, skip the `signature` field, join `key=value` pairs with `&`, then hash with `MD5/SHA256`; assign the result to `ZT-Signature`. *Note: the signature uses the plaintext body.* |
| 5 | Encrypt the body | Encrypt `plainBody` with the credential's `encryptionKey + encryptionType`; send the Base64 result as the HTTP body; Content-Type may be `text/plain` or `application/json`. |
| 6 | Assemble headers | `ZT-App-Id`, `ZT-Timestamp`, `ZT-Nonce`, `ZT-Signature`, `ZT-Tenant-Id` (optional), `X-Client-Id` (recommended; used for rate limiting); set `Authorization` if you carry your own JWT. |
| 7 | Send | URL = `https://{host}{basePath}/{apiCode}/{version}`, with the method matching the API definition. |

#### Signature Field Example

```
appId=demo-app
&body={"orderNo":"SO20251120001"}
&nonce=0c5e2df9a1
&timestamp=1732070400000
```

- Query parameters are joined as `key=value` (multiple values joined with commas); the `signature` field is skipped automatically.
- A non-JSON request body is signed as one whole string. A hedged sketch of this signing rule follows.
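
A minimal sketch of the documented signing rule (sort keys, skip `signature`, join with `&`, hash). It mirrors the behaviour described for `CryptoSignatureUtils.verifySignature`, not its actual source; MD5 is shown, but swap in SHA-256 if the credential's `signatureType` says so:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Map;
import java.util.TreeMap;

// Illustrative only: builds the ZT-Signature value from the signature fields.
public final class SignatureSketch {

    public static String sign(Map<String, String> fields) throws Exception {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, String> e : new TreeMap<>(fields).entrySet()) {
            if ("signature".equals(e.getKey())) {
                continue; // skipped by the documented rule
            }
            if (sb.length() > 0) {
                sb.append('&');
            }
            sb.append(e.getKey()).append('=').append(e.getValue());
        }
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest(sb.toString().getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }
}
```

With the example fields above, `sign(Map.of("appId", "demo-app", "body", plainBody, "nonce", nonce, "timestamp", timestamp))` should reproduce the value sent in `ZT-Signature` — note that the plaintext body is signed, not the encrypted one.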

#### cURL Example

```bash
curl -X POST "https://gw.example.com/admin-api/databus/api/portal/order.create/v1" \
  -H "ZT-App-Id: demo-app" \
  -H "ZT-Timestamp: 1732070400000" \
  -H "ZT-Nonce: 0c5e2df9a1" \
  -H "ZT-Signature: 8e377..." \
  -H "X-Client-Id: mall" \
  -H "Content-Type: text/plain" \
  -d "Q2hhcnNldGV4dC1CYXNlNjQgZW5jcnlwdGVkIGJvZHk="
```

> The `-d` payload must be the AES/DES-encrypted, Base64-encoded body.

### 8.3 Handling the Response

1. Read the HTTP status and `ApiGatewayResponse.code/message/traceId`.
2. With `security.encrypt-response=true`, the response body itself is an encrypted string; decrypt it with the same `encryptionKey/encryptionType` to obtain the JSON, then parse the `response` field — see the sketch below.
3. Use `traceId` to correlate backend logs and entries on the access-log page.
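
A minimal decryption sketch for step 2, assuming AES with ECB/PKCS5 padding; the actual cipher mode used by `CryptoSignatureUtils` is not documented here, so align the transformation string (and key length — AES keys must be 16/24/32 bytes) with your credential settings:

```java
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Illustrative only: Base64-decode the body, then decrypt with the shared key.
final class ResponseDecryptSketch {

    static String decryptResponse(String base64Body, String encryptionKey) throws Exception {
        Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); // assumed mode
        cipher.init(Cipher.DECRYPT_MODE,
                new SecretKeySpec(encryptionKey.getBytes(StandardCharsets.UTF_8), "AES"));
        byte[] plain = cipher.doFinal(Base64.getDecoder().decode(base64Body));
        return new String(plain, StandardCharsets.UTF_8); // JSON envelope with "response"
    }
}
```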

### 8.4 Errors and Retries

| Scenario | Symptom | Suggested handling |
| --- | --- | --- |
| Invalid timestamp/nonce | HTTP 401, `message` = `请求到达时间超出 300s`/`重复请求` | Sync server clocks; never reuse a `nonce` (Redis TTL defaults to 600 s). |
| Signature failure | HTTP 401, `message` = `签名校验失败` | Check the signature string, character encoding, and letter case. |
| Missing key | HTTP 500, `message` = `应用未配置加密密钥` | Configure the key and algorithm on the credential, or disable mandatory encryption. |
| Rate limited | HTTP 429, `message` = `请求触发限流策略` | Lower per-`X-Client-Id` concurrency, or raise the policy's `limit/windowSeconds`. |
| API not published | HTTP 404, `message` = `API 定义未发布或已下线` | Confirm `status=ONLINE` and refresh the cache. |

## 9. Rate-Limit Policy Configuration

- Stored in `ApiPolicyRateLimitDO.config` as JSON, for example:

```json
{
  "limit": 1000,
  "windowSeconds": 60,
  "keyTemplate": "${apiCode}:${tenantId}:${header.X-Client-Id}" // reserved for extension
}
```

- The current default implementation reads `limit` (default 100) and `windowSeconds` (default 60).
- Redis key format: `databus:api:rl:{apiCode}:{version}:{X-Client-Id}`; the expiry is set when the counter first appears — the sketch below shows the idea.
- When the limit trips, `API_RATE_LIMIT_EXCEEDED` is thrown and the access log records `status=1/2`.
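
A minimal fixed-window counter following the documented key format; the use of Spring's `StringRedisTemplate` here is purely for illustration — the module's internal Redis access may differ:

```java
import java.time.Duration;
import org.springframework.data.redis.core.StringRedisTemplate;

// Illustrative only: increment a per-window counter and reject above the limit.
public class FixedWindowLimiterSketch {

    private final StringRedisTemplate redis;

    public FixedWindowLimiterSketch(StringRedisTemplate redis) {
        this.redis = redis;
    }

    public boolean allow(String apiCode, String version, String clientId,
                         long limit, long windowSeconds) {
        String key = "databus:api:rl:" + apiCode + ":" + version + ":" + clientId;
        Long count = redis.opsForValue().increment(key);
        if (count != null && count == 1) {
            // first hit in the window: start the expiry clock
            redis.expire(key, Duration.ofSeconds(windowSeconds));
        }
        return count != null && count <= limit;
    }
}
```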

## 10. Access Log Fields

| Field | Description |
| --- | --- |
| `traceId` | From `TracerUtils`; searchable in logs and tracing. |
| `requestHeaders`, `requestBody`, `responseBody` | Truncated to 4000 chars by default; stored as serialized JSON. |
| `status` | 0=success, 1=client error, 2=server error, 3=unknown. |
| `stepResults` | Serialized step execution list (see `ApiStepResult`) with `request/response/elapsed/error`. |
| `extra` | Extra variables/attributes, giving custom context for troubleshooting. |

> Use `/databus/gateway/access-log/page` with `traceId` or `apiCode` filters to locate third-party issues quickly.
**pom.xml** — 29 lines changed
```diff
@@ -18,7 +18,7 @@
         <module>zt-module-infra</module>
         <!-- <module>zt-module-bpm</module>-->
         <module>zt-module-report</module>
-        <!-- <module>zt-module-mp</module>-->
+        <!--<module>zt-module-mp</module>-->
         <!-- <module>zt-module-ai</module>-->
         <!-- <module>zt-module-template</module>-->
         <!-- <module>zt-module-iot</module>-->
@@ -32,7 +32,7 @@
     <url>https://github.com/YunaiV/ruoyi-vue-pro</url>

     <properties>
-        <revision>3.0.43</revision>
+        <revision>3.0.45</revision>
         <!-- Maven 相关 -->
         <java.version>17</java.version>
         <maven.compiler.source>${java.version}</maven.compiler.source>
@@ -237,8 +237,8 @@
                 <config.server-addr>172.16.46.63:30848</config.server-addr>
                 <config.namespace>dev</config.namespace>
                 <config.group>DEFAULT_GROUP</config.group>
-                <config.username/>
-                <config.password/>
+                <config.username>nacos</config.username>
+                <config.password>P@ssword25</config.password>
                 <config.version>1.0.0</config.version>
             </properties>
         </profile>
@@ -250,8 +250,8 @@
                 <config.server-addr>172.16.46.63:30848</config.server-addr>
                 <config.namespace>prod</config.namespace>
                 <config.group>DEFAULT_GROUP</config.group>
-                <config.username/>
-                <config.password/>
+                <config.username>nacos</config.username>
+                <config.password>P@ssword25</config.password>
                 <config.version>1.0.0</config.version>
             </properties>
         </profile>
@@ -263,8 +263,8 @@
                 <config.server-addr>172.16.46.63:30848</config.server-addr>
                 <config.namespace>local</config.namespace>
                 <config.group>DEFAULT_GROUP</config.group>
-                <config.username/>
-                <config.password/>
+                <config.username>nacos</config.username>
+                <config.password>P@ssword25</config.password>
                 <config.version>1.0.0</config.version>
             </properties>
         </profile>
@@ -274,6 +274,19 @@
                 <config.namespace>chenbowen</config.namespace>
             </properties>
         </profile>
+        <profile>
+            <id>qsj</id>
+            <properties>
+                <env.name>dev</env.name>
+                <!--Nacos 配置-->
+                <config.server-addr>172.16.46.63:30848</config.server-addr>
+                <config.namespace>qsj</config.namespace>
+                <config.group>DEFAULT_GROUP</config.group>
+                <config.username>nacos</config.username>
+                <config.password>P@ssword25</config.password>
+                <config.version>1.0.0</config.version>
+            </properties>
+        </profile>
     </profiles>

 </project>
```
**sql/dm/bpm.sql** — new file, 581 lines

File diff suppressed because one or more lines are too long.

**New SQL file** — 74 lines (file name not captured in the export)
@@ -0,0 +1,74 @@

```sql
CREATE TABLE "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"
(
    "ID" BIGINT NOT NULL,
    "TRACE_ID" VARCHAR(64) DEFAULT NULL,
    "API_CODE" VARCHAR(128) DEFAULT NULL,
    "API_VERSION" VARCHAR(32) DEFAULT NULL,
    "REQUEST_METHOD" VARCHAR(16) DEFAULT NULL,
    "REQUEST_PATH" VARCHAR(512) DEFAULT NULL,
    "REQUEST_QUERY" TEXT,
    "REQUEST_HEADERS" TEXT,
    "REQUEST_BODY" TEXT,
    "RESPONSE_STATUS" INT DEFAULT NULL,
    "RESPONSE_MESSAGE" VARCHAR(500) DEFAULT NULL,
    "RESPONSE_BODY" TEXT,
    "STATUS" SMALLINT DEFAULT 3 NOT NULL,
    "ERROR_CODE" VARCHAR(100) DEFAULT NULL,
    "ERROR_MESSAGE" VARCHAR(1000) DEFAULT NULL,
    "EXCEPTION_STACK" TEXT,
    "CLIENT_IP" VARCHAR(64) DEFAULT NULL,
    "USER_AGENT" VARCHAR(512) DEFAULT NULL,
    "DURATION" BIGINT DEFAULT NULL,
    "REQUEST_TIME" DATETIME(6) DEFAULT CURRENT_TIMESTAMP NOT NULL,
    "RESPONSE_TIME" DATETIME(6) DEFAULT NULL,
    "STEP_RESULTS" TEXT,
    "EXTRA" TEXT,
    "CREATOR" VARCHAR(64) DEFAULT '' NOT NULL,
    "CREATE_TIME" DATETIME(6) DEFAULT CURRENT_TIMESTAMP NOT NULL,
    "UPDATER" VARCHAR(64) DEFAULT '' NOT NULL,
    "UPDATE_TIME" DATETIME(6) DEFAULT CURRENT_TIMESTAMP NOT NULL,
    "DELETED" BIT DEFAULT '0' NOT NULL,
    "TENANT_ID" BIGINT DEFAULT 0 NOT NULL,
    NOT CLUSTER PRIMARY KEY("ID")) STORAGE(ON "MAIN", CLUSTERBTR) ;

COMMENT ON TABLE "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG IS 'Databus API 访问日志表';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."API_CODE" IS 'API 编码';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."API_VERSION" IS 'API 版本';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."CLIENT_IP" IS '客户端 IP';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."CREATE_TIME" IS '创建时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."CREATOR" IS '创建者';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."DELETED" IS '是否删除';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."DURATION" IS '请求耗时(毫秒)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."ERROR_CODE" IS '业务错误码';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."ERROR_MESSAGE" IS '错误信息';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."EXCEPTION_STACK" IS '异常堆栈';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."EXTRA" IS '额外调试信息(JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."ID" IS '日志主键';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_BODY" IS '请求体(JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_HEADERS" IS '请求头(JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_METHOD" IS '请求方法';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_PATH" IS '请求路径';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_QUERY" IS '请求查询参数(JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."REQUEST_TIME" IS '请求时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_BODY" IS '响应体(JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_MESSAGE" IS '响应提示信息';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_STATUS" IS '响应 HTTP 状态码';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."RESPONSE_TIME" IS '响应时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."STATUS" IS '访问状态:0-成功 1-客户端错误 2-服务端错误 3-未知';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."STEP_RESULTS" IS '执行步骤结果(JSON 字符串)';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."TENANT_ID" IS '租户编号';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."TRACE_ID" IS '追踪 ID';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."UPDATER" IS '更新者';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."UPDATE_TIME" IS '更新时间';
COMMENT ON COLUMN "RUOYI-VUE-PRO".DATABUS_API_ACCESS_LOG."USER_AGENT" IS 'User-Agent';

CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_TRACE" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("TRACE_ID" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_CODE" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("API_CODE" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_METHOD" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("REQUEST_METHOD" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_STATUS" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("STATUS" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_RESP_STATUS" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("RESPONSE_STATUS" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_REQUEST_TIME" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("REQUEST_TIME" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_CLIENT_IP" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("CLIENT_IP" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
CREATE OR REPLACE INDEX "IDX_DATABUS_API_ACCESS_LOG_TENANT" ON "RUOYI-VUE-PRO"."DATABUS_API_ACCESS_LOG"("TENANT_ID" ASC) STORAGE(ON "MAIN", CLUSTERBTR) ;
```
```diff
@@ -26,7 +26,7 @@
     <url>https://github.com/YunaiV/ruoyi-vue-pro</url>

     <properties>
-        <revision>3.0.43</revision>
+        <revision>3.0.45</revision>
         <flatten-maven-plugin.version>1.6.0</flatten-maven-plugin.version>
         <!-- 统一依赖管理 -->
         <spring.boot.version>3.4.5</spring.boot.version>
@@ -63,6 +63,7 @@
         <podam.version>8.0.2.RELEASE</podam.version>
         <jedis-mock.version>1.1.4</jedis-mock.version>
         <mockito-inline.version>5.2.0</mockito-inline.version>
+        <okhttp3.version>4.12.0</okhttp3.version>
         <!-- Bpm 工作流相关 -->
         <flowable.version>7.0.1</flowable.version>
         <!-- 工具类相关 -->
@@ -86,6 +87,7 @@
         <netty.version>4.1.116.Final</netty.version>
         <mqtt.version>1.2.5</mqtt.version>
         <pf4j-spring.version>0.9.0</pf4j-spring.version>
+        <okhttp3.version>4.12.0</okhttp3.version>
         <!-- 规则引擎 -->
         <liteflow.version>2.15.1</liteflow.version>
         <vertx.version>4.5.13</vertx.version>
@@ -470,6 +472,12 @@
             <version>${podam.version}</version>
         </dependency>

+        <dependency>
+            <groupId>com.squareup.okhttp3</groupId>
+            <artifactId>okhttp</artifactId>
+            <version>${okhttp3.version}</version>
+        </dependency>
+
         <!-- 工作流相关 -->
         <dependency>
             <groupId>org.flowable</groupId>
```

```diff
@@ -52,6 +52,12 @@
             <scope>provided</scope> <!-- 设置为 provided,只有工具类需要使用到 -->
         </dependency>

+        <dependency>
+            <groupId>org.springframework.data</groupId>
+            <artifactId>spring-data-redis</artifactId>
+            <scope>provided</scope> <!-- 设置为 provided,只有工具类需要使用到 -->
+        </dependency>
+
         <dependency>
             <groupId>jakarta.servlet</groupId>
             <artifactId>jakarta.servlet-api</artifactId>
@@ -151,6 +157,12 @@
             <artifactId>spring-boot-starter-test</artifactId>
             <scope>test</scope>
         </dependency>

+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <scope>test</scope>
+        </dependency>
     </dependencies>

 </project>
```
@@ -0,0 +1,31 @@

```java
package com.zt.plat.framework.common.annotation;

import java.lang.annotation.*;

/**
 * 标记分页结果中需要求和的字段。
 * <p>
 * 未显式指定列名时,会默认使用实体字段对应的数据库列。
 * <p>
 * {@link #exist()} 可以用于声明该字段并不存在于表结构中,相当于为字段添加
 * {@code @TableField(exist = false)},方便在 DO 中声明专用于汇总结果的临时字段。
 */
@Documented
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface PageSum {

    /**
     * 自定义求和的数据库列名或表达式,未设置时默认使用实体字段对应的列。
     */
    String column() default "";

    /**
     * 是否在实体字段上声明真实存在的数据库列。
     * <p>
     * 设为 {@code false} 时,框架会自动为该字段提供 {@code @TableField(exist = false)} 的能力,
     * 适用于只在分页响应中返回的临时统计字段。
     */
    boolean exist() default false;

}
```
```diff
@@ -11,7 +11,7 @@ public class CompanyDeptInfo {
     /**
      * 公司Id
      */
-    private Long companyId;
+    private String companyId;
     /**
      * 公司名称
      */
@@ -19,7 +19,7 @@ public class CompanyDeptInfo {
     /**
      * 部门Id
      */
-    private Long deptId;
+    private String deptId;
     /**
      * 部门名称
      */
```
@@ -1,11 +1,18 @@
package com.zt.plat.framework.common.pojo;

import com.fasterxml.jackson.annotation.JsonAlias;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;

import java.io.Serializable;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

@Schema(description = "分页结果")
@Data
@@ -15,19 +22,31 @@ public final class PageResult<T> implements Serializable {
    private List<T> list;

    @Schema(description = "总量", requiredMode = Schema.RequiredMode.REQUIRED)
    @JsonProperty("total")
    @JsonAlias({"totalCount"})
    private Long total;

    @Schema(description = "汇总信息(字段需使用 @PageSum 标注)")
    @JsonProperty("summary")
    private Map<String, BigDecimal> summary;

    public PageResult() {
        this.list = new ArrayList<>();
        this.summary = Collections.emptyMap();
    }

    public PageResult(List<T> list, Long total) {
        this(list, total, null);
    }

    public PageResult(List<T> list, Long total, Map<String, BigDecimal> summary) {
        this.list = list;
        this.total = total;
        setSummaryInternal(summary);
    }

    public PageResult(Long total) {
        this.list = new ArrayList<>();
        this.total = total;
        this(new ArrayList<>(), total, null);
    }

    public static <T> PageResult<T> empty() {
@@ -38,4 +57,30 @@ public final class PageResult<T> implements Serializable {
        return new PageResult<>(total);
    }

    public void setSummary(Map<String, BigDecimal> summary) {
        setSummaryInternal(summary);
    }

    private void setSummaryInternal(Map<String, BigDecimal> summary) {
        if (summary == null || summary.isEmpty()) {
            this.summary = Collections.emptyMap();
            return;
        }
        this.summary = new LinkedHashMap<>(summary);
    }

    public <R> PageResult<R> convert(List<R> newList) {
        return new PageResult<>(newList, total, summary);
    }

    @JsonIgnore
    public Long getTotalCount() {
        return total;
    }

    @JsonIgnore
    public void setTotalCount(Long totalCount) {
        this.total = totalCount;
    }

}
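A short sketch of populating the new summary support from calling code; the orders list, the VO mapping, and the "amount" key are placeholders:

    Map<String, BigDecimal> summary = new LinkedHashMap<>();
    summary.put("amount", new BigDecimal("1234.50"));

    // The three-argument constructor copies the map defensively (LinkedHashMap keeps key order)
    PageResult<OrderDO> page = new PageResult<>(orders, 42L, summary);

    // convert(...) carries the summary over unchanged when mapping DOs to VOs
    PageResult<OrderVO> voPage = page.convert(orderVos);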
@@ -81,36 +81,40 @@ public class AsyncLatchUtils {

        System.out.println("主流程开始,准备分发异步任务...");

        System.out.println("主线程id:" + Thread.currentThread().getId());
        // 2. Submit several asynchronous tasks
        // Task 1: fetch user info
        AsyncLatchUtils.submitTask(executorService, () -> {
            try {
            try {
                System.out.println("任务一子线程id:" + Thread.currentThread().getId());
                System.out.println("开始获取用户信息...");
                Thread.sleep(1000); // simulate latency
                System.out.println("获取用户信息成功!");
            } catch (InterruptedException e) {
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        // Task 2: fetch order info
        AsyncLatchUtils.submitTask(executorService, () -> {
            try {
            try {
                System.out.println("任务二子线程id:" + Thread.currentThread().getId());
                System.out.println("开始获取订单信息...");
                Thread.sleep(1500); // simulate latency
                System.out.println("获取订单信息成功!");
            } catch (InterruptedException e) {
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        // Task 3: fetch product info
        AsyncLatchUtils.submitTask(executorService, () -> {
            try {
            try {
                System.out.println("任务三子线程id:" + Thread.currentThread().getId());
                System.out.println("开始获取商品信息...");
                Thread.sleep(500); // simulate latency
                System.out.println("获取商品信息成功!");
            } catch (InterruptedException e) {
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
@@ -118,12 +122,12 @@ public class AsyncLatchUtils {
        System.out.println("所有异步任务已提交,主线程开始等待...");

        // 3. Wait for all tasks to complete, for at most 5 seconds
        boolean allTasksCompleted = AsyncLatchUtils.waitFor(5, TimeUnit.SECONDS);
        boolean allTasksCompleted = AsyncLatchUtils.waitFor(5, TimeUnit.SECONDS);

        // 4. Continue the main flow based on the wait result
        if (allTasksCompleted) {
            System.out.println("所有异步任务执行成功,主流程继续...");
        } else {
        } else {
            System.err.println("有任务执行超时,主流程中断!");
        }

@@ -0,0 +1,112 @@
package com.zt.plat.framework.common.util.integration;

import cn.hutool.core.util.StrUtil;
import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

import java.time.Duration;

/**
 * Configuration parameters controlling requests to the ePlat share service.
 */
@Getter
@Setter
@Component
@ConfigurationProperties(prefix = "eplat.share")
public class ShareServiceProperties {

    private static final String DEFAULT_TOKEN_ENDPOINT_PATH = "/eplat/oauth/token";

    /**
     * Base URL of the share service, e.g. https://example.com/share.
     */
    private String urlPrefix;

    /**
     * OAuth client id.
     */
    private String clientId;

    /**
     * OAuth client secret.
     */
    private String clientSecret;

    /**
     * OAuth scope, defaults to read.
     */
    private String scope = "read";

    /**
     * Redis cache key for the access token.
     */
    private String tokenCacheKey = "eplat:cache:shareToken";

    /**
     * Redis cache key for the refresh token.
     */
    private String refreshTokenCacheKey = "eplat:cache:shareRefreshToken";

    /**
     * Name of the request header carrying the token when calling the share service.
     */
    private String tokenHeaderName = "Xplat-Token";

    /**
     * Endpoint path for obtaining a token, defaults to /eplat/oauth/token.
     */
    private String tokenEndpointPath = DEFAULT_TOKEN_ENDPOINT_PATH;

    /**
     * Default access token TTL of 5000 seconds; recommended to be slightly shorter than the actual server-side expiry.
     */
    private Duration tokenTtl = Duration.ofSeconds(5000);

    /**
     * Default refresh token TTL; when unset, twice the access token TTL is used.
     */
    private Duration refreshTokenTtl;

    /**
     * Builds the request URL for a concrete service.
     *
     * @param serviceNo service number
     * @return full request URL
     */
    public String buildServiceUrl(String serviceNo) {
        return normalizeBaseUrl(urlPrefix) + "/service/" + serviceNo;
    }

    /**
     * Builds the URL for obtaining a token.
     *
     * @return token request URL
     */
    public String buildTokenUrl() {
        String base = normalizeBaseUrl(urlPrefix);
        String path = StrUtil.prependIfMissing(tokenEndpointPath, "/");
        return base + path;
    }

    /**
     * Cache TTL for the refresh token.
     *
     * @return refresh token TTL
     */
    public Duration getRefreshTokenTtl() {
        if (refreshTokenTtl != null) {
            return refreshTokenTtl;
        }
        return tokenTtl.multipliedBy(2);
    }

    private static String normalizeBaseUrl(String url) {
        if (StrUtil.isBlank(url)) {
            throw new IllegalArgumentException("共享服务地址不能为空");
        }
        return StrUtil.removeSuffix(url.trim(), "/");
    }

}
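A quick sketch of what the two builder methods yield, using a hypothetical urlPrefix; note that normalizeBaseUrl trims a trailing slash:

    ShareServiceProperties props = new ShareServiceProperties();
    props.setUrlPrefix("https://example.com/share/");

    String serviceUrl = props.buildServiceUrl("SVC001");
    // -> https://example.com/share/service/SVC001

    String tokenUrl = props.buildTokenUrl();
    // -> https://example.com/share/eplat/oauth/token (default endpoint path)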
@@ -0,0 +1,237 @@
package com.zt.plat.framework.common.util.integration;

import cn.hutool.core.util.StrUtil;
import com.zt.plat.framework.common.util.json.JsonUtils;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.core.ValueOperations;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.util.Assert;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestClientException;
import org.springframework.web.client.RestTemplate;

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Helper for calling the ePlat share service; sends requests and refreshes the access token automatically.
 */
@Slf4j
public final class ShareServiceUtils {

    private static final Duration MIN_CACHE_TTL = Duration.ofSeconds(1);
    private static final ConcurrentMap<String, Lock> TOKEN_REFRESH_LOCKS = new ConcurrentHashMap<>();

    private ShareServiceUtils() {
    }

    public static String callShareService(RestTemplate restTemplate,
                                          StringRedisTemplate redisTemplate,
                                          ShareServiceProperties properties,
                                          String serviceNo,
                                          String requestBody) {
        return callShareService(restTemplate, redisTemplate, properties, serviceNo, (Object) requestBody);
    }

    public static String callShareService(RestTemplate restTemplate,
                                          StringRedisTemplate redisTemplate,
                                          ShareServiceProperties properties,
                                          String serviceNo,
                                          Object requestBody) {
        Assert.notNull(restTemplate, "RestTemplate 不能为空");
        Assert.notNull(redisTemplate, "StringRedisTemplate 不能为空");
        Assert.notNull(properties, "ShareServiceProperties 不能为空");
        Assert.hasText(serviceNo, "服务号不能为空");

        String url = properties.buildServiceUrl(serviceNo);
        String payload = convertRequestBody(requestBody);
        log.info("共享服务调用地址:[{}],请求体:[{}]", url, payload);

        String token = obtainAccessToken(restTemplate, redisTemplate, properties);
        log.debug("共享服务服务号 [{}] 使用的 token 已获取", serviceNo);

        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.APPLICATION_JSON);
        headers.set(properties.getTokenHeaderName(), token);

        HttpEntity<String> entity = new HttpEntity<>(payload, headers);
        ResponseEntity<String> response = restTemplate.exchange(url, HttpMethod.POST, entity, String.class);
        return Objects.requireNonNullElse(response.getBody(), "");
    }

    /**
     * Obtains the access token for the share service; reusable in custom call scenarios.
     *
     * @param restTemplate {@link RestTemplate} used to call the share service
     * @param redisTemplate {@link StringRedisTemplate} caching the token
     * @param properties share service configuration
     * @return token for accessing the share service
     */
    public static String getAccessToken(RestTemplate restTemplate,
                                        StringRedisTemplate redisTemplate,
                                        ShareServiceProperties properties) {
        Assert.notNull(restTemplate, "RestTemplate 不能为空");
        Assert.notNull(redisTemplate, "StringRedisTemplate 不能为空");
        Assert.notNull(properties, "ShareServiceProperties 不能为空");
        return obtainAccessToken(restTemplate, redisTemplate, properties);
    }

    private static String convertRequestBody(Object requestBody) {
        if (requestBody == null) {
            return "";
        }
        if (requestBody instanceof String str) {
            return str;
        }
        if (requestBody instanceof byte[] bytes) {
            return new String(bytes, StandardCharsets.UTF_8);
        }
        return JsonUtils.toJsonString(requestBody);
    }

    private static String obtainAccessToken(RestTemplate restTemplate,
                                            StringRedisTemplate redisTemplate,
                                            ShareServiceProperties properties) {
        // Read a reusable token straight from Redis
        ValueOperations<String, String> valueOps = redisTemplate.opsForValue();
        String token = valueOps.get(properties.getTokenCacheKey());
        if (StrUtil.isNotBlank(token)) {
            return token;
        }
        // Lock at the granularity of the cache key to avoid concurrent refreshes
        Lock lock = TOKEN_REFRESH_LOCKS.computeIfAbsent(properties.getTokenCacheKey(), key -> new ReentrantLock());
        lock.lock();
        try {
            token = valueOps.get(properties.getTokenCacheKey());
            if (StrUtil.isNotBlank(token)) {
                return token;
            }
            return refreshAccessToken(restTemplate, redisTemplate, properties, valueOps);
        } finally {
            lock.unlock();
        }
    }

    private static String refreshAccessToken(RestTemplate restTemplate,
                                             StringRedisTemplate redisTemplate,
                                             ShareServiceProperties properties,
                                             ValueOperations<String, String> valueOps) {
        String refreshToken = valueOps.get(properties.getRefreshTokenCacheKey());
        if (StrUtil.isNotBlank(refreshToken)) {
            try {
                return requestToken(restTemplate, redisTemplate, properties,
                        buildRefreshTokenParams(properties, refreshToken));
            } catch (RuntimeException ex) {
                log.warn("刷新共享服务 token 失败,准备回退为 client_credentials 模式", ex);
                redisTemplate.delete(properties.getRefreshTokenCacheKey());
            }
        }
        return requestToken(restTemplate, redisTemplate, properties,
                buildClientCredentialsParams(properties));
    }

    private static MultiValueMap<String, String> buildClientCredentialsParams(ShareServiceProperties properties) {
        MultiValueMap<String, String> params = baseTokenParams(properties);
        params.add("grant_type", "client_credentials");
        if (StrUtil.isNotBlank(properties.getScope())) {
            params.add("scope", properties.getScope());
        }
        return params;
    }

    private static MultiValueMap<String, String> buildRefreshTokenParams(ShareServiceProperties properties,
                                                                         String refreshToken) {
        MultiValueMap<String, String> params = baseTokenParams(properties);
        params.add("grant_type", "refresh_token");
        params.add("refresh_token", refreshToken);
        return params;
    }

    private static MultiValueMap<String, String> baseTokenParams(ShareServiceProperties properties) {
        MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
        Assert.hasText(properties.getClientId(), "clientId 不能为空");
        Assert.hasText(properties.getClientSecret(), "clientSecret 不能为空");
        params.add("client_id", properties.getClientId());
        params.add("client_secret", properties.getClientSecret());
        return params;
    }

    private static String requestToken(RestTemplate restTemplate,
                                       StringRedisTemplate redisTemplate,
                                       ShareServiceProperties properties,
                                       MultiValueMap<String, String> body) {
        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED);
        HttpEntity<MultiValueMap<String, String>> entity = new HttpEntity<>(body, headers);
        String tokenUrl = properties.buildTokenUrl();
        log.info("共享服务获取 token 地址:[{}],授权方式:[{}]", tokenUrl, body.getFirst("grant_type"));
        ResponseEntity<String> response;
        try {
            response = restTemplate.postForEntity(tokenUrl, entity, String.class);
        } catch (RestClientException ex) {
            throw new IllegalStateException("请求共享服务 token 失败", ex);
        }
        String responseBody = response.getBody();
        if (StrUtil.isBlank(responseBody)) {
            throw new IllegalStateException("共享服务返回的 token 内容为空");
        }
        TokenResponse tokenResponse = parseTokenResponse(responseBody);
        cacheTokens(redisTemplate, properties, tokenResponse);
        return tokenResponse.accessToken();
    }

    private static TokenResponse parseTokenResponse(String body) {
        var node = JsonUtils.parseTree(body);
        String accessToken = node.path("access_token").asText(null);
        if (StrUtil.isBlank(accessToken)) {
            throw new IllegalStateException("共享服务返回结果缺少 access_token 字段");
        }
        String refreshToken = node.path("refresh_token").asText(null);
        long expiresIn = node.path("expires_in").asLong(-1);
        long refreshExpiresIn = node.path("refresh_expires_in").asLong(-1);
        return new TokenResponse(accessToken, refreshToken, expiresIn, refreshExpiresIn);
    }

    private static void cacheTokens(StringRedisTemplate redisTemplate,
                                    ShareServiceProperties properties,
                                    TokenResponse tokenResponse) {
        // Write the latest access token and refresh token back to the cache
        ValueOperations<String, String> valueOps = redisTemplate.opsForValue();
        Duration tokenTtl = resolveTtl(tokenResponse.expiresIn(), properties.getTokenTtl());
        valueOps.set(properties.getTokenCacheKey(), tokenResponse.accessToken(), tokenTtl);
        if (StrUtil.isNotBlank(tokenResponse.refreshToken())) {
            Duration refreshTtl = resolveTtl(tokenResponse.refreshExpiresIn(), properties.getRefreshTokenTtl());
            valueOps.set(properties.getRefreshTokenCacheKey(), tokenResponse.refreshToken(), refreshTtl);
        }
    }

    private static Duration resolveTtl(long expiresInSeconds, Duration fallback) {
        Duration effectiveFallback = fallback;
        if (effectiveFallback == null || effectiveFallback.compareTo(MIN_CACHE_TTL) < 0) {
            effectiveFallback = Duration.ofMinutes(5);
        }
        if (expiresInSeconds > 0) {
            Duration candidate = Duration.ofSeconds(expiresInSeconds);
            if (candidate.compareTo(MIN_CACHE_TTL) < 0) {
                candidate = MIN_CACHE_TTL;
            }
            return candidate.compareTo(effectiveFallback) < 0 ? candidate : effectiveFallback;
        }
        return effectiveFallback;
    }

    private record TokenResponse(String accessToken, String refreshToken, long expiresIn, long refreshExpiresIn) {
    }

}
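A hedged usage sketch for the utility above; the injected beans and the service number "SVC001" are placeholders, and the request body may be any object JsonUtils can serialize:

    @Resource
    private RestTemplate restTemplate;
    @Resource
    private StringRedisTemplate stringRedisTemplate;
    @Resource
    private ShareServiceProperties shareServiceProperties;

    public String queryShare() {
        // Token lookup, per-key locking, refresh and caching all happen inside the utility
        return ShareServiceUtils.callShareService(restTemplate, stringRedisTemplate,
                shareServiceProperties, "SVC001", Map.of("keyword", "demo"));
    }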
@@ -0,0 +1,43 @@
package com.zt.plat.framework.common.util.json.databind;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.ser.std.StdSerializer;

import java.io.IOException;

/**
 * Serializer for Long / long arrays; writes them uniformly as strings to avoid JS precision issues.
 */
public class LongArraySerializer extends StdSerializer<Object> {

    public static final LongArraySerializer INSTANCE = new LongArraySerializer();

    private LongArraySerializer() {
        super(Object.class);
    }

    @Override
    public void serialize(Object value, JsonGenerator gen, SerializerProvider provider) throws IOException {
        gen.writeStartArray();
        if (value instanceof long[]) {
            long[] array = (long[]) value;
            for (long element : array) {
                // A primitive long always has a value; delegate straight to NumberSerializer
                NumberSerializer.INSTANCE.serialize(element, gen, provider);
            }
            gen.writeEndArray();
            return;
        }

        Long[] array = (Long[]) value;
        for (Long element : array) {
            if (element == null) {
                provider.defaultSerializeNull(gen);
                continue;
            }
            NumberSerializer.INSTANCE.serialize(element, gen, provider);
        }
        gen.writeEndArray();
    }
}
@@ -0,0 +1,37 @@
package com.zt.plat.framework.common.util.json.databind;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.ser.std.StdSerializer;
import com.fasterxml.jackson.databind.type.TypeFactory;

import java.io.IOException;
import java.util.Collection;

/**
 * Serializes Long elements of a {@link Collection} as strings to avoid JavaScript precision issues.
 */
public class LongCollectionSerializer extends StdSerializer<Collection<?>> {

    public static final LongCollectionSerializer INSTANCE = new LongCollectionSerializer();

    private LongCollectionSerializer() {
        super(TypeFactory.defaultInstance().constructCollectionType(Collection.class, Object.class));
    }

    @Override
    public void serialize(Collection<?> value, JsonGenerator gen, SerializerProvider provider) throws IOException {
        // Pass the collection itself plus its size so Jackson can size the array sensibly
        gen.writeStartArray(value, value.size());
        for (Object element : value) {
            if (element == null) {
                // Allow nulls in the collection, keeping Jackson's default null serialization
                provider.defaultSerializeNull(gen);
                continue;
            }
            // Route every Long/long element through NumberSerializer to preserve front-end precision
            NumberSerializer.INSTANCE.serialize((Number) element, gen, provider);
        }
        gen.writeEndArray();
    }
}
@@ -0,0 +1,52 @@
package com.zt.plat.framework.common.util.json.databind;

import com.fasterxml.jackson.databind.BeanDescription;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializationConfig;
import com.fasterxml.jackson.databind.ser.BeanSerializerModifier;
import com.fasterxml.jackson.databind.type.ArrayType;
import com.fasterxml.jackson.databind.type.CollectionType;
import com.fasterxml.jackson.databind.type.CollectionLikeType;
import com.fasterxml.jackson.databind.JavaType;

/**
 * Serialization enhancement for Long-related collections and arrays, ensuring they all go through the custom Long serialization logic.
 */
public class LongTypeSerializerModifier extends BeanSerializerModifier {

    @Override
    public JsonSerializer<?> modifyCollectionSerializer(SerializationConfig config, CollectionType valueType,
                                                        BeanDescription beanDesc, JsonSerializer<?> serializer) {
        // Switch containers such as List and Set to LongCollectionSerializer when they hold Long
        return needsLongCollectionSerializer(valueType.getContentType()) ? LongCollectionSerializer.INSTANCE : serializer;
    }

    @Override
    public JsonSerializer<?> modifyCollectionLikeSerializer(SerializationConfig config, CollectionLikeType valueType,
                                                            BeanDescription beanDesc, JsonSerializer<?> serializer) {
        // Handle Long elements inside collection-like types (e.g. Page, Optional)
        return needsLongCollectionSerializer(valueType.getContentType()) ? LongCollectionSerializer.INSTANCE : serializer;
    }

    @Override
    public JsonSerializer<?> modifyArraySerializer(SerializationConfig config, ArrayType valueType,
                                                   BeanDescription beanDesc, JsonSerializer<?> serializer) {
        // Use the shared array serializer for both long[] and Long[]
        Class<?> rawClass = valueType.getRawClass();
        if (long[].class.equals(rawClass)) {
            return LongArraySerializer.INSTANCE;
        }
        if (Long[].class.equals(rawClass)) {
            return LongArraySerializer.INSTANCE;
        }
        return serializer;
    }

    private boolean needsLongCollectionSerializer(JavaType contentType) {
        if (contentType == null) {
            return false;
        }
        Class<?> rawClass = contentType.getRawClass();
        return Long.class.equals(rawClass) || Long.TYPE.equals(rawClass);
    }
}
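A minimal sketch of wiring the modifier into an ObjectMapper; this registration site is an assumption and is not shown in the diff:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.module.SimpleModule;

    ObjectMapper mapper = new ObjectMapper();
    SimpleModule module = new SimpleModule();
    // Route Long collections and arrays through the custom serializers above
    module.setSerializerModifier(new LongTypeSerializerModifier());
    mapper.registerModule(module);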
@@ -1,6 +1,14 @@
package com.zt.plat.framework.common.util.monitor;

import jakarta.servlet.http.HttpServletRequest;
import org.apache.commons.lang3.StringUtils;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;
import org.slf4j.MDC;
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

import java.util.UUID;

/**
 * Trace utility class
@@ -9,7 +17,33 @@ import org.apache.skywalking.apm.toolkit.trace.TraceContext;
 *
 * @author ZT
 */
public class TracerUtils {
public final class TracerUtils {

    /**
     * Default placeholder SkyWalking returns when no agent is attached
     */
    private static final String SKY_WALKING_PLACEHOLDER = "N/A";

    /**
     * Placeholder SkyWalking returns when the trace is ignored
     */
    private static final String SKY_WALKING_IGNORED = "Ignored_Trace";

    private static final String MDC_TRACE_ID_KEY = "traceId";
    private static final String REQUEST_ATTRIBUTE_KEY = TracerUtils.class.getName() + ".TRACE_ID";
    private static final String[] HEADER_CANDIDATES = {
            "trace-id",
            "Trace-Id",
            "x-trace-id",
            "X-Trace-Id",
            "x-request-id",
            "X-Request-Id"
    };

    /**
     * Fallback traceId, keeping tracing usable even when no tracing agent is attached
     */
    private static final InheritableThreadLocal<String> FALLBACK_TRACE_ID = new InheritableThreadLocal<>();

    /**
     * Private constructor
@@ -18,13 +52,121 @@ public class TracerUtils {
    }

    /**
     * Gets the trace ID, returning SkyWalking's TraceId directly.
     * An empty string if it does not exist!!!
     * Gets the trace ID.
     * <p>
     * Prefers SkyWalking's TraceId; when the trace context is missing or SkyWalking is not attached, a TraceId from
     * the request context is reused first; otherwise a new fallback TraceId is generated and cached in the current
     * thread, the request context and the logging MDC so downstream components can reuse it.
     *
     * @return trace ID
     */
    public static String getTraceId() {
        return TraceContext.traceId();
        String traceId = TraceContext.traceId();
        if (isValidTraceId(traceId)) {
            cacheTraceId(traceId);
            return traceId;
        }
        String cached = resolveCachedTraceId();
        if (StringUtils.isNotBlank(cached)) {
            return cached;
        }
        String generated = generateFallbackTraceId();
        cacheTraceId(generated);
        return generated;
    }

    /**
     * Manually binds an externally supplied TraceId, e.g. when consuming messages or handling async tasks.
     *
     * @param traceId trace ID
     */
    public static void bindTraceId(String traceId) {
        if (StringUtils.isBlank(traceId)) {
            return;
        }
        cacheTraceId(traceId.trim());
    }

    /**
     * Clears the fallback traceId associated with the current thread, avoiding pollution from thread reuse.
     */
    public static void clear() {
        FALLBACK_TRACE_ID.remove();
        MDC.remove(MDC_TRACE_ID_KEY);
        HttpServletRequest request = currentRequest();
        if (request != null) {
            request.removeAttribute(REQUEST_ATTRIBUTE_KEY);
        }
    }

    private static boolean isValidTraceId(String traceId) {
        if (StringUtils.isBlank(traceId)) {
            return false;
        }
        if (StringUtils.equalsIgnoreCase(traceId, SKY_WALKING_PLACEHOLDER)) {
            return false;
        }
        return !StringUtils.equalsIgnoreCase(traceId, SKY_WALKING_IGNORED);
    }

    private static String resolveCachedTraceId() {
        String cached = FALLBACK_TRACE_ID.get();
        if (StringUtils.isNotBlank(cached)) {
            return cached;
        }
        HttpServletRequest request = currentRequest();
        if (request != null) {
            Object attribute = request.getAttribute(REQUEST_ATTRIBUTE_KEY);
            if (attribute instanceof String attrValue && StringUtils.isNotBlank(attrValue)) {
                cacheTraceId(attrValue);
                return attrValue;
            }
            String headerValue = resolveTraceIdFromHeader(request);
            if (StringUtils.isNotBlank(headerValue)) {
                cacheTraceId(headerValue);
                return headerValue;
            }
        }
        String mdcTraceId = MDC.get(MDC_TRACE_ID_KEY);
        if (StringUtils.isNotBlank(mdcTraceId)) {
            cacheTraceId(mdcTraceId);
            return mdcTraceId;
        }
        return null;
    }

    private static void cacheTraceId(String traceId) {
        if (StringUtils.isBlank(traceId)) {
            return;
        }
        String trimmed = traceId.trim();
        FALLBACK_TRACE_ID.set(trimmed);
        MDC.put(MDC_TRACE_ID_KEY, trimmed);
        HttpServletRequest request = currentRequest();
        if (request != null) {
            request.setAttribute(REQUEST_ATTRIBUTE_KEY, trimmed);
        }
    }

    private static HttpServletRequest currentRequest() {
        RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
        if (requestAttributes instanceof ServletRequestAttributes servletRequestAttributes) {
            return servletRequestAttributes.getRequest();
        }
        return null;
    }

    private static String resolveTraceIdFromHeader(HttpServletRequest request) {
        for (String header : HEADER_CANDIDATES) {
            String value = request.getHeader(header);
            if (StringUtils.isNotBlank(value)) {
                return value.trim();
            }
        }
        return null;
    }

    private static String generateFallbackTraceId() {
        return StringUtils.replace(UUID.randomUUID().toString(), "-", "");
    }

}
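A short sketch of the bind/clear pairing in an async task, the scenario the bindTraceId javadoc above describes; the executor, message object and handle method are placeholders:

    executor.submit(() -> {
        // Propagate the caller's traceId into the worker thread
        TracerUtils.bindTraceId(message.getTraceId());
        try {
            handle(message);
        } finally {
            // Avoid leaking the traceId into the next task on this pooled thread
            TracerUtils.clear();
        }
    });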
@@ -4,6 +4,7 @@ import cn.hutool.core.bean.BeanUtil;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.common.util.collection.CollectionUtils;

import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
@@ -69,10 +70,13 @@ public class BeanUtils {
        return null;
    }
    List<T> list = toBean(source.getList(), targetType);
    if (list == null) {
        list = Collections.emptyList();
    }
    if (peek != null) {
        list.forEach(peek);
    }
    return new PageResult<>(list, source.getTotal());
    return new PageResult<>(list, source.getTotal(), source.getSummary());
}

public static void copyProperties(Object source, Object target) {

@@ -0,0 +1,216 @@
package com.zt.plat.framework.common.util.tree;

import com.alibaba.fastjson.JSON;
import com.zt.plat.framework.common.util.object.ObjectUtils;
import lombok.Data;

import java.util.*;
import java.util.function.*;
import java.util.stream.Collectors;

/**
 * Utility class for tree operations
 */
public class TreeUtil {
    /**
     * Assembles a list into a tree
     * @param list the List to assemble into a tree
     * @param rootCheck condition identifying a root node in E, e.g. x->x.getPId()==-1L, x->x.getParentId()==null, x->x.getParentMenuId()==0
     * @param parentCheck condition identifying a parent in E, e.g. (x,y)->x.getId().equals(y.getPId())
     * @param setSubChildren method on E that sets child data, e.g. Menu::setSubMenus
     * @param <E> generic entity type
     * @return the assembled tree
     */
    public static <E> List<E> makeTree(List<E> list, Predicate<E> rootCheck, BiFunction<E,E,Boolean> parentCheck, BiConsumer<E,List<E>> setSubChildren){
        return list.stream().filter(rootCheck).peek(x->setSubChildren.accept(x,makeChildren(x,list,parentCheck,setSubChildren))).collect(Collectors.toList());
    }

    /**
     * Flattens a tree back into a list
     * @param tree the tree to flatten
     * @param getSubChildren method that reads child data, e.g. Menu::getSubMenus
     * @param setSubChildren method that clears child data, e.g. x->x.setSubMenus(null)
     * @return the flattened data
     * @param <E> generic entity type
     */
    public static <E> List<E> flat(List<E> tree, Function<E,List<E>> getSubChildren, Consumer<E> setSubChildren){
        List<E> res = new ArrayList<>();
        forPostOrder(tree,item->{
            setSubChildren.accept(item);
            res.add(item);
        },getSubChildren);
        return res;
    }


    /**
     * Pre-order traversal
     *
     * @param tree the tree to traverse
     * @param consumer handler applied to each element, e.g. x->System.out.println(x) or System.out::println to print it
     * @param setSubChildren method that reads child data, e.g. Menu::getSubMenus
     * @param <E> generic entity type
     */
    public static <E> void forPreOrder(List<E> tree,Consumer<E> consumer,Function<E,List<E>> setSubChildren){
        for(E l : tree){
            consumer.accept(l);
            List<E> es = setSubChildren.apply(l);
            if(es != null && es.size() > 0){
                forPreOrder(es,consumer,setSubChildren);
            }
        }
    }


    /**
     * Level-order traversal
     *
     * @param tree the tree to traverse
     * @param consumer handler applied to each element, e.g. x->System.out.println(x) or System.out::println to print it
     * @param setSubChildren method that reads child data, e.g. Menu::getSubMenus
     * @param <E> generic entity type
     */
    public static <E> void forLevelOrder(List<E> tree,Consumer<E> consumer,Function<E,List<E>> setSubChildren){
        Queue<E> queue=new LinkedList<>(tree);
        while(!queue.isEmpty()){
            E item = queue.poll();
            consumer.accept(item);
            List<E> childList = setSubChildren.apply(item);
            if(childList !=null && !childList.isEmpty()){
                queue.addAll(childList);
            }
        }
    }


    /**
     * Post-order traversal
     *
     * @param tree the tree to traverse
     * @param consumer handler applied to each element, e.g. x->System.out.println(x) or System.out::println to print it
     * @param setSubChildren method that reads child data, e.g. Menu::getSubMenus
     * @param <E> generic entity type
     */
    public static <E> void forPostOrder(List<E> tree,Consumer<E> consumer,Function<E,List<E>> setSubChildren){
        for(E item : tree) {
            List<E> childList = setSubChildren.apply(item);
            if(childList != null && !childList.isEmpty()){
                forPostOrder(childList,consumer,setSubChildren);
            }
            consumer.accept(item);
        }
    }

    /**
     * Sorts all child nodes of the tree with the given comparator
     *
     * @param tree the tree to sort
     * @param comparator the Comparator to sort by, e.g. Comparator.comparing(MenuVo::getRank) for ascending rank, (x,y)->y.getRank().compareTo(x.getRank()) for descending rank
     * @param getChildren method that reads child data, e.g. MenuVo::getSubMenus
     * @return the sorted tree
     * @param <E> generic entity type
     */
    public static <E> List<E> sort(List<E> tree, Comparator<? super E> comparator, Function<E,List<E>> getChildren){
        for(E item : tree){
            List<E> childList = getChildren.apply(item);
            if(childList != null &&! childList.isEmpty()){
                sort(childList,comparator,getChildren);
            }
        }
        tree.sort(comparator);
        return tree;
    }

    private static <E> List<E> makeChildren(E parent,List<E> allData,BiFunction<E,E,Boolean> parentCheck,BiConsumer<E,List<E>> children){
        return allData.stream().filter(x->parentCheck.apply(parent,x)).peek(x->children.accept(x,makeChildren(x,allData,parentCheck,children))).collect(Collectors.toList());
    }

    /**
     * Usage example
     * @param args
     */
    public static void main(String[] args) {
        MenuVo menu0 = new MenuVo(0L, -1L, "一级菜单", 0);
        MenuVo menu1 = new MenuVo(1L, 0L, "二级菜单", 1);
        MenuVo menu2 = new MenuVo(2L, 0L, "三级菜单", 2);
        MenuVo menu3 = new MenuVo(3L, 1L, "四级菜单", 3);
        MenuVo menu4 = new MenuVo(4L, 1L, "五级菜单", 4);
        MenuVo menu5 = new MenuVo(5L, 2L, "六级菜单", 5);
        MenuVo menu6 = new MenuVo(6L, 2L, "七级菜单", 6);
        MenuVo menu7 = new MenuVo(7L, 3L, "八级菜单", 7);
        MenuVo menu8 = new MenuVo(8L, 3L, "九级菜单", 8);
        MenuVo menu9 = new MenuVo(9L, 4L, "十级菜单", 9);
        // base data
        List<MenuVo> menuList = Arrays.asList(menu0,menu1, menu2,menu3,menu4,menu5,menu6,menu7,menu8,menu9);
        // assemble the tree
        /**
         * 1st parameter List list: the List to assemble into a tree, menuList in the demo above
         * 2nd parameter Predicate rootCheck: the root-node condition, pId==-1 in the demo above
         * 3rd parameter parentCheck: the parent-node condition, id==pId in the demo above
         * 4th parameter setSubChildren: the method that sets child data, Menu::setSubMenus in the demo above
         */
        List<MenuVo> tree= TreeUtil.makeTree(menuList, x->x.getPId()==-1L,(x, y)->x.getId().equals(y.getPId()), MenuVo::setSubMenus);
        System.out.println(JSON.toJSONString(tree));

        // pre-order
        /**
         * Traversal parameters:
         * tree: the tree to traverse, i.e. the object assembled by makeTree()
         * Consumer consumer: handler applied to each element, e.g. x-> System.out.println(x), postOrder.append(x.getId().toString())
         * Function<E, List> getSubChildren: method that reads child data, e.g. Menu::getSubMenus
         */
        StringBuffer preStr = new StringBuffer();
        TreeUtil.forPreOrder(tree,x-> preStr.append(x.getId().toString()),MenuVo::getSubMenus);
        ObjectUtils.equalsAny("0123456789",preStr.toString());

        // level-order
        StringBuffer levelStr=new StringBuffer();
        TreeUtil.forLevelOrder(tree,x-> levelStr.append(x.getId().toString()),MenuVo::getSubMenus);
        ObjectUtils.equalsAny("0123456789",levelStr.toString());

        // post-order
        StringBuffer postOrder=new StringBuffer();
        TreeUtil.forPostOrder(tree,x-> postOrder.append(x.getId().toString()),MenuVo::getSubMenus);
        ObjectUtils.equalsAny("7839415620",postOrder.toString());

        // flatten the tree
        /**
         * flat() parameters:
         * tree: the tree to flatten, i.e. the object assembled by makeTree()
         * Function<E, List> getSubChildren: method that reads child data, e.g. Menu::getSubMenus
         * Consumer setSubChildren: method that clears child data, e.g. x->x.setSubMenus(null)
         */
        List<MenuVo> flat = TreeUtil.flat(tree, MenuVo::getSubMenus,x->x.setSubMenus(null));
        ObjectUtils.equalsAny(flat.size(),menuList.size());
        flat.forEach(x -> {
            if (x.getSubMenus() != null) {
                throw new RuntimeException("树平铺失败");
            }
        });

        // ascending by rank
        /**
         * sort() parameters:
         * tree: the tree to sort, i.e. the object assembled by makeTree()
         * Comparator<? super E> comparator: the sort rule, e.g. Comparator.comparing(MenuVo::getRank) for ascending rank, (x,y)->y.getRank().compareTo(x.getRank()) for descending rank
         * Function<E, List> getChildren: method that reads child data, e.g. MenuVo::getSubMenus
         */
        List<MenuVo> sortTree= TreeUtil.sort(tree, Comparator.comparing(MenuVo::getRank), MenuVo::getSubMenus);
        // descending by rank
        List<MenuVo> sortTreeReverse = TreeUtil.sort(tree, (x,y)->y.getRank().compareTo(x.getRank()), MenuVo::getSubMenus);
    }
    @Data
    static class MenuVo {
        private Long id; // primary key id
        private Long pId; // parent id
        private String name; // menu name
        private Integer rank = 0; // sort order
        private List<MenuVo> subMenus = new ArrayList<>(); // sub-menus
        public MenuVo(Long id, Long pId, String name, Integer rank) {
            this.id = id;
            this.pId = pId;
            this.name = name;
            this.rank = rank;
        }
    }
}
@@ -19,11 +19,11 @@ import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
public class ZtBusinessAutoConfiguration implements WebMvcConfigurer {
    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        // Only intercept URLs related to create/update/delete and set operations
        // Intercept all URLs and uniformly validate business and file-upload request headers
        registry.addInterceptor(new BusinessHeaderInterceptor())
                .addPathPatterns("/**/add**", "/**/create**", "/**/update**", "/**/edit**", "/**/set**");
                .addPathPatterns("/**");
        registry.addInterceptor(new FileUploadHeaderInterceptor())
                .addPathPatterns("/**/add**", "/**/create**", "/**/update**", "/**/edit**", "/**/set**");
                .addPathPatterns("/**");
    }

    @Bean

@@ -2,19 +2,20 @@ package com.zt.plat.framework.business.core.util;

import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import com.zt.plat.framework.common.pojo.CommonResult;
import com.zt.plat.framework.common.pojo.CompanyDeptInfo;
import com.zt.plat.framework.common.util.json.JsonUtils;
import com.zt.plat.framework.common.util.spring.SpringUtils;
import com.zt.plat.framework.security.core.LoginUser;
import com.zt.plat.framework.tenant.core.context.CompanyContextHolder;
import com.zt.plat.framework.web.core.util.WebFrameworkUtils;
import com.zt.plat.module.system.api.dept.DeptApi;
import com.zt.plat.module.system.api.dept.dto.CompanyDeptInfoRespDTO;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import lombok.extern.slf4j.Slf4j;

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.*;
import java.util.stream.Collectors;

import static com.zt.plat.framework.common.util.collection.CollectionUtils.singleton;
@@ -23,7 +24,10 @@ import static com.zt.plat.framework.security.core.util.SecurityFrameworkUtils.ge
/**
 * @author chenbowen
 */
@Slf4j
public class BusinessDeptHandleUtil {
    private static final String CONTEXT_KEY_COMPANY_DEPT_INFOS = "companyDeptInfos";

    public static Set<CompanyDeptInfo> getBelongCompanyAndDept(HttpServletRequest request, HttpServletResponse response) throws Exception {
        response.setContentType("application/json;charset=UTF-8");
        String companyIdHeader = request.getHeader(WebFrameworkUtils.HEADER_VISIT_COMPANY_ID);
@@ -37,21 +41,19 @@ public class BusinessDeptHandleUtil {
            currentLoginUser.setInfo(extraInfo);
        }

        Set<CompanyDeptInfo> companyDeptSet = JSONUtil.parseArray(extraInfo.getOrDefault(LoginUser.INFO_KEY_COMPANY_DEPT_SET, "[]")).stream()
                .map(obj -> JSONUtil.toBean((JSONObject) obj, CompanyDeptInfo.class))
                .collect(Collectors.toSet());
        Set<CompanyDeptInfo> companyDeptSet = resolveCompanyDeptInfos(currentLoginUser, extraInfo);

        // 1. A companyId is present
        if (companyIdHeader != null && !companyIdHeader.isBlank()) {
            // Filter the current user's company/department info by the company ID from the request header
            Set<CompanyDeptInfo> companyDeptSetByCompanyId = companyDeptSet.stream()
                    .filter(companyDeptInfo -> companyDeptInfo.getCompanyId().toString().equals(companyIdHeader))
                    .filter(companyDeptInfo -> companyDeptInfo.getCompanyId().equals(companyIdHeader))
                    .collect(Collectors.toSet());
            if (companyDeptSetByCompanyId.isEmpty()) {
                // No departments under the current company
                CompanyDeptInfo data = new CompanyDeptInfo();
                data.setCompanyId(Long.valueOf(companyIdHeader));
                data.setDeptId(0L);
                data.setCompanyId(companyIdHeader);
                data.setDeptId("0");
                return new HashSet<>(singleton(data));
            }
            // If a deptId is present, verify that it belongs to this companyId
@@ -84,16 +86,106 @@ public class BusinessDeptHandleUtil {
        return companyDeptSet;
    }

    private static Set<CompanyDeptInfo> resolveCompanyDeptInfos(LoginUser loginUser, Map<String, String> extraInfo) {
        if (loginUser == null) {
            return Collections.emptySet();
        }
        Set<CompanyDeptInfo> cached = loginUser.getContext(CONTEXT_KEY_COMPANY_DEPT_INFOS, Set.class);
        if (cached != null) {
            return cached;
        }

        Set<CompanyDeptInfo> resolved = parseFromInfo(extraInfo);
        if (resolved == null || resolved.isEmpty()) {
            Set<CompanyDeptInfo> fetched = fetchCompanyDeptInfos(loginUser.getId());
            if (!fetched.isEmpty()) {
                resolved = fetched;
            } else if (resolved == null) {
                resolved = Collections.emptySet();
            }
        }

        cacheCompanyDeptInfos(loginUser, extraInfo, resolved);
        return resolved;
    }

    private static Set<CompanyDeptInfo> parseFromInfo(Map<String, String> extraInfo) {
        if (extraInfo == null || !extraInfo.containsKey(LoginUser.INFO_KEY_COMPANY_DEPT_SET)) {
            return null;
        }
        try {
            return JSONUtil.parseArray(extraInfo.getOrDefault(LoginUser.INFO_KEY_COMPANY_DEPT_SET, "[]")).stream()
                    .map(obj -> JSONUtil.toBean((JSONObject) obj, CompanyDeptInfo.class))
                    .collect(Collectors.toCollection(LinkedHashSet::new));
        } catch (Exception ex) {
            log.warn("[parseFromInfo][解析公司部门信息失败] raw={}", extraInfo.get(LoginUser.INFO_KEY_COMPANY_DEPT_SET), ex);
            return Collections.emptySet();
        }
    }

    private static Set<CompanyDeptInfo> fetchCompanyDeptInfos(Long userId) {
        if (userId == null) {
            return Collections.emptySet();
        }
        try {
            DeptApi deptApi = SpringUtils.getBean(DeptApi.class);
            CommonResult<Set<CompanyDeptInfoRespDTO>> result = deptApi.getCompanyDeptInfoListByUserId(userId);
            if (result == null || !result.isSuccess() || result.getData() == null) {
                return Collections.emptySet();
            }
            return result.getData().stream()
                    .map(BusinessDeptHandleUtil::convert)
                    .collect(Collectors.toCollection(LinkedHashSet::new));
        } catch (Exception ex) {
            log.warn("[fetchCompanyDeptInfos][userId({}) 获取公司部门信息失败]", userId, ex);
            return Collections.emptySet();
        }
    }

    private static void cacheCompanyDeptInfos(LoginUser loginUser, Map<String, String> extraInfo, Set<CompanyDeptInfo> infos) {
        if (infos == null) {
            infos = Collections.emptySet();
        }
        loginUser.setContext(CONTEXT_KEY_COMPANY_DEPT_INFOS, infos);
        if (extraInfo == null) {
            return;
        }
        Set<String> companyIds = infos.stream()
                .map(CompanyDeptInfo::getCompanyId)
                .filter(Objects::nonNull)
                .collect(Collectors.toCollection(LinkedHashSet::new));
        Set<String> deptIds = infos.stream()
                .map(CompanyDeptInfo::getDeptId)
                .filter(Objects::nonNull)
                .collect(Collectors.toCollection(LinkedHashSet::new));
        extraInfo.put(LoginUser.INFO_KEY_COMPANY_DEPT_SET, JsonUtils.toJsonString(infos));
        extraInfo.put(LoginUser.INFO_KEY_COMPANY_IDS, JsonUtils.toJsonString(companyIds));
        extraInfo.put(LoginUser.INFO_KEY_DEPT_IDS, JsonUtils.toJsonString(deptIds));
    }

    private static CompanyDeptInfo convert(CompanyDeptInfoRespDTO dto) {
        CompanyDeptInfo info = new CompanyDeptInfo();
        info.setCompanyId(String.valueOf(dto.getCompanyId()));
        info.setCompanyName(dto.getCompanyName());
        info.setCompanyCode(dto.getCompanyCode());
        info.setDeptId(String.valueOf(dto.getDeptId()));
        info.setDeptName(dto.getDeptName());
        info.setDeptCode(dto.getDeptCode());
        return info;
    }

    private static boolean applyAutoSelection(LoginUser loginUser, HttpServletRequest request, CompanyDeptInfo info) {
        if (info == null || info.getCompanyId() == null || info.getCompanyId() <= 0
                || info.getDeptId() == null || info.getDeptId() <= 0) {
        if (info == null || info.getCompanyId() == null || "0".equals(info.getCompanyId())
                || info.getDeptId() == null || "0".equals(info.getDeptId())) {
            return false;
        }
        if (loginUser != null) {
            loginUser.setVisitCompanyId(info.getCompanyId());
            loginUser.setVisitCompanyId(Long.valueOf(info.getCompanyId()));
            loginUser.setVisitCompanyName(info.getCompanyName());
            loginUser.setVisitDeptId(info.getDeptId());
            loginUser.setVisitCompanyCode(info.getCompanyName());
            loginUser.setVisitDeptId(Long.valueOf(info.getDeptId()));
            loginUser.setVisitDeptName(info.getDeptName());
            loginUser.setVisitDeptCode(info.getDeptName());
        }
        request.setAttribute(WebFrameworkUtils.HEADER_VISIT_COMPANY_ID, info.getCompanyId());
        if (info.getCompanyName() != null) {
@@ -104,7 +196,7 @@ public class BusinessDeptHandleUtil {
            request.setAttribute(WebFrameworkUtils.HEADER_VISIT_DEPT_NAME, info.getDeptName());
        }
        CompanyContextHolder.setIgnore(false);
        CompanyContextHolder.setCompanyId(info.getCompanyId());
        CompanyContextHolder.setCompanyId(Long.valueOf(info.getCompanyId()));
        return true;
    }
}

@@ -107,11 +107,11 @@ class BusinessHeaderInterceptorTest {

    // Build loginUser containing multiple company departments
    CompanyDeptInfo deptInfo1 = new CompanyDeptInfo();
    deptInfo1.setCompanyId(1L);
    deptInfo1.setDeptId(2L);
    deptInfo1.setCompanyId(String.valueOf(1L));
    deptInfo1.setDeptId(String.valueOf(2L));
    CompanyDeptInfo deptInfo2 = new CompanyDeptInfo();
    deptInfo2.setCompanyId(2L);
    deptInfo2.setDeptId(3L);
    deptInfo2.setCompanyId(String.valueOf(2L));
    deptInfo2.setDeptId(String.valueOf(3L));
    Set<CompanyDeptInfo> deptSet = new HashSet<>();
    deptSet.add(deptInfo1);
    deptSet.add(deptInfo2);
@@ -141,8 +141,8 @@ class BusinessHeaderInterceptorTest {

    // Build loginUser with a single company that has a single department
    CompanyDeptInfo deptInfo = new CompanyDeptInfo();
    deptInfo.setCompanyId(100L);
    deptInfo.setDeptId(200L);
    deptInfo.setCompanyId(String.valueOf(100L));
    deptInfo.setDeptId(String.valueOf(200L));
    Set<CompanyDeptInfo> deptSet = new HashSet<>();
    deptSet.add(deptInfo);
    LoginUser loginUser = randomPojo(LoginUser.class, o -> o.setId(1L)
@@ -155,9 +155,9 @@ class BusinessHeaderInterceptorTest {
    setLoginUserForTest(loginUser);

    boolean result = interceptor.preHandle(request, response, handlerMethod);
    assertFalse(result);
    // Optional: verify(request).setAttribute("visit-company-id", String.valueOf(deptInfo.getCompanyId()));
    // Optional: verify(request).setAttribute("visit-dept-id", String.valueOf(deptInfo.getDeptId()));
    assertTrue(result);
    verify(request).setAttribute(eq("visit-company-id"), eq(deptInfo.getCompanyId()));
    verify(request).setAttribute(eq("visit-dept-id"), eq(deptInfo.getDeptId()));
}

/**
@@ -172,11 +172,11 @@ class BusinessHeaderInterceptorTest {

    // Build loginUser with multiple company departments
    CompanyDeptInfo deptInfo1 = new CompanyDeptInfo();
    deptInfo1.setCompanyId(1L);
    deptInfo1.setDeptId(2L);
    deptInfo1.setCompanyId(String.valueOf(1L));
    deptInfo1.setDeptId(String.valueOf(2L));
    CompanyDeptInfo deptInfo2 = new CompanyDeptInfo();
    deptInfo2.setCompanyId(2L);
    deptInfo2.setDeptId(3L);
    deptInfo2.setCompanyId(String.valueOf(2L));
    deptInfo2.setDeptId(String.valueOf(3L));
    Set<CompanyDeptInfo> deptSet = new HashSet<>();
    deptSet.add(deptInfo1);
    deptSet.add(deptInfo2);
@@ -207,11 +207,11 @@ class BusinessHeaderInterceptorTest {

    // Build loginUser with departments of other companies only
    CompanyDeptInfo deptInfo1 = new CompanyDeptInfo();
    deptInfo1.setCompanyId(1L);
    deptInfo1.setDeptId(2L);
    deptInfo1.setCompanyId(String.valueOf(1L));
    deptInfo1.setDeptId(String.valueOf(2L));
    CompanyDeptInfo deptInfo2 = new CompanyDeptInfo();
    deptInfo2.setCompanyId(2L);
    deptInfo2.setDeptId(3L);
    deptInfo2.setCompanyId(String.valueOf(2L));
    deptInfo2.setDeptId(String.valueOf(3L));
    Set<CompanyDeptInfo> deptSet = new HashSet<>();
    deptSet.add(deptInfo1);
    deptSet.add(deptInfo2);

@@ -25,6 +25,7 @@ import com.zt.plat.framework.web.core.handler.GlobalExceptionHandler;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.TenantLineInnerInterceptor;
import jakarta.annotation.Resource;
import org.springframework.beans.factory.SmartInitializingSingleton;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
@@ -49,17 +50,30 @@ import org.springframework.web.util.pattern.PathPattern;

import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import static com.zt.plat.framework.common.util.collection.CollectionUtils.convertList;

@AutoConfiguration
@ConditionalOnProperty(prefix = "zt.tenant", value = "enable", matchIfMissing = true) // Allow disabling multi-tenancy via zt.tenant.enable=false
@EnableConfigurationProperties(TenantProperties.class)
public class ZtTenantAutoConfiguration {
public class ZtTenantAutoConfiguration implements SmartInitializingSingleton {

    @Resource
    private ApplicationContext applicationContext;

    @Resource
    private TenantProperties tenantProperties;

    /**
     * Stores the URLs annotated with @TenantIgnore
     *
     * Why not put them into TenantProperties directly?
     * Because TenantProperties is a @ConfigurationProperties bean that may be refreshed by a config center such as Nacos, dropping the programmatically added URLs.
     */
    private final Set<String> globalIgnoreUrls = ConcurrentHashMap.newKeySet();

    @Bean
    public TenantFrameworkService tenantFrameworkService(TenantCommonApi tenantApi) {
        // See https://gitee.com/zhijiantianya/zt-cloud/issues/IC6YZF
@@ -98,16 +112,18 @@ public class ZtTenantAutoConfiguration {
        FilterRegistrationBean<TenantContextWebFilter> registrationBean = new FilterRegistrationBean<>();
        registrationBean.setFilter(new TenantContextWebFilter());
        registrationBean.setOrder(WebFilterOrderEnum.TENANT_CONTEXT_FILTER);
        addIgnoreUrls(tenantProperties);
        return registrationBean;
    }

    @Override
    public void afterSingletonsInstantiated() {
        addIgnoreUrls();
    }

    /**
     * If a Controller endpoint carries the {@link TenantIgnore} annotation, add it to the ignored URLs
     *
     * @param tenantProperties tenant configuration
     */
    private void addIgnoreUrls(TenantProperties tenantProperties) {
    private void addIgnoreUrls() {
        // Get the HandlerMethod collection for all endpoints
        RequestMappingHandlerMapping requestMappingHandlerMapping = (RequestMappingHandlerMapping)
                applicationContext.getBean("requestMappingHandlerMapping");
@@ -120,10 +136,10 @@ public class ZtTenantAutoConfiguration {
        }
        // Add to the ignored URLs
        if (entry.getKey().getPatternsCondition() != null) {
            tenantProperties.getIgnoreUrls().addAll(entry.getKey().getPatternsCondition().getPatterns());
            globalIgnoreUrls.addAll(entry.getKey().getPatternsCondition().getPatterns());
        }
        if (entry.getKey().getPathPatternsCondition() != null) {
            tenantProperties.getIgnoreUrls().addAll(
            globalIgnoreUrls.addAll(
                    convertList(entry.getKey().getPathPatternsCondition().getPatterns(), PathPattern::getPatternString));
        }
    }
@@ -172,7 +188,7 @@ public class ZtTenantAutoConfiguration {
            TenantFrameworkService tenantFrameworkService) {
        FilterRegistrationBean<TenantSecurityWebFilter> registrationBean = new FilterRegistrationBean<>();
        registrationBean.setFilter(new TenantSecurityWebFilter(tenantProperties, webProperties,
                globalExceptionHandler, tenantFrameworkService));
                globalExceptionHandler, tenantFrameworkService, globalIgnoreUrls));
        registrationBean.setOrder(WebFilterOrderEnum.TENANT_SECURITY_FILTER);
        return registrationBean;
    }
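A hedged sketch of the behavior wired above: once afterSingletonsInstantiated() has scanned the handler mappings, a controller method carrying @TenantIgnore ends up in globalIgnoreUrls, so TenantSecurityWebFilter lets it pass without a tenant id. The controller and path here are hypothetical:

    @RestController
    public class PingController {

        @TenantIgnore
        @GetMapping("/ping")
        public String ping() {
            return "pong";
        }
    }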
@@ -1,12 +1,12 @@
package com.zt.plat.framework.tenant.core.db;

import com.zt.plat.framework.tenant.config.TenantProperties;
import com.zt.plat.framework.tenant.core.aop.TenantIgnore;
import com.zt.plat.framework.tenant.core.context.TenantContextHolder;
import com.baomidou.mybatisplus.core.metadata.TableInfo;
import com.baomidou.mybatisplus.core.metadata.TableInfoHelper;
import com.baomidou.mybatisplus.extension.plugins.handler.TenantLineHandler;
import com.baomidou.mybatisplus.extension.toolkit.SqlParserUtils;
import com.zt.plat.framework.tenant.config.TenantProperties;
import com.zt.plat.framework.tenant.core.aop.TenantIgnore;
import com.zt.plat.framework.tenant.core.context.TenantContextHolder;
import net.sf.jsqlparser.expression.Expression;
import net.sf.jsqlparser.expression.LongValue;

@@ -69,7 +69,12 @@ public class TenantDatabaseInterceptor implements TenantLineHandler {
        // Tables that cannot be found are not part of the zt project; do not intercept them (ignore the tenant)
        TableInfo tableInfo = TableInfoHelper.getTableInfo(tableName);
        if (tableInfo == null) {
            return true;
            tableName = tableName.toLowerCase();
            tableInfo = TableInfoHelper.getTableInfo(tableName);
        }
        if (tableInfo == null) {
            tableName = tableName.toLowerCase();
            tableInfo = TableInfoHelper.getTableInfo(tableName);
        }
        // If it extends the TenantBaseDO base class, obviously do not ignore the tenant
        if (TenantBaseDO.class.isAssignableFrom(tableInfo.getEntityType())) {

@@ -21,6 +21,7 @@ import org.springframework.util.AntPathMatcher;

import java.io.IOException;
import java.util.Objects;
import java.util.Set;

/**
* Multi-tenant Security web filter
@@ -39,16 +40,19 @@ public class TenantSecurityWebFilter extends ApiRequestFilter {

private final GlobalExceptionHandler globalExceptionHandler;
private final TenantFrameworkService tenantFrameworkService;
private final Set<String> globalIgnoreUrls;

public TenantSecurityWebFilter(TenantProperties tenantProperties,
WebProperties webProperties,
GlobalExceptionHandler globalExceptionHandler,
TenantFrameworkService tenantFrameworkService) {
TenantFrameworkService tenantFrameworkService,
Set<String> globalIgnoreUrls) {
super(webProperties);
this.tenantProperties = tenantProperties;
this.pathMatcher = new AntPathMatcher();
this.globalExceptionHandler = globalExceptionHandler;
this.tenantFrameworkService = tenantFrameworkService;
this.globalIgnoreUrls = globalIgnoreUrls;
}

@Override
@@ -105,12 +109,20 @@ public class TenantSecurityWebFilter extends ApiRequestFilter {
if (CollUtil.contains(tenantProperties.getIgnoreUrls(), request.getRequestURI())) {
return true;
}
if (CollUtil.contains(globalIgnoreUrls, request.getRequestURI())) {
return true;
}
// Match Ant-style patterns one by one
for (String url : tenantProperties.getIgnoreUrls()) {
if (pathMatcher.match(url, request.getRequestURI())) {
return true;
}
}
for (String url : globalIgnoreUrls) {
if (pathMatcher.match(url, request.getRequestURI())) {
return true;
}
}
return false;
}

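The ignore check now tries exact hits first (CollUtil.contains) and then falls back to Ant-style patterns for both URL sets. A quick sketch of the Ant matching semantics the filter relies on (patterns and paths are illustrative only):

import org.springframework.util.AntPathMatcher;

public class AntMatchDemo {
    public static void main(String[] args) {
        AntPathMatcher matcher = new AntPathMatcher();
        // '*' matches exactly one path segment, '**' matches any number of segments
        System.out.println(matcher.match("/admin-api/*/ping", "/admin-api/infra/ping")); // true
        System.out.println(matcher.match("/admin-api/**", "/admin-api/infra/db/ping"));  // true
        System.out.println(matcher.match("/admin-api/*", "/admin-api/infra/db/ping"));   // false
    }
}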
@@ -66,7 +66,7 @@
<dependency>
<groupId>com.zt.plat</groupId>
<artifactId>zt-spring-boot-starter-biz-ip</artifactId>
<optional>true</optional> <!-- Marked optional: only used for AreaConvert -->
<!--<optional>true</optional>--> <!-- Marked optional: only used for AreaConvert -->
</dependency>

<!-- Test dependencies -->

@@ -24,10 +24,15 @@ public class TraceFilter extends OncePerRequestFilter {
@Override
protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain chain)
throws IOException, ServletException {
// Set the traceId on the response
response.addHeader(HEADER_NAME_TRACE_ID, TracerUtils.getTraceId());
// Continue the filter chain
chain.doFilter(request, response);
String traceId = TracerUtils.getTraceId();
try {
// Set the traceId on the response so clients can trace requests back
response.addHeader(HEADER_NAME_TRACE_ID, traceId);
// Continue the filter chain
chain.doFilter(request, response);
} finally {
TracerUtils.clear();
}
}

}

@@ -107,6 +107,12 @@
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-openfeign</artifactId>
</dependency>

<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

</project>

@@ -1,9 +1,9 @@
package com.zt.plat.framework.mybatis.config;

import cn.hutool.core.util.StrUtil;
import com.zt.plat.framework.mybatis.core.handler.DefaultDBFieldHandler;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.autoconfigure.MybatisPlusAutoConfiguration;
import com.baomidou.mybatisplus.autoconfigure.MybatisPlusPropertiesCustomizer;
import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import com.baomidou.mybatisplus.core.incrementer.IKeyGenerator;
import com.baomidou.mybatisplus.extension.incrementer.*;
@@ -11,6 +11,8 @@ import com.baomidou.mybatisplus.extension.parser.JsqlParserGlobal;
import com.baomidou.mybatisplus.extension.parser.cache.JdkSerialCaffeineJsqlParseCache;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import com.zt.plat.framework.mybatis.core.handler.DefaultDBFieldHandler;
import com.zt.plat.framework.mybatis.core.sum.PageSumTableFieldAnnotationHandler;
import org.apache.ibatis.annotations.Mapper;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.autoconfigure.AutoConfiguration;
@@ -25,29 +27,40 @@ import java.util.concurrent.TimeUnit;
*
* @author ZT
*/
@AutoConfiguration(before = MybatisPlusAutoConfiguration.class) // Purpose: run before the MyBatis Plus auto-configuration so @MapperScan does not miss Mappers and log warnings
@AutoConfiguration(before = MybatisPlusAutoConfiguration.class) // Run before the official auto-configuration to avoid unscanned Mappers
@MapperScan(value = "${zt.info.base-package}", annotationClass = Mapper.class,
lazyInitialization = "${mybatis.lazy-initialization:false}") // Lazy Mapper loading, currently only used by unit tests
lazyInitialization = "${mybatis.lazy-initialization:false}") // Lazy Mapper loading, currently only needed by unit tests
public class ZtMybatisAutoConfiguration {

static {
// Dynamic SQL optimization backed by a local cache to speed up parsing; better support for complex tenant XML dynamic SQL
// Use a local cache to speed up JsqlParser parsing; more stable performance for complex dynamic SQL
JsqlParserGlobal.setJsqlParseCache(new JdkSerialCaffeineJsqlParseCache(
(cache) -> cache.maximumSize(1024)
.expireAfterWrite(5, TimeUnit.SECONDS))
);
cache -> cache.maximumSize(1024).expireAfterWrite(5, TimeUnit.SECONDS)));
}

@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor mybatisPlusInterceptor = new MybatisPlusInterceptor();
mybatisPlusInterceptor.addInnerInterceptor(new PaginationInnerInterceptor()); // Pagination plugin
return mybatisPlusInterceptor;
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
interceptor.addInnerInterceptor(new PaginationInnerInterceptor()); // Pagination plugin
return interceptor;
}

@Bean
public MetaObjectHandler defaultMetaObjectHandler() {
return new DefaultDBFieldHandler(); // Automatic field-fill handler
return new DefaultDBFieldHandler(); // Unified filling of common columns
}

@Bean
public MybatisPlusPropertiesCustomizer pageSumAnnotationCustomizer() {
// Use the official extension point to inject a TableField annotation with exist = false for @PageSum fields
return properties -> {
var globalConfig = properties.getGlobalConfig();
if (globalConfig == null) {
return;
}
globalConfig.setAnnotationHandler(
new PageSumTableFieldAnnotationHandler(globalConfig.getAnnotationHandler()));
};
}

@Bean

@@ -1,6 +1,13 @@
package com.zt.plat.framework.mybatis.core.mapper;

import cn.hutool.core.collection.CollUtil;
import com.zt.plat.framework.common.pojo.PageParam;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.common.pojo.SortablePageParam;
import com.zt.plat.framework.common.pojo.SortingField;
import com.zt.plat.framework.mybatis.core.sum.PageSumSupport;
import com.zt.plat.framework.mybatis.core.util.JdbcUtils;
import com.zt.plat.framework.mybatis.core.util.MyBatisUtils;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
@@ -43,14 +50,18 @@ public interface BaseMapperX<T> extends MPJBaseMapper<T> {
// Special case: no pagination, query everything directly
if (PageParam.PAGE_SIZE_NONE.equals(pageParam.getPageSize())) {
List<T> list = selectList(queryWrapper);
return new PageResult<>(list, (long) list.size());
PageResult<T> pageResult = new PageResult<>(list, (long) list.size());
PageSumSupport.tryAttachSummary(this, queryWrapper, pageResult);
return pageResult;
}

// MyBatis Plus query
IPage<T> mpPage = MyBatisUtils.buildPage(pageParam, sortingFields);
selectPage(mpPage, queryWrapper);
// Convert and return
return new PageResult<>(mpPage.getRecords(), mpPage.getTotal());
PageResult<T> pageResult = new PageResult<>(mpPage.getRecords(), mpPage.getTotal());
PageSumSupport.tryAttachSummary(this, queryWrapper, pageResult);
return pageResult;
}

default <D> PageResult<D> selectJoinPage(PageParam pageParam, Class<D> clazz, MPJLambdaWrapper<T> lambdaWrapper) {

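With this change, both branches of selectPage attach SUM aggregates to the result before returning. A minimal end-to-end sketch, assuming a hypothetical OrderDO and OrderMapper (names illustrative; @PageSum, PageResult and BaseMapperX are the types from this diff):

import com.baomidou.mybatisplus.annotation.TableName;
import com.zt.plat.framework.common.annotation.PageSum;
import java.math.BigDecimal;

@TableName("trade_order")
public class OrderDO {

    // SUM(pay_amount) over the whole filtered result set, not just the current page,
    // is attached to PageResult#getSummary() under the property name "payAmount"
    @PageSum(column = "pay_amount")
    private BigDecimal payAmount;
}

public interface OrderMapper extends BaseMapperX<OrderDO> { }

// In a service:
// PageResult<OrderDO> page = orderMapper.selectPage(pageParam, new QueryWrapper<>());
// BigDecimal total = page.getSummary().get("payAmount");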
@@ -0,0 +1,76 @@
package com.zt.plat.framework.mybatis.core.sum;

import java.lang.reflect.Field;
import java.util.Objects;

/**
* Metadata describing a field participating in page-level SUM aggregation.
*/
final class PageSumFieldMeta {

private final String propertyName;
private final String columnExpression;
private final String selectAlias;
private final Class<?> fieldType;

PageSumFieldMeta(String propertyName, String columnExpression, String selectAlias, Class<?> fieldType) {
this.propertyName = propertyName;
this.columnExpression = columnExpression;
this.selectAlias = selectAlias;
this.fieldType = fieldType;
}

static PageSumFieldMeta of(Field field, String columnExpression) {
String property = field.getName();
return new PageSumFieldMeta(property, columnExpression, property, field.getType());
}

String getPropertyName() {
return propertyName;
}

String getColumnExpression() {
return columnExpression;
}

String getSelectAlias() {
return selectAlias;
}

Class<?> getFieldType() {
return fieldType;
}

String buildSelectSegment() {
return "SUM(" + columnExpression + ") AS " + selectAlias;
}

@Override
public int hashCode() {
return Objects.hash(propertyName, columnExpression, selectAlias, fieldType);
}

@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof PageSumFieldMeta other)) {
return false;
}
return Objects.equals(propertyName, other.propertyName)
&& Objects.equals(columnExpression, other.columnExpression)
&& Objects.equals(selectAlias, other.selectAlias)
&& Objects.equals(fieldType, other.fieldType);
}

@Override
public String toString() {
return "PageSumFieldMeta{" +
"propertyName='" + propertyName + '\'' +
", columnExpression='" + columnExpression + '\'' +
", selectAlias='" + selectAlias + '\'' +
", fieldType=" + fieldType +
'}';
}
}

@@ -0,0 +1,341 @@
package com.zt.plat.framework.mybatis.core.sum;

import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.TableFieldInfo;
import com.baomidou.mybatisplus.core.metadata.TableInfo;
import com.baomidou.mybatisplus.core.metadata.TableInfoHelper;
import com.zt.plat.framework.common.annotation.PageSum;
import com.zt.plat.framework.common.pojo.PageResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
* Utility that inspects {@link PageSum} annotations and attaches aggregated SUM results to {@link PageResult}.
*/
public final class PageSumSupport {

private static final Logger LOGGER = LoggerFactory.getLogger(PageSumSupport.class);

private static final ConcurrentMap<Class<?>, Optional<Class<?>>> ENTITY_CLASS_CACHE = new ConcurrentHashMap<>();
private static final ConcurrentMap<Class<?>, List<PageSumFieldMeta>> FIELD_META_CACHE = new ConcurrentHashMap<>();
private static final ConcurrentMap<Class<?>, Optional<Field>> SQL_SELECT_FIELD_CACHE = new ConcurrentHashMap<>();

private PageSumSupport() {
}

public static <T> void tryAttachSummary(Object mapperProxy, Wrapper<T> wrapper, PageResult<?> pageResult) {
if (mapperProxy == null || pageResult == null) {
return;
}
Class<?> entityClass = resolveEntityClass(mapperProxy.getClass());
if (entityClass == null) {
return;
}
List<PageSumFieldMeta> fieldMetas = resolveFieldMetas(entityClass);
if (fieldMetas.isEmpty()) {
return;
}
Map<String, BigDecimal> summary = executeSum((BaseMapper<T>) mapperProxy, wrapper, fieldMetas);
if (!summary.isEmpty()) {
pageResult.setSummary(summary);
}
}

private static Class<?> resolveEntityClass(Class<?> mapperProxyClass) {
return ENTITY_CLASS_CACHE.computeIfAbsent(mapperProxyClass, PageSumSupport::extractEntityClass)
.orElse(null);
}

private static Optional<Class<?>> extractEntityClass(Class<?> mapperProxyClass) {
Class<?>[] interfaces = mapperProxyClass.getInterfaces();
for (Class<?> iface : interfaces) {
Class<?> entityClass = extractEntityClassFromInterface(iface);
if (entityClass != null) {
return Optional.of(entityClass);
}
}
return Optional.empty();
}

private static Class<?> extractEntityClassFromInterface(Class<?> interfaceClass) {
if (interfaceClass == null || interfaceClass == Object.class) {
return null;
}
// inspect direct generic interfaces
for (Type type : interfaceClass.getGenericInterfaces()) {
Class<?> resolved = resolveFromType(type);
if (resolved != null) {
return resolved;
}
}
// fall back to parent interfaces recursively
for (Class<?> parent : interfaceClass.getInterfaces()) {
Class<?> resolved = extractEntityClassFromInterface(parent);
if (resolved != null) {
return resolved;
}
}
// handle the generic superclass (rare for interfaces but kept for completeness)
return resolveFromType(interfaceClass.getGenericSuperclass());
}

private static Class<?> resolveFromType(Type type) {
if (type == null) {
return null;
}
if (type instanceof ParameterizedType parameterizedType) {
Type raw = parameterizedType.getRawType();
if (raw instanceof Class<?> rawClass) {
if (BaseMapper.class.isAssignableFrom(rawClass)) {
Type[] actualTypes = parameterizedType.getActualTypeArguments();
if (actualTypes.length > 0) {
Type actual = actualTypes[0];
return toClass(actual);
}
}
Class<?> resolved = extractEntityClassFromInterface(rawClass);
if (resolved != null) {
return resolved;
}
}
for (Type actual : parameterizedType.getActualTypeArguments()) {
Class<?> resolved = resolveFromType(actual);
if (resolved != null) {
return resolved;
}
}
} else if (type instanceof Class<?> clazz) {
return extractEntityClassFromInterface(clazz);
}
return null;
}

private static Class<?> toClass(Type type) {
if (type instanceof Class<?> clazz) {
return clazz;
}
if (type instanceof ParameterizedType parameterizedType) {
Type raw = parameterizedType.getRawType();
if (raw instanceof Class<?>) {
return (Class<?>) raw;
}
}
return null;
}

private static List<PageSumFieldMeta> resolveFieldMetas(Class<?> entityClass) {
return FIELD_META_CACHE.computeIfAbsent(entityClass, PageSumSupport::scanFieldMetas);
}

private static List<PageSumFieldMeta> scanFieldMetas(Class<?> entityClass) {
TableInfo tableInfo = TableInfoHelper.getTableInfo(entityClass);
if (tableInfo == null) {
LOGGER.debug("No TableInfo found for entity {}, falling back to annotation provided column expressions.",
entityClass.getName());
}
Map<String, String> propertyColumnMap = tableInfo != null
? buildPropertyColumnMap(tableInfo)
: Collections.emptyMap();
List<PageSumFieldMeta> metas = new ArrayList<>();
Class<?> current = entityClass;
while (current != null && current != Object.class) {
Field[] fields = current.getDeclaredFields();
for (Field field : fields) {
PageSum annotation = field.getAnnotation(PageSum.class);
if (annotation == null) {
continue;
}
if (!isNumeric(field.getType())) {
LOGGER.warn("Field {}.{} annotated with @PageSum is not numeric and will be ignored.",
entityClass.getSimpleName(), field.getName());
continue;
}
String columnExpression = resolveColumnExpression(annotation, field, propertyColumnMap);
if (StrUtil.isBlank(columnExpression)) {
LOGGER.warn("Unable to resolve column for field {}.{} with @PageSum, skipping.",
entityClass.getSimpleName(), field.getName());
continue;
}
metas.add(PageSumFieldMeta.of(field, columnExpression));
}
current = current.getSuperclass();
}
return metas.isEmpty() ? Collections.emptyList() : Collections.unmodifiableList(metas);
}

private static Map<String, String> buildPropertyColumnMap(TableInfo tableInfo) {
Map<String, String> mapping = new LinkedHashMap<>();
if (StrUtil.isNotBlank(tableInfo.getKeyProperty()) && StrUtil.isNotBlank(tableInfo.getKeyColumn())) {
mapping.put(tableInfo.getKeyProperty(), tableInfo.getKeyColumn());
}
for (TableFieldInfo fieldInfo : tableInfo.getFieldList()) {
mapping.put(fieldInfo.getProperty(), fieldInfo.getColumn());
}
return mapping;
}

private static String resolveColumnExpression(PageSum annotation, Field field, Map<String, String> propertyColumnMap) {
if (StrUtil.isNotBlank(annotation.column())) {
return annotation.column();
}
return propertyColumnMap.get(field.getName());
}

private static boolean isNumeric(Class<?> type) {
if (type.isPrimitive()) {
return type == int.class || type == long.class || type == double.class
|| type == float.class || type == short.class || type == byte.class;
}
return Number.class.isAssignableFrom(type) || BigDecimal.class.isAssignableFrom(type)
|| BigInteger.class.isAssignableFrom(type);
}

private static <T> Map<String, BigDecimal> executeSum(BaseMapper<T> mapper, Wrapper<T> wrapper, List<PageSumFieldMeta> metas) {
if (metas.isEmpty()) {
return Collections.emptyMap();
}
Wrapper<T> workingWrapper = cloneWrapper(wrapper);
applySelect(workingWrapper, metas);
List<Map<String, Object>> rows = mapper.selectMaps(workingWrapper);
Map<String, BigDecimal> result = new LinkedHashMap<>(metas.size());
Map<String, Object> row = rows.isEmpty() ? Collections.emptyMap() : rows.get(0);
for (PageSumFieldMeta meta : metas) {
Object value = extractValue(row, meta.getSelectAlias());
result.put(meta.getPropertyName(), toBigDecimal(value));
}
return result;
}

private static <T> Wrapper<T> cloneWrapper(Wrapper<T> wrapper) {
if (wrapper == null) {
return new QueryWrapper<>();
}
if (wrapper instanceof com.baomidou.mybatisplus.core.conditions.AbstractWrapper<?, ?, ?> abstractWrapper) {
@SuppressWarnings("unchecked")
Wrapper<T> clone = (Wrapper<T>) abstractWrapper.clone();
return clone;
}
return wrapper;
}

private static void applySelect(Wrapper<?> wrapper, List<PageSumFieldMeta> metas) {
String selectSql = buildSelectSql(metas);
if (wrapper instanceof QueryWrapper<?> queryWrapper) {
queryWrapper.select(selectSql);
return;
}
if (wrapper instanceof LambdaQueryWrapper<?> lambdaQueryWrapper) {
setSqlSelect(lambdaQueryWrapper, selectSql);
return;
}
// attempt a reflective fallback for other wrapper implementations extending LambdaQueryWrapper
setSqlSelect(wrapper, selectSql);
}

private static String buildSelectSql(List<PageSumFieldMeta> metas) {
StringBuilder builder = new StringBuilder();
for (int i = 0; i < metas.size(); i++) {
if (i > 0) {
builder.append(',');
}
builder.append(metas.get(i).buildSelectSegment());
}
return builder.toString();
}

private static void setSqlSelect(Object wrapper, String selectSql) {
Field field = SQL_SELECT_FIELD_CACHE.computeIfAbsent(wrapper.getClass(), PageSumSupport::locateSqlSelectField)
.orElse(null);
if (field == null) {
LOGGER.debug("Unable to locate sqlSelect field on wrapper {}, summary aggregation skipped.",
wrapper.getClass().getName());
return;
}
try {
com.baomidou.mybatisplus.core.conditions.SharedString shared = (com.baomidou.mybatisplus.core.conditions.SharedString) field.get(wrapper);
if (shared == null) {
shared = com.baomidou.mybatisplus.core.conditions.SharedString.emptyString();
field.set(wrapper, shared);
}
shared.setStringValue(selectSql);
} catch (IllegalAccessException ex) {
LOGGER.warn("Failed to set sqlSelect on wrapper {}: {}", wrapper.getClass().getName(), ex.getMessage());
}
}

private static Optional<Field> locateSqlSelectField(Class<?> wrapperClass) {
Class<?> current = wrapperClass;
while (current != null && current != Object.class) {
try {
Field field = current.getDeclaredField("sqlSelect");
field.setAccessible(true);
return Optional.of(field);
} catch (NoSuchFieldException ignored) {
current = current.getSuperclass();
}
}
return Optional.empty();
}

private static Object extractValue(Map<String, Object> row, String alias) {
if (row == null || row.isEmpty()) {
return null;
}
if (row.containsKey(alias)) {
return row.get(alias);
}
for (Map.Entry<String, Object> entry : row.entrySet()) {
if (alias.equalsIgnoreCase(entry.getKey())) {
return entry.getValue();
}
}
return null;
}

private static BigDecimal toBigDecimal(Object value) {
if (value == null) {
return BigDecimal.ZERO;
}
if (value instanceof BigDecimal decimal) {
return decimal;
}
if (value instanceof BigInteger bigInteger) {
return new BigDecimal(bigInteger);
}
if (value instanceof Number number) {
return new BigDecimal(number.toString());
}
if (value instanceof CharSequence sequence) {
String text = sequence.toString().trim();
if (text.isEmpty()) {
return BigDecimal.ZERO;
}
try {
return new BigDecimal(text);
} catch (NumberFormatException ex) {
LOGGER.warn("Unable to parse numeric summary value '{}': {}", text, ex.getMessage());
return BigDecimal.ZERO;
}
}
LOGGER.warn("Unsupported summary value type: {}", value.getClass().getName());
return BigDecimal.ZERO;
}
}

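In effect, executeSum clones the caller's wrapper (keeping its WHERE conditions but not the page bounds), swaps the select list for the SUM(...) AS alias segments built by PageSumFieldMeta, and runs a single selectMaps query. A hand-written equivalent for one field, reusing the hypothetical OrderDO/OrderMapper from the earlier sketch:

import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;

class SumByHand {
    static BigDecimal sumPayAmount(OrderMapper mapper, QueryWrapper<OrderDO> original) {
        QueryWrapper<OrderDO> sumWrapper = original.clone(); // same WHERE, no pagination
        sumWrapper.select("SUM(pay_amount) AS payAmount");   // what buildSelectSegment() produces
        List<Map<String, Object>> rows = mapper.selectMaps(sumWrapper);
        Object value = rows.isEmpty() ? null : rows.get(0).get("payAmount");
        // toBigDecimal(...) above normalizes null/blank results to ZERO the same way
        return value == null ? BigDecimal.ZERO : new BigDecimal(value.toString());
    }
}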
@@ -0,0 +1,79 @@
package com.zt.plat.framework.mybatis.core.sum;

import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.core.handlers.AnnotationHandler;
import com.zt.plat.framework.common.annotation.PageSum;
import org.springframework.core.annotation.AnnotationUtils;

import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.util.Collections;
import java.util.Map;

/**
* Lets {@link PageSum#exist()} automatically synthesize {@link TableField#exist()} = false,
* so DO classes no longer need to repeat {@code @TableField(exist = false)}.
*/
public class PageSumTableFieldAnnotationHandler implements AnnotationHandler {

private static final AnnotationHandler DEFAULT_HANDLER = new AnnotationHandler() { };
/** Pre-built attribute map for @TableField(exist = false), avoiding repeated Map creation */
private static final Map<String, Object> TABLE_FIELD_EXIST_FALSE_ATTRIBUTES =
Collections.singletonMap("exist", Boolean.FALSE);

private final AnnotationHandler delegate;

public PageSumTableFieldAnnotationHandler(AnnotationHandler delegate) {
this.delegate = delegate != null ? delegate : DEFAULT_HANDLER;
}

@Override
public <T extends Annotation> T getAnnotation(Class<?> target, Class<T> annotationClass) {
return delegate.getAnnotation(target, annotationClass);
}

@Override
public <T extends Annotation> boolean isAnnotationPresent(Class<?> target, Class<T> annotationClass) {
return delegate.isAnnotationPresent(target, annotationClass);
}

@Override
public <T extends Annotation> T getAnnotation(java.lang.reflect.Method method, Class<T> annotationClass) {
return delegate.getAnnotation(method, annotationClass);
}

@Override
public <T extends Annotation> boolean isAnnotationPresent(java.lang.reflect.Method method, Class<T> annotationClass) {
return delegate.isAnnotationPresent(method, annotationClass);
}

@Override
public <T extends Annotation> T getAnnotation(Field field, Class<T> annotationClass) {
T annotation = delegate.getAnnotation(field, annotationClass);
if (annotation != null || annotationClass != TableField.class) {
return annotation;
}
PageSum pageSum = delegate.getAnnotation(field, PageSum.class);
if (pageSum != null && !pageSum.exist()) {
// When the field exists only for page summaries, synthesize a TableField annotation with exist = false
return annotationClass.cast(synthesizeTableField(field));
}
return null;
}

@Override
public <T extends Annotation> boolean isAnnotationPresent(Field field, Class<T> annotationClass) {
if (delegate.isAnnotationPresent(field, annotationClass)) {
return true;
}
if (annotationClass != TableField.class) {
return false;
}
PageSum pageSum = delegate.getAnnotation(field, PageSum.class);
return pageSum != null && !pageSum.exist();
}

private static TableField synthesizeTableField(Field field) {
return AnnotationUtils.synthesizeAnnotation(TABLE_FIELD_EXIST_FALSE_ATTRIBUTES, TableField.class, field);
}
}
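The handler leans on Spring's AnnotationUtils.synthesizeAnnotation to conjure a live TableField instance from a plain attribute map. A standalone demo of that trick (the class below is a sketch, not part of the diff):

import com.baomidou.mybatisplus.annotation.TableField;
import org.springframework.core.annotation.AnnotationUtils;
import java.util.Collections;

public class SynthesizeDemo {
    public static void main(String[] args) {
        // Build an annotation instance at runtime; unspecified attributes keep their defaults
        TableField tf = AnnotationUtils.synthesizeAnnotation(
                Collections.singletonMap("exist", Boolean.FALSE), TableField.class, null);
        System.out.println(tf.exist()); // false -> MyBatis Plus omits the column from generated SQL
    }
}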
@@ -0,0 +1,68 @@
package com.zt.plat.framework.mybatis.core.sum;

import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.zt.plat.framework.common.annotation.PageSum;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.framework.mybatis.core.mapper.BaseMapperX;
import org.junit.jupiter.api.Test;

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;

class PageSumSupportTest {

@Test
void shouldAttachSummaryWhenAnnotationPresent() {
TestMapper mapper = createMapperProxy();
PageResult<TestEntity> pageResult = new PageResult<>(Collections.emptyList(), 0L);
QueryWrapper<TestEntity> wrapper = new QueryWrapper<>();

PageSumSupport.tryAttachSummary(mapper, wrapper, pageResult);

assertFalse(pageResult.getSummary().isEmpty());
assertEquals(new BigDecimal("123.45"), pageResult.getSummary().get("amount"));
assertEquals(new BigDecimal("50"), pageResult.getSummary().get("virtualAmount"));
}

private TestMapper createMapperProxy() {
InvocationHandler handler = new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (method.getDeclaringClass() == Object.class) {
return method.invoke(this, args);
}
if ("selectMaps".equals(method.getName())) {
Map<String, Object> row = new HashMap<>();
row.put("amount", new BigDecimal("123.45"));
row.put("virtualAmount", new BigDecimal("50"));
return List.of(row);
}
return Collections.emptyList();
}
};
return (TestMapper) Proxy.newProxyInstance(
TestMapper.class.getClassLoader(),
new Class[]{TestMapper.class},
handler);
}

interface TestMapper extends BaseMapperX<TestEntity> {
}

static class TestEntity {
@PageSum(column = "amount")
private BigDecimal amount;

@PageSum(column = "virtual_column", exist = false)
private BigDecimal virtualAmount;
}
}
@@ -73,9 +73,11 @@ public class LoginUser {

private Long visitCompanyId;
private String visitCompanyName;
private String visitCompanyCode;

private Long visitDeptId;
private String visitDeptName;
private String visitDeptCode;

public void setContext(String key, Object value) {
if (context == null) {

@@ -1,5 +1,6 @@
package com.zt.plat.framework.swagger.config;

import com.zt.plat.framework.common.enums.RpcConstants;
import io.swagger.v3.oas.models.Components;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.info.Contact;
@@ -11,6 +12,7 @@ import io.swagger.v3.oas.models.parameters.Parameter;
import io.swagger.v3.oas.models.security.SecurityRequirement;
import io.swagger.v3.oas.models.security.SecurityScheme;
import org.springdoc.core.customizers.OpenApiBuilderCustomizer;
import org.springdoc.core.customizers.OpenApiCustomizer;
import org.springdoc.core.customizers.ServerBaseUrlCustomizer;
import org.springdoc.core.models.GroupedOpenApi;
import org.springdoc.core.properties.SpringDocConfigProperties;
@@ -123,12 +125,26 @@ public class ZtSwaggerAutoConfiguration {
return GroupedOpenApi.builder()
.group(group)
.pathsToMatch("/admin-api/" + path + "/**", "/app-api/" + path + "/**")
.pathsToExclude(RpcConstants.RPC_API_PREFIX + "/**")
.addOperationCustomizer((operation, handlerMethod) -> operation
.addParametersItem(buildTenantHeaderParameter())
.addParametersItem(buildSecurityHeaderParameter()))
.build();
}

@Bean
public OpenApiCustomizer rpcApiPathExclusionCustomiser() {
return openApi -> {
if (openApi == null || openApi.getPaths() == null) {
return;
}
openApi.getPaths().entrySet().removeIf(entry -> {
String path = entry.getKey();
return path != null && path.startsWith(RpcConstants.RPC_API_PREFIX);
});
};
}

/**
* Build the tenant id request header parameter
*

@@ -2,6 +2,7 @@ package com.zt.plat.gateway.jackson;

import cn.hutool.core.collection.CollUtil;
import com.zt.plat.framework.common.util.json.JsonUtils;
import com.zt.plat.framework.common.util.json.databind.LongTypeSerializerModifier;
import com.zt.plat.framework.common.util.json.databind.NumberSerializer;
import com.zt.plat.framework.common.util.json.databind.TimestampLocalDateTimeDeserializer;
import com.zt.plat.framework.common.util.json.databind.TimestampLocalDateTimeSerializer;
@@ -39,6 +40,7 @@ public class JacksonAutoConfiguration {
// Add LocalDateTime serialization/deserialization rules using Long timestamps
.addSerializer(LocalDateTime.class, TimestampLocalDateTimeSerializer.INSTANCE)
.addDeserializer(LocalDateTime.class, TimestampLocalDateTimeDeserializer.INSTANCE);
simpleModule.setSerializerModifier(new LongTypeSerializerModifier());
// 1.2 Register the module on each objectMapper
objectMappers.forEach(objectMapper -> objectMapper.registerModule(simpleModule));

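Registering LongTypeSerializerModifier here presumably serves the usual purpose of such modifiers: emitting 64-bit ids as JSON strings so JavaScript clients do not silently round them (JS numbers are IEEE-754 doubles, exact only up to 2^53). A quick illustration of the failure mode, assuming that intent:

public class PrecisionDemo {
    public static void main(String[] args) {
        long id = 1234567890123456789L;
        // A JS client receiving this as a bare JSON number parses it as a double,
        // so two distinct ids can collide once the magnitude exceeds 2^53:
        System.out.println((double) id == (double) (id + 1)); // true
    }
}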
@@ -4,8 +4,8 @@ spring:
cloud:
nacos:
server-addr: 172.16.46.63:30848 # Nacos server address
username: # Nacos username
password: # Nacos password
username: ${config.username} # Nacos username
password: ${config.password} # Nacos password
discovery: # Service discovery settings
namespace: ${config.namespace} # Namespace, replaced dynamically via Maven profile resource filtering
group: DEFAULT_GROUP # Nacos configuration group to use, defaults to DEFAULT_GROUP

@@ -5,6 +5,10 @@
<springProperty scope="context" name="zt.info.base-package" source="zt.info.base-package"/>
<!-- Output format: %d is the date, %X{tid} the SkyWalking trace id, %thread the thread name, %-5level the level left-padded to 5 characters, %msg the log message, %n a newline -->
<property name="PATTERN_DEFAULT" value="%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} | %highlight(${LOG_LEVEL_PATTERN:-%5p} ${PID:- }) | %boldYellow(%thread [%tid]) %boldGreen(%-40.40logger{39}) | %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<!-- Application name -->
<springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
<!-- Log output path -->
<property name="LOG_DIR" value="${user.home}/logs/${spring.application.name}"/>

<!-- Console appender -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
@@ -31,7 +35,7 @@
<!-- Whether to clean historical logs on startup; generally not recommended -->
<cleanHistoryOnStart>${LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START:-false}</cleanHistoryOnStart>
<!-- Roll the log file once it reaches this size -->
<maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-10MB}</maxFileSize>
<maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-50MB}</maxFileSize>
<!-- Total size cap of all log files; 0 means unlimited -->
<totalSizeCap>${LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP:-0}</totalSizeCap>
<!-- Number of days to keep log files -->
@@ -56,18 +60,39 @@
</encoder>
</appender>

<!-- ERROR-level log -->
<appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_DIR}-error.log</file>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_DIR}-error.%d{yyyy-MM-dd}.log</fileNamePattern>
<maxHistory>30</maxHistory> <!-- Keep 30 days of logs -->
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>

<!-- Logback log levels: FATAL > ERROR > WARN > INFO > DEBUG -->
<!-- Local environment -->
<springProfile name="local">
<root level="INFO">
<springProfile name="local,dev">
<root level="WARN">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ERROR"/>
<appender-ref ref="GRPC"/> <!-- In the local environment, comment this line out if you do not want to ship logs to SkyWalking -->
<appender-ref ref="ASYNC"/> <!-- In the local environment, comment this line out if you do not want to write log files -->
</root>
</springProfile>

<!-- Other environments -->
<springProfile name="dev,test,stage,prod,default">
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ERROR"/>
<appender-ref ref="ASYNC"/>
<appender-ref ref="GRPC"/>
</root>

@@ -8,17 +8,25 @@ import com.zt.plat.module.bpm.framework.flowable.core.event.BpmProcessInstanceEv
import com.zt.plat.module.system.api.user.AdminUserApi;
import org.flowable.common.engine.api.delegate.FlowableFunctionDelegate;
import org.flowable.common.engine.api.delegate.event.FlowableEventListener;
import org.flowable.engine.ProcessEngineConfiguration;
import org.flowable.spring.SpringProcessEngineConfiguration;
import org.flowable.spring.boot.EngineConfigurationConfigurer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.AsyncListenableTaskExecutor;
import org.springframework.jdbc.datasource.DataSourceUtils;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.util.List;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

/**
* Flowable configuration class for the BPM module
@@ -28,6 +36,8 @@ import java.util.List;
@Configuration(proxyBeanMethods = false)
public class BpmFlowableConfiguration {

private static final Logger log = LoggerFactory.getLogger(BpmFlowableConfiguration.class);

/**
* Following {@link org.flowable.spring.boot.FlowableJobConfiguration}, create the corresponding AsyncListenableTaskExecutor bean
*
@@ -69,6 +79,37 @@ public class BpmFlowableConfiguration {
};
}

@Bean
public EngineConfigurationConfigurer<SpringProcessEngineConfiguration> dmProcessEngineConfigurationConfigurer(DataSource dataSource) {
return configuration -> {
try {
configureDmCompatibility(configuration, dataSource);
} catch (SQLException ex) {
log.warn("Failed to inspect datasource for DM compatibility; Flowable will keep default settings", ex);
}
};
}

private void configureDmCompatibility(SpringProcessEngineConfiguration configuration, DataSource dataSource) throws SQLException {
Connection connection = null;
try {
connection = DataSourceUtils.getConnection(dataSource);
DatabaseMetaData metaData = connection.getMetaData();
String productName = metaData.getDatabaseProductName();
String jdbcUrl = metaData.getURL();
boolean dmProduct = productName != null && productName.toLowerCase().contains("dm");
boolean dmUrl = jdbcUrl != null && jdbcUrl.toLowerCase().startsWith("jdbc:dm");
if (!dmProduct && !dmUrl) {
return;
}
log.info("Detected DM database (product='{}'); enabling Flowable Oracle compatibility with automatic schema updates", productName);
configuration.setDatabaseSchemaUpdate(ProcessEngineConfiguration.DB_SCHEMA_UPDATE_TRUE);
configuration.setDatabaseType("oracle");
} finally {
DataSourceUtils.releaseConnection(connection, dataSource);
}
}

// =========== Approver-related beans ==========

@Bean

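The same product-name/URL sniffing can be exercised outside Spring; a standalone sketch assuming any DataSource (the class name is illustrative):

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

public class DmDetectDemo {
    static boolean isDm(DataSource ds) throws SQLException {
        try (Connection conn = ds.getConnection()) {
            DatabaseMetaData md = conn.getMetaData();
            String product = md.getDatabaseProductName();
            String url = md.getURL();
            // Mirrors configureDmCompatibility above: match on product name or jdbc:dm URL prefix
            return (product != null && product.toLowerCase().contains("dm"))
                    || (url != null && url.toLowerCase().startsWith("jdbc:dm"));
        }
    }
}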
@@ -5,6 +5,25 @@

package liquibase.database.core;

import java.lang.reflect.Method;
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import liquibase.CatalogAndSchema;
import liquibase.GlobalConfiguration;
import liquibase.Scope;
@@ -23,17 +42,15 @@ import liquibase.statement.UniqueConstraint;
import liquibase.statement.core.RawCallStatement;
import liquibase.statement.core.RawParameterizedSqlStatement;
import liquibase.structure.DatabaseObject;
import liquibase.structure.core.*;
import liquibase.structure.core.Catalog;
import liquibase.structure.core.Column;
import liquibase.structure.core.Index;
import liquibase.structure.core.PrimaryKey;
import liquibase.structure.core.Schema;
import liquibase.util.JdbcUtil;
import liquibase.util.StringUtil;
import org.apache.commons.lang3.StringUtils;

import java.lang.reflect.Method;
import java.sql.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DmDatabase extends AbstractJdbcDatabase {
private static final String PROXY_USER_REGEX = ".*(?:thin|oci)\\:(.+)/@.*";
public static final Pattern PROXY_USER_PATTERN = Pattern.compile(".*(?:thin|oci)\\:(.+)/@.*");
@@ -98,6 +115,7 @@ public class DmDatabase extends AbstractJdbcDatabase {
public void setConnection(DatabaseConnection conn) {
this.reservedWords.addAll(Arrays.asList("GROUP", "USER", "SESSION", "PASSWORD", "RESOURCE", "START", "SIZE", "UID", "DESC", "ORDER"));
Connection sqlConn = null;
boolean dmDatabase = false;
if (!(conn instanceof OfflineConnection)) {
try {
if (conn instanceof JdbcConnection) {
@@ -124,26 +142,42 @@ public class DmDatabase extends AbstractJdbcDatabase {
Scope.getCurrentScope().getLog(this.getClass()).info("Could not set remarks reporting on OracleDatabase: " + e.getMessage());
}

CallableStatement statement = null;

try {
statement = sqlConn.prepareCall("{call DBMS_UTILITY.DB_VERSION(?,?)}");
statement.registerOutParameter(1, 12);
statement.registerOutParameter(2, 12);
statement.execute();
String compatibleVersion = statement.getString(2);
if (compatibleVersion != null) {
Matcher majorVersionMatcher = VERSION_PATTERN.matcher(compatibleVersion);
if (majorVersionMatcher.matches()) {
this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1));
this.databaseMinorVersion = Integer.valueOf(majorVersionMatcher.group(2));
DatabaseMetaData metaData = sqlConn.getMetaData();
if (metaData != null) {
String productName = metaData.getDatabaseProductName();
dmDatabase = productName != null && PRODUCT_NAME.equalsIgnoreCase(productName);
if (dmDatabase) {
this.databaseMajorVersion = metaData.getDatabaseMajorVersion();
this.databaseMinorVersion = metaData.getDatabaseMinorVersion();
}
}
} catch (SQLException e) {
String message = "Cannot read from DBMS_UTILITY.DB_VERSION: " + e.getMessage();
Scope.getCurrentScope().getLog(this.getClass()).info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message);
} finally {
JdbcUtil.closeStatement(statement);
Scope.getCurrentScope().getLog(this.getClass()).info("Unable to inspect database metadata for DM version detection: " + e.getMessage());
}

if (!dmDatabase) {
CallableStatement statement = null;

try {
statement = sqlConn.prepareCall("{call DBMS_UTILITY.DB_VERSION(?,?)}");
statement.registerOutParameter(1, 12);
statement.registerOutParameter(2, 12);
statement.execute();
String compatibleVersion = statement.getString(2);
if (compatibleVersion != null) {
Matcher majorVersionMatcher = VERSION_PATTERN.matcher(compatibleVersion);
if (majorVersionMatcher.matches()) {
this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1));
this.databaseMinorVersion = Integer.valueOf(majorVersionMatcher.group(2));
}
}
} catch (SQLException e) {
String message = "Cannot read from DBMS_UTILITY.DB_VERSION: " + e.getMessage();
Scope.getCurrentScope().getLog(this.getClass()).info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message);
} finally {
JdbcUtil.closeStatement(statement);
}
}

if (GlobalConfiguration.DDL_LOCK_TIMEOUT.getCurrentValue() != null) {
@@ -250,7 +284,15 @@
}

public boolean isCorrectDatabaseImplementation(DatabaseConnection conn) throws DatabaseException {
return "oracle".equalsIgnoreCase(conn.getDatabaseProductName());
String databaseProductName = conn == null ? null : conn.getDatabaseProductName();
if (databaseProductName == null) {
return false;
}
if (PRODUCT_NAME.equalsIgnoreCase(databaseProductName)) {
return true;
}
// Flowable historically mapped DM onto Oracle metadata, so Oracle is also accepted here for compatibility
return "oracle".equalsIgnoreCase(databaseProductName);
}

public String getDefaultDriver(String url) {

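The refactor promotes the proxy-user regex from a plain String to a precompiled public Pattern. A small demo of what it extracts (the JDBC URL below is illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ProxyUserDemo {
    public static void main(String[] args) {
        Pattern p = Pattern.compile(".*(?:thin|oci)\\:(.+)/@.*");
        Matcher m = p.matcher("jdbc:oracle:thin:proxyuser/@//db-host:1521/orcl");
        if (m.matches()) {
            System.out.println(m.group(1)); // proxyuser
        }
    }
}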
@@ -0,0 +1,32 @@
package liquibase.datatype.core;

import liquibase.database.Database;
import liquibase.database.core.DmDatabase;
import liquibase.datatype.DataTypeInfo;
import liquibase.datatype.DatabaseDataType;

@DataTypeInfo(
name = "boolean",
aliases = {"java.sql.Types.BOOLEAN", "java.lang.Boolean", "bit", "bool"},
minParameters = 0,
maxParameters = 0,
priority = 2
)
public class DmBooleanType extends BooleanType {

@Override
public boolean supports(Database database) {
if (database instanceof DmDatabase) {
return true;
}
return super.supports(database);
}

@Override
public DatabaseDataType toDatabaseDataType(Database database) {
if (database instanceof DmDatabase) {
return new DatabaseDataType("NUMBER", 1);
}
return super.toDatabaseDataType(database);
}
}
File diff suppressed because it is too large
@@ -0,0 +1,354 @@
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.flowable.common.engine.impl.db;

import org.apache.ibatis.session.SqlSessionFactory;
import org.flowable.common.engine.api.FlowableException;
import org.flowable.common.engine.impl.context.Context;
import org.flowable.common.engine.impl.interceptor.CommandContext;
import org.flowable.common.engine.impl.interceptor.Session;
import org.flowable.common.engine.impl.interceptor.SessionFactory;
import org.flowable.common.engine.impl.persistence.cache.EntityCache;
import org.flowable.common.engine.impl.persistence.entity.Entity;

import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

/**
* @author Tom Baeyens
* @author Joram Barrez
*/
public class DbSqlSessionFactory implements SessionFactory {

protected Map<String, Map<String, String>> databaseSpecificStatements = new HashMap<>();

protected String databaseType;
protected String databaseTablePrefix = "";
protected boolean tablePrefixIsSchema;

protected String databaseCatalog;
protected String databaseSchema;
protected SqlSessionFactory sqlSessionFactory;
protected Map<String, String> statementMappings;

protected Map<Class<?>, String> insertStatements = new ConcurrentHashMap<>();
protected Map<Class<?>, String> updateStatements = new ConcurrentHashMap<>();
protected Map<Class<?>, String> deleteStatements = new ConcurrentHashMap<>();
protected Map<Class<?>, String> selectStatements = new ConcurrentHashMap<>();

protected List<Class<? extends Entity>> insertionOrder = new ArrayList<>();
protected List<Class<? extends Entity>> deletionOrder = new ArrayList<>();

protected boolean isDbHistoryUsed = true;

protected Set<Class<? extends Entity>> bulkInserteableEntityClasses = new HashSet<>();
protected Map<Class<?>, String> bulkInsertStatements = new ConcurrentHashMap<>();

protected int maxNrOfStatementsInBulkInsert = 100;

protected Map<String, Class<?>> logicalNameToClassMapping = new ConcurrentHashMap<>();

protected boolean usePrefixId;

public DbSqlSessionFactory(boolean usePrefixId) {
this.usePrefixId = usePrefixId;
}

@Override
public Class<?> getSessionType() {
return DbSqlSession.class;
}

@Override
public Session openSession(CommandContext commandContext) {
DbSqlSession dbSqlSession = createDbSqlSession();
// The system is adapted for DM: if the schema is empty, read it from the connection
try {
if (getDatabaseSchema() == null || getDatabaseSchema().length() == 0) {
setDatabaseSchema(dbSqlSession.getSqlSession().getConnection().getSchema());
}
} catch (SQLException e) {
throw new RuntimeException(e);
}

if (getDatabaseSchema() != null && getDatabaseSchema().length() > 0) {
try {
dbSqlSession.getSqlSession().getConnection().setSchema(getDatabaseSchema());
} catch (SQLException e) {
throw new FlowableException("Could not set database schema on connection", e);
}
}
if (getDatabaseCatalog() != null && getDatabaseCatalog().length() > 0) {
try {
dbSqlSession.getSqlSession().getConnection().setCatalog(getDatabaseCatalog());
} catch (SQLException e) {
throw new FlowableException("Could not set database catalog on connection", e);
}
}
if (dbSqlSession.getSqlSession().getConnection() == null) {
throw new FlowableException("Invalid dbSqlSession: no active connection found");
}
return dbSqlSession;
}

protected DbSqlSession createDbSqlSession() {
return new DbSqlSession(this, Context.getCommandContext().getSession(EntityCache.class));
}

// insert, update and delete statements
// /////////////////////////////////////

public String getInsertStatement(Entity object) {
return getStatement(object.getClass(), insertStatements, "insert");
}

public String getInsertStatement(Class<? extends Entity> clazz) {
return getStatement(clazz, insertStatements, "insert");
}

public String getUpdateStatement(Entity object) {
return getStatement(object.getClass(), updateStatements, "update");
}

public String getDeleteStatement(Class<?> entityClass) {
return getStatement(entityClass, deleteStatements, "delete");
}

public String getSelectStatement(Class<?> entityClass) {
return getStatement(entityClass, selectStatements, "select");
}

protected String getStatement(Class<?> entityClass, Map<Class<?>, String> cachedStatements, String prefix) {
String statement = cachedStatements.get(entityClass);
if (statement != null) {
return statement;
}
statement = prefix + entityClass.getSimpleName();
if (statement.endsWith("Impl")) {
statement = statement.substring(0, statement.length() - 10); // removing 'entityImpl'
} else {
statement = statement.substring(0, statement.length() - 6); // removing 'entity'
}
cachedStatements.put(entityClass, statement);
return statement;
}

// db specific mappings
// /////////////////////////////////////////////////////

protected void addDatabaseSpecificStatement(String databaseType, String activitiStatement, String ibatisStatement) {
Map<String, String> specificStatements = databaseSpecificStatements.get(databaseType);
if (specificStatements == null) {
specificStatements = new HashMap<>();
databaseSpecificStatements.put(databaseType, specificStatements);
}
specificStatements.put(activitiStatement, ibatisStatement);
}

public String mapStatement(String statement) {
if (statementMappings == null) {
return statement;
}
String mappedStatement = statementMappings.get(statement);
return (mappedStatement != null ? mappedStatement : statement);
}

// customized getters and setters
// ///////////////////////////////////////////

public void setDatabaseType(String databaseType) {
this.databaseType = databaseType;
this.statementMappings = databaseSpecificStatements.get(databaseType);
}

public boolean isMysql() {
return "mysql".equals(getDatabaseType());
}

public boolean isOracle() {
return "oracle".equals(getDatabaseType());
}

public Boolean isBulkInsertable(Class<? extends Entity> entityClass) {
return bulkInserteableEntityClasses != null && bulkInserteableEntityClasses.contains(entityClass);
}

@SuppressWarnings("rawtypes")
public String getBulkInsertStatement(Class clazz) {
return getStatement(clazz, bulkInsertStatements, "bulkInsert");
}

public Set<Class<? extends Entity>> getBulkInserteableEntityClasses() {
return bulkInserteableEntityClasses;
}

public void setBulkInserteableEntityClasses(Set<Class<? extends Entity>> bulkInserteableEntityClasses) {
this.bulkInserteableEntityClasses = bulkInserteableEntityClasses;
}

public int getMaxNrOfStatementsInBulkInsert() {
return maxNrOfStatementsInBulkInsert;
}

public void setMaxNrOfStatementsInBulkInsert(int maxNrOfStatementsInBulkInsert) {
this.maxNrOfStatementsInBulkInsert = maxNrOfStatementsInBulkInsert;
}

public Map<Class<?>, String> getBulkInsertStatements() {
return bulkInsertStatements;
}

public void setBulkInsertStatements(Map<Class<?>, String> bulkInsertStatements) {
this.bulkInsertStatements = bulkInsertStatements;
}

// getters and setters //////////////////////////////////////////////////////

public SqlSessionFactory getSqlSessionFactory() {
return sqlSessionFactory;
}

public void setSqlSessionFactory(SqlSessionFactory sqlSessionFactory) {
this.sqlSessionFactory = sqlSessionFactory;
}

public String getDatabaseType() {
return databaseType;
}

public Map<String, Map<String, String>> getDatabaseSpecificStatements() {
return databaseSpecificStatements;
}

public void setDatabaseSpecificStatements(Map<String, Map<String, String>> databaseSpecificStatements) {
this.databaseSpecificStatements = databaseSpecificStatements;
}

public Map<String, String> getStatementMappings() {
return statementMappings;
}

public void setStatementMappings(Map<String, String> statementMappings) {
this.statementMappings = statementMappings;
}

public Map<Class<?>, String> getInsertStatements() {
return insertStatements;
}

public void setInsertStatements(Map<Class<?>, String> insertStatements) {
this.insertStatements = insertStatements;
}

public Map<Class<?>, String> getUpdateStatements() {
return updateStatements;
}

public void setUpdateStatements(Map<Class<?>, String> updateStatements) {
this.updateStatements = updateStatements;
}

public Map<Class<?>, String> getDeleteStatements() {
return deleteStatements;
}

public void setDeleteStatements(Map<Class<?>, String> deleteStatements) {
this.deleteStatements = deleteStatements;
}

public Map<Class<?>, String> getSelectStatements() {
return selectStatements;
}

public void setSelectStatements(Map<Class<?>, String> selectStatements) {
this.selectStatements = selectStatements;
}

public boolean isDbHistoryUsed() {
return isDbHistoryUsed;
}

public void setDbHistoryUsed(boolean isDbHistoryUsed) {
this.isDbHistoryUsed = isDbHistoryUsed;
}

public void setDatabaseTablePrefix(String databaseTablePrefix) {
this.databaseTablePrefix = databaseTablePrefix;
}

public String getDatabaseTablePrefix() {
return databaseTablePrefix;
}

public String getDatabaseCatalog() {
return databaseCatalog;
}

public void setDatabaseCatalog(String databaseCatalog) {
this.databaseCatalog = databaseCatalog;
}

public String getDatabaseSchema() {
return databaseSchema;
}

public void setDatabaseSchema(String databaseSchema) {
this.databaseSchema = databaseSchema;
}

public void setTablePrefixIsSchema(boolean tablePrefixIsSchema) {
this.tablePrefixIsSchema = tablePrefixIsSchema;
}

public boolean isTablePrefixIsSchema() {
return tablePrefixIsSchema;
}

public List<Class<? extends Entity>> getInsertionOrder() {
return insertionOrder;
}

public void setInsertionOrder(List<Class<? extends Entity>> insertionOrder) {
this.insertionOrder = insertionOrder;
}

public List<Class<? extends Entity>> getDeletionOrder() {
return deletionOrder;
}

public void setDeletionOrder(List<Class<? extends Entity>> deletionOrder) {
this.deletionOrder = deletionOrder;
}

public void addLogicalEntityClassMapping(String logicalName, Class<?> entityClass) {
logicalNameToClassMapping.put(logicalName, entityClass);
}

public Map<String, Class<?>> getLogicalNameToClassMapping() {
return logicalNameToClassMapping;
}

public void setLogicalNameToClassMapping(Map<String, Class<?>> logicalNameToClassMapping) {
this.logicalNameToClassMapping = logicalNameToClassMapping;
}

public boolean isUsePrefixId() {
return usePrefixId;
}

||||
public void setUsePrefixId(boolean usePrefixId) {
|
||||
this.usePrefixId = usePrefixId;
|
||||
}
|
||||
}
|
||||
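
For orientation (not part of the diff): the per-database statement tables above are keyed by database type and consulted by mapStatement, falling back to the original statement name when no mapping exists. A self-contained sketch of that mechanism, with illustrative statement names (a hypothetical "dm" type reusing an Oracle-flavoured statement, which is the pattern this changeset relies on):

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the mapping mechanism shown above; names are illustrative,
// not the engine's API surface.
public class StatementMappingSketch {
    private final Map<String, Map<String, String>> databaseSpecificStatements = new HashMap<>();
    private Map<String, String> statementMappings;

    void addDatabaseSpecificStatement(String databaseType, String statement, String mappedStatement) {
        databaseSpecificStatements.computeIfAbsent(databaseType, k -> new HashMap<>()).put(statement, mappedStatement);
    }

    void setDatabaseType(String databaseType) {
        // selecting a database type activates its mapping table, exactly as in setDatabaseType above
        statementMappings = databaseSpecificStatements.get(databaseType);
    }

    String mapStatement(String statement) {
        // unmapped statements pass through unchanged, as in mapStatement above
        return statementMappings == null ? statement : statementMappings.getOrDefault(statement, statement);
    }

    public static void main(String[] args) {
        StatementMappingSketch f = new StatementMappingSketch();
        f.addDatabaseSpecificStatement("dm", "selectNextJobs", "selectNextJobs_oracle");
        f.setDatabaseType("dm");
        System.out.println(f.mapStatement("selectNextJobs")); // selectNextJobs_oracle
        System.out.println(f.mapStatement("selectTask"));     // selectTask (pass-through)
    }
}
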
@@ -13,6 +13,7 @@ liquibase.database.core.MariaDBDatabase
liquibase.database.core.MockDatabase
liquibase.database.core.MySQLDatabase
liquibase.database.core.OracleDatabase
liquibase.database.core.DmDatabase
liquibase.database.core.PostgresDatabase
liquibase.database.core.SQLiteDatabase
liquibase.database.core.SybaseASADatabase

@@ -0,0 +1 @@
liquibase.datatype.core.DmBooleanType
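
The two hunks above register DM (Dameng) support with Liquibase through its service-provider list files. For orientation only, a minimal sketch of what such a DmDatabase could look like if it inherits Oracle SQL generation, consistent with the `database-type: oracle` setting later in this changeset; the actual implementation in this repo is not shown in the diff, so every detail below is an assumption:

import liquibase.database.DatabaseConnection;
import liquibase.database.core.OracleDatabase;
import liquibase.exception.DatabaseException;

// Illustrative sketch: a DM dialect that piggybacks on Oracle SQL generation.
public class DmDatabase extends OracleDatabase {

    @Override
    public String getShortName() {
        return "dm";
    }

    @Override
    public String getDefaultDatabaseProductName() {
        return "DM DBMS";
    }

    @Override
    public boolean isCorrectDatabaseImplementation(DatabaseConnection conn) throws DatabaseException {
        // naive product-name check; a real implementation would be stricter
        return conn.getDatabaseProductName().toUpperCase().contains("DM");
    }

    @Override
    public String getDefaultDriver(String url) {
        return url != null && url.startsWith("jdbc:dm:") ? "dm.jdbc.driver.DmDriver" : null;
    }

    @Override
    public int getPriority() {
        return PRIORITY_DATABASE; // win over the generic Oracle match for DM connections
    }
}
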
@@ -39,14 +39,14 @@ spring:
      primary: master
      datasource:
        master:
          url: jdbc:mysql://172.16.46.247:4787/ruoyi-vue-pro?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true&rewriteBatchedStatements=true # sample MySQL Connector/J 8.X connection
          username: jygk-test
          password: Zgty@0527
          url: jdbc:dm://172.16.46.247:1050?schema=BPM
          username: SYSDBA
          password: pgbsci6ddJ6Sqj@e
        slave: # simulated read replica; adjust to your needs
          lazy: true # enable lazy loading to keep startup fast
          url: jdbc:mysql://172.16.46.247:4787/ruoyi-vue-pro?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true&rewriteBatchedStatements=true # sample MySQL Connector/J 8.X connection
          username: jygk-test
          password: Zgty@0527
          url: jdbc:dm://172.16.46.247:1050?schema=BPM
          username: SYSDBA
          password: pgbsci6ddJ6Sqj@e

  # Redis configuration. The Redisson defaults are sufficient; tuning is rarely needed
  data:
@@ -56,6 +56,11 @@ spring:
      database: 0 # database index
      # password: 123456 # password; recommended in production

# In the DM scenario, Flowable must be identified as Oracle so it can upgrade the table schema automatically
flowable:
  database-schema-update: true
  database-type: oracle

--- #################### MQ message queue configuration ####################

--- #################### Scheduled task configuration ####################

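Equivalently in code, a hedged sketch of what the `flowable:` block above amounts to, using Flowable's Spring Boot EngineConfigurationConfigurer hook; the class and bean names here are illustrative, not taken from this changeset:

import org.flowable.spring.SpringProcessEngineConfiguration;
import org.flowable.spring.boot.EngineConfigurationConfigurer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Illustrative only: force Flowable to treat the DM datasource as Oracle,
// skipping JDBC product-name detection, and auto-create/upgrade its tables.
@Configuration
public class FlowableDmConfig {

    @Bean
    public EngineConfigurationConfigurer<SpringProcessEngineConfiguration> dmAsOracleConfigurer() {
        return engineConfiguration -> {
            engineConfiguration.setDatabaseType("oracle");
            engineConfiguration.setDatabaseSchemaUpdate("true");
        };
    }
}

The Oracle-dialect DDL that this setting makes Flowable execute against DM follows below.
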
@@ -0,0 +1,41 @@
create table FLW_RU_BATCH (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER,
    TYPE_ VARCHAR2(64) not null,
    SEARCH_KEY_ VARCHAR2(255),
    SEARCH_KEY2_ VARCHAR2(255),
    CREATE_TIME_ TIMESTAMP(6) not null,
    COMPLETE_TIME_ TIMESTAMP(6),
    STATUS_ VARCHAR2(255),
    BATCH_DOC_ID_ VARCHAR2(64),
    TENANT_ID_ VARCHAR2(255) default '',
    primary key (ID_)
);

create table FLW_RU_BATCH_PART (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER,
    BATCH_ID_ VARCHAR2(64),
    TYPE_ VARCHAR2(64) not null,
    SCOPE_ID_ VARCHAR2(64),
    SUB_SCOPE_ID_ VARCHAR2(64),
    SCOPE_TYPE_ VARCHAR2(64),
    SEARCH_KEY_ VARCHAR2(255),
    SEARCH_KEY2_ VARCHAR2(255),
    CREATE_TIME_ TIMESTAMP(6) not null,
    COMPLETE_TIME_ TIMESTAMP(6),
    STATUS_ VARCHAR2(255),
    RESULT_DOC_ID_ VARCHAR2(64),
    TENANT_ID_ VARCHAR2(255) default '',
    primary key (ID_)
);

create index FLW_IDX_BATCH_PART on FLW_RU_BATCH_PART(BATCH_ID_);

alter table FLW_RU_BATCH_PART
    add constraint FLW_FK_BATCH_PART_PARENT
    foreign key (BATCH_ID_)
    references FLW_RU_BATCH (ID_);

insert into ACT_GE_PROPERTY values ('batch.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,4 @@
drop index FLW_IDX_BATCH_PART;

drop table FLW_RU_BATCH_PART;
drop table FLW_RU_BATCH;
@@ -0,0 +1,23 @@
create table ACT_GE_PROPERTY (
    NAME_ VARCHAR2(64),
    VALUE_ VARCHAR2(300),
    REV_ INTEGER,
    primary key (NAME_)
);

create table ACT_GE_BYTEARRAY (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    NAME_ VARCHAR2(255),
    DEPLOYMENT_ID_ VARCHAR2(64),
    BYTES_ BLOB,
    GENERATED_ NUMBER(1) CHECK (GENERATED_ IN (1,0)),
    primary key (ID_)
);

insert into ACT_GE_PROPERTY
values ('common.schema.version', '7.0.1.1', 1);

insert into ACT_GE_PROPERTY
values ('next.dbid', '1', 1);

@@ -0,0 +1,2 @@
drop table ACT_GE_BYTEARRAY;
drop table ACT_GE_PROPERTY;
@@ -0,0 +1,355 @@
create table ACT_RE_DEPLOYMENT (
    ID_ VARCHAR2(64),
    NAME_ VARCHAR2(255),
    CATEGORY_ VARCHAR2(255),
    KEY_ VARCHAR2(255),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    DEPLOY_TIME_ TIMESTAMP(6),
    DERIVED_FROM_ VARCHAR2(64),
    DERIVED_FROM_ROOT_ VARCHAR2(64),
    PARENT_DEPLOYMENT_ID_ VARCHAR2(255),
    ENGINE_VERSION_ VARCHAR2(255),
    primary key (ID_)
);

create table ACT_RE_MODEL (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER,
    NAME_ VARCHAR2(255),
    KEY_ VARCHAR2(255),
    CATEGORY_ VARCHAR2(255),
    CREATE_TIME_ TIMESTAMP(6),
    LAST_UPDATE_TIME_ TIMESTAMP(6),
    VERSION_ INTEGER,
    META_INFO_ VARCHAR2(2000),
    DEPLOYMENT_ID_ VARCHAR2(64),
    EDITOR_SOURCE_VALUE_ID_ VARCHAR2(64),
    EDITOR_SOURCE_EXTRA_VALUE_ID_ VARCHAR2(64),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create table ACT_RU_EXECUTION (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    PROC_INST_ID_ VARCHAR2(64),
    BUSINESS_KEY_ VARCHAR2(255),
    PARENT_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    SUPER_EXEC_ VARCHAR2(64),
    ROOT_PROC_INST_ID_ VARCHAR2(64),
    ACT_ID_ VARCHAR2(255),
    IS_ACTIVE_ NUMBER(1) CHECK (IS_ACTIVE_ IN (1,0)),
    IS_CONCURRENT_ NUMBER(1) CHECK (IS_CONCURRENT_ IN (1,0)),
    IS_SCOPE_ NUMBER(1) CHECK (IS_SCOPE_ IN (1,0)),
    IS_EVENT_SCOPE_ NUMBER(1) CHECK (IS_EVENT_SCOPE_ IN (1,0)),
    IS_MI_ROOT_ NUMBER(1) CHECK (IS_MI_ROOT_ IN (1,0)),
    SUSPENSION_STATE_ INTEGER,
    CACHED_ENT_STATE_ INTEGER,
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    NAME_ VARCHAR2(255),
    START_ACT_ID_ VARCHAR2(255),
    START_TIME_ TIMESTAMP(6),
    START_USER_ID_ VARCHAR2(255),
    LOCK_TIME_ TIMESTAMP(6),
    LOCK_OWNER_ VARCHAR2(255),
    IS_COUNT_ENABLED_ NUMBER(1) CHECK (IS_COUNT_ENABLED_ IN (1,0)),
    EVT_SUBSCR_COUNT_ INTEGER,
    TASK_COUNT_ INTEGER,
    JOB_COUNT_ INTEGER,
    TIMER_JOB_COUNT_ INTEGER,
    SUSP_JOB_COUNT_ INTEGER,
    DEADLETTER_JOB_COUNT_ INTEGER,
    EXTERNAL_WORKER_JOB_COUNT_ INTEGER,
    VAR_COUNT_ INTEGER,
    ID_LINK_COUNT_ INTEGER,
    CALLBACK_ID_ VARCHAR2(255),
    CALLBACK_TYPE_ VARCHAR2(255),
    REFERENCE_ID_ VARCHAR2(255),
    REFERENCE_TYPE_ VARCHAR2(255),
    PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
    BUSINESS_STATUS_ VARCHAR2(255),
    primary key (ID_)
);

create table ACT_RE_PROCDEF (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    CATEGORY_ VARCHAR2(255),
    NAME_ VARCHAR2(255),
    KEY_ VARCHAR2(255) NOT NULL,
    VERSION_ INTEGER NOT NULL,
    DEPLOYMENT_ID_ VARCHAR2(64),
    RESOURCE_NAME_ VARCHAR2(2000),
    DGRM_RESOURCE_NAME_ VARCHAR2(4000),
    DESCRIPTION_ VARCHAR2(2000),
    HAS_START_FORM_KEY_ NUMBER(1) CHECK (HAS_START_FORM_KEY_ IN (1,0)),
    HAS_GRAPHICAL_NOTATION_ NUMBER(1) CHECK (HAS_GRAPHICAL_NOTATION_ IN (1,0)),
    SUSPENSION_STATE_ INTEGER,
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    DERIVED_FROM_ VARCHAR2(64),
    DERIVED_FROM_ROOT_ VARCHAR2(64),
    DERIVED_VERSION_ INTEGER DEFAULT 0 NOT NULL,
    ENGINE_VERSION_ VARCHAR2(255),
    primary key (ID_)
);

create table ACT_EVT_LOG (
    LOG_NR_ NUMBER(19),
    TYPE_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    EXECUTION_ID_ VARCHAR2(64),
    TASK_ID_ VARCHAR2(64),
    TIME_STAMP_ TIMESTAMP(6) not null,
    USER_ID_ VARCHAR2(255),
    DATA_ BLOB,
    LOCK_OWNER_ VARCHAR2(255),
    LOCK_TIME_ TIMESTAMP(6) null,
    IS_PROCESSED_ NUMBER(3) default 0,
    primary key (LOG_NR_)
);

create sequence act_evt_log_seq;

create table ACT_PROCDEF_INFO (
    ID_ VARCHAR2(64) not null,
    PROC_DEF_ID_ VARCHAR2(64) not null,
    REV_ integer,
    INFO_JSON_ID_ VARCHAR2(64),
    primary key (ID_)
);

create table ACT_RU_ACTINST (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER default 1,
    PROC_DEF_ID_ VARCHAR2(64) not null,
    PROC_INST_ID_ VARCHAR2(64) not null,
    EXECUTION_ID_ VARCHAR2(64) not null,
    ACT_ID_ VARCHAR2(255) not null,
    TASK_ID_ VARCHAR2(64),
    CALL_PROC_INST_ID_ VARCHAR2(64),
    ACT_NAME_ VARCHAR2(255),
    ACT_TYPE_ VARCHAR2(255) not null,
    ASSIGNEE_ VARCHAR2(255),
    START_TIME_ TIMESTAMP(6) not null,
    END_TIME_ TIMESTAMP(6),
    DURATION_ NUMBER(19,0),
    TRANSACTION_ORDER_ INTEGER,
    DELETE_REASON_ VARCHAR2(2000),
    TENANT_ID_ VARCHAR2(255) default '',
    primary key (ID_)
);

create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_);
create index ACT_IDX_EXEC_ROOT on ACT_RU_EXECUTION(ROOT_PROC_INST_ID_);
create index ACT_IDX_EXEC_REF_ID_ on ACT_RU_EXECUTION(REFERENCE_ID_);
create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_);

create index ACT_IDX_RU_ACTI_START on ACT_RU_ACTINST(START_TIME_);
create index ACT_IDX_RU_ACTI_END on ACT_RU_ACTINST(END_TIME_);
create index ACT_IDX_RU_ACTI_PROC on ACT_RU_ACTINST(PROC_INST_ID_);
create index ACT_IDX_RU_ACTI_PROC_ACT on ACT_RU_ACTINST(PROC_INST_ID_, ACT_ID_);
create index ACT_IDX_RU_ACTI_EXEC on ACT_RU_ACTINST(EXECUTION_ID_);
create index ACT_IDX_RU_ACTI_EXEC_ACT on ACT_RU_ACTINST(EXECUTION_ID_, ACT_ID_);
create index ACT_IDX_RU_ACTI_TASK on ACT_RU_ACTINST(TASK_ID_);

create index ACT_IDX_BYTEAR_DEPL on ACT_GE_BYTEARRAY(DEPLOYMENT_ID_);
alter table ACT_GE_BYTEARRAY
    add constraint ACT_FK_BYTEARR_DEPL
    foreign key (DEPLOYMENT_ID_)
    references ACT_RE_DEPLOYMENT (ID_);

alter table ACT_RE_PROCDEF
    add constraint ACT_UNIQ_PROCDEF
    unique (KEY_,VERSION_, DERIVED_VERSION_, TENANT_ID_);

create index ACT_IDX_EXE_PROCINST on ACT_RU_EXECUTION(PROC_INST_ID_);
alter table ACT_RU_EXECUTION
    add constraint ACT_FK_EXE_PROCINST
    foreign key (PROC_INST_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_EXE_PARENT on ACT_RU_EXECUTION(PARENT_ID_);
alter table ACT_RU_EXECUTION
    add constraint ACT_FK_EXE_PARENT
    foreign key (PARENT_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_EXE_SUPER on ACT_RU_EXECUTION(SUPER_EXEC_);
alter table ACT_RU_EXECUTION
    add constraint ACT_FK_EXE_SUPER
    foreign key (SUPER_EXEC_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_EXE_PROCDEF on ACT_RU_EXECUTION(PROC_DEF_ID_);
alter table ACT_RU_EXECUTION
    add constraint ACT_FK_EXE_PROCDEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

create index ACT_IDX_TSKASS_TASK on ACT_RU_IDENTITYLINK(TASK_ID_);
alter table ACT_RU_IDENTITYLINK
    add constraint ACT_FK_TSKASS_TASK
    foreign key (TASK_ID_)
    references ACT_RU_TASK (ID_);

create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_);
alter table ACT_RU_IDENTITYLINK
    add constraint ACT_FK_ATHRZ_PROCEDEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

create index ACT_IDX_IDL_PROCINST on ACT_RU_IDENTITYLINK(PROC_INST_ID_);
alter table ACT_RU_IDENTITYLINK
    add constraint ACT_FK_IDL_PROCINST
    foreign key (PROC_INST_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_TASK_EXEC on ACT_RU_TASK(EXECUTION_ID_);
alter table ACT_RU_TASK
    add constraint ACT_FK_TASK_EXE
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_TASK_PROCINST on ACT_RU_TASK(PROC_INST_ID_);
alter table ACT_RU_TASK
    add constraint ACT_FK_TASK_PROCINST
    foreign key (PROC_INST_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_TASK_PROCDEF on ACT_RU_TASK(PROC_DEF_ID_);
alter table ACT_RU_TASK
    add constraint ACT_FK_TASK_PROCDEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

create index ACT_IDX_VAR_EXE on ACT_RU_VARIABLE(EXECUTION_ID_);
alter table ACT_RU_VARIABLE
    add constraint ACT_FK_VAR_EXE
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_VAR_PROCINST on ACT_RU_VARIABLE(PROC_INST_ID_);
alter table ACT_RU_VARIABLE
    add constraint ACT_FK_VAR_PROCINST
    foreign key (PROC_INST_ID_)
    references ACT_RU_EXECUTION(ID_);

create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_);
alter table ACT_RU_JOB
    add constraint ACT_FK_JOB_EXECUTION
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_JOB_PROC_INST_ID on ACT_RU_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_JOB
    add constraint ACT_FK_JOB_PROCESS_INSTANCE
    foreign key (PROCESS_INSTANCE_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_JOB_PROC_DEF_ID on ACT_RU_JOB(PROC_DEF_ID_);
alter table ACT_RU_JOB
    add constraint ACT_FK_JOB_PROC_DEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

create index ACT_IDX_TJOB_EXECUTION_ID on ACT_RU_TIMER_JOB(EXECUTION_ID_);
alter table ACT_RU_TIMER_JOB
    add constraint ACT_FK_TJOB_EXECUTION
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_TJOB_PROC_INST_ID on ACT_RU_TIMER_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_TIMER_JOB
    add constraint ACT_FK_TJOB_PROCESS_INSTANCE
    foreign key (PROCESS_INSTANCE_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_TJOB_PROC_DEF_ID on ACT_RU_TIMER_JOB(PROC_DEF_ID_);
alter table ACT_RU_TIMER_JOB
    add constraint ACT_FK_TJOB_PROC_DEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

create index ACT_IDX_SJOB_EXECUTION_ID on ACT_RU_SUSPENDED_JOB(EXECUTION_ID_);
alter table ACT_RU_SUSPENDED_JOB
    add constraint ACT_FK_SJOB_EXECUTION
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_SJOB_PROC_INST_ID on ACT_RU_SUSPENDED_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_SUSPENDED_JOB
    add constraint ACT_FK_SJOB_PROCESS_INSTANCE
    foreign key (PROCESS_INSTANCE_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_SJOB_PROC_DEF_ID on ACT_RU_SUSPENDED_JOB(PROC_DEF_ID_);
alter table ACT_RU_SUSPENDED_JOB
    add constraint ACT_FK_SJOB_PROC_DEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

create index ACT_IDX_DJOB_EXECUTION_ID on ACT_RU_DEADLETTER_JOB(EXECUTION_ID_);
alter table ACT_RU_DEADLETTER_JOB
    add constraint ACT_FK_DJOB_EXECUTION
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_DJOB_PROC_INST_ID on ACT_RU_DEADLETTER_JOB(PROCESS_INSTANCE_ID_);
alter table ACT_RU_DEADLETTER_JOB
    add constraint ACT_FK_DJOB_PROCESS_INSTANCE
    foreign key (PROCESS_INSTANCE_ID_)
    references ACT_RU_EXECUTION (ID_);

create index ACT_IDX_DJOB_PROC_DEF_ID on ACT_RU_DEADLETTER_JOB(PROC_DEF_ID_);
alter table ACT_RU_DEADLETTER_JOB
    add constraint ACT_FK_DJOB_PROC_DEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

alter table ACT_RU_EVENT_SUBSCR
    add constraint ACT_FK_EVENT_EXEC
    foreign key (EXECUTION_ID_)
    references ACT_RU_EXECUTION(ID_);

create index ACT_IDX_MODEL_SOURCE on ACT_RE_MODEL(EDITOR_SOURCE_VALUE_ID_);
alter table ACT_RE_MODEL
    add constraint ACT_FK_MODEL_SOURCE
    foreign key (EDITOR_SOURCE_VALUE_ID_)
    references ACT_GE_BYTEARRAY (ID_);

create index ACT_IDX_MODEL_SOURCE_EXTRA on ACT_RE_MODEL(EDITOR_SOURCE_EXTRA_VALUE_ID_);
alter table ACT_RE_MODEL
    add constraint ACT_FK_MODEL_SOURCE_EXTRA
    foreign key (EDITOR_SOURCE_EXTRA_VALUE_ID_)
    references ACT_GE_BYTEARRAY (ID_);

create index ACT_IDX_MODEL_DEPLOYMENT on ACT_RE_MODEL(DEPLOYMENT_ID_);
alter table ACT_RE_MODEL
    add constraint ACT_FK_MODEL_DEPLOYMENT
    foreign key (DEPLOYMENT_ID_)
    references ACT_RE_DEPLOYMENT (ID_);

create index ACT_IDX_PROCDEF_INFO_JSON on ACT_PROCDEF_INFO(INFO_JSON_ID_);
alter table ACT_PROCDEF_INFO
    add constraint ACT_FK_INFO_JSON_BA
    foreign key (INFO_JSON_ID_)
    references ACT_GE_BYTEARRAY (ID_);

create index ACT_IDX_PROCDEF_INFO_PROC on ACT_PROCDEF_INFO(PROC_DEF_ID_);
alter table ACT_PROCDEF_INFO
    add constraint ACT_FK_INFO_PROCDEF
    foreign key (PROC_DEF_ID_)
    references ACT_RE_PROCDEF (ID_);

alter table ACT_PROCDEF_INFO
    add constraint ACT_UNIQ_INFO_PROCDEF
    unique (PROC_DEF_ID_);

insert into ACT_GE_PROPERTY
values ('schema.version', '7.0.1.1', 1);

insert into ACT_GE_PROPERTY
values ('schema.history', 'create(7.0.1.1)', 1);

@@ -0,0 +1,114 @@
create table ACT_HI_PROCINST (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER default 1,
    PROC_INST_ID_ VARCHAR2(64) not null,
    BUSINESS_KEY_ VARCHAR2(255),
    PROC_DEF_ID_ VARCHAR2(64) not null,
    START_TIME_ TIMESTAMP(6) not null,
    END_TIME_ TIMESTAMP(6),
    DURATION_ NUMBER(19,0),
    START_USER_ID_ VARCHAR2(255),
    START_ACT_ID_ VARCHAR2(255),
    END_ACT_ID_ VARCHAR2(255),
    SUPER_PROCESS_INSTANCE_ID_ VARCHAR2(64),
    DELETE_REASON_ VARCHAR2(2000),
    TENANT_ID_ VARCHAR2(255) default '',
    NAME_ VARCHAR2(255),
    CALLBACK_ID_ VARCHAR2(255),
    CALLBACK_TYPE_ VARCHAR2(255),
    REFERENCE_ID_ VARCHAR2(255),
    REFERENCE_TYPE_ VARCHAR2(255),
    PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
    BUSINESS_STATUS_ VARCHAR2(255),
    primary key (ID_),
    unique (PROC_INST_ID_)
);

create table ACT_HI_ACTINST (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER default 1,
    PROC_DEF_ID_ VARCHAR2(64) not null,
    PROC_INST_ID_ VARCHAR2(64) not null,
    EXECUTION_ID_ VARCHAR2(64) not null,
    ACT_ID_ VARCHAR2(255) not null,
    TASK_ID_ VARCHAR2(64),
    CALL_PROC_INST_ID_ VARCHAR2(64),
    ACT_NAME_ VARCHAR2(255),
    ACT_TYPE_ VARCHAR2(255) not null,
    ASSIGNEE_ VARCHAR2(255),
    START_TIME_ TIMESTAMP(6) not null,
    END_TIME_ TIMESTAMP(6),
    TRANSACTION_ORDER_ INTEGER,
    DURATION_ NUMBER(19,0),
    DELETE_REASON_ VARCHAR2(2000),
    TENANT_ID_ VARCHAR2(255) default '',
    primary key (ID_)
);

create table ACT_HI_DETAIL (
    ID_ VARCHAR2(64) not null,
    TYPE_ VARCHAR2(255) not null,
    PROC_INST_ID_ VARCHAR2(64),
    EXECUTION_ID_ VARCHAR2(64),
    TASK_ID_ VARCHAR2(64),
    ACT_INST_ID_ VARCHAR2(64),
    NAME_ VARCHAR2(255) not null,
    VAR_TYPE_ VARCHAR2(64),
    REV_ INTEGER,
    TIME_ TIMESTAMP(6) not null,
    BYTEARRAY_ID_ VARCHAR2(64),
    DOUBLE_ NUMBER(38,10),
    LONG_ NUMBER(19,0),
    TEXT_ VARCHAR2(2000),
    TEXT2_ VARCHAR2(2000),
    primary key (ID_)
);

create table ACT_HI_COMMENT (
    ID_ VARCHAR2(64) not null,
    TYPE_ VARCHAR2(255),
    TIME_ TIMESTAMP(6) not null,
    USER_ID_ VARCHAR2(255),
    TASK_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    ACTION_ VARCHAR2(255),
    MESSAGE_ VARCHAR2(2000),
    FULL_MSG_ BLOB,
    primary key (ID_)
);

create table ACT_HI_ATTACHMENT (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER,
    USER_ID_ VARCHAR2(255),
    NAME_ VARCHAR2(255),
    DESCRIPTION_ VARCHAR2(2000),
    TYPE_ VARCHAR2(255),
    TASK_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    URL_ VARCHAR2(2000),
    CONTENT_ID_ VARCHAR2(64),
    TIME_ TIMESTAMP(6),
    primary key (ID_)
);

create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_);
create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_);
create index ACT_IDX_HI_PRO_SUPER_PROCINST on ACT_HI_PROCINST(SUPER_PROCESS_INSTANCE_ID_);
create index ACT_IDX_HI_ACT_INST_START on ACT_HI_ACTINST(START_TIME_);
create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_);
create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_);
create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_);
create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_);
create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_);
create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_);
create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_);
create index ACT_IDX_HI_PROCVAR_TASK_ID on ACT_HI_VARINST(TASK_ID_);
create index ACT_IDX_HI_PROCVAR_EXE on ACT_HI_VARINST(EXECUTION_ID_);
create index ACT_IDX_HI_IDENT_LNK_TASK on ACT_HI_IDENTITYLINK(TASK_ID_);
create index ACT_IDX_HI_IDENT_LNK_PROCINST on ACT_HI_IDENTITYLINK(PROC_INST_ID_);

create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_);
create index ACT_IDX_HI_ACT_INST_EXEC on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_);
create index ACT_IDX_HI_TASK_INST_PROCINST on ACT_HI_TASKINST(PROC_INST_ID_);

@@ -0,0 +1,148 @@
drop index ACT_IDX_BYTEAR_DEPL;
drop index ACT_IDX_EXE_PROCINST;
drop index ACT_IDX_EXE_PARENT;
drop index ACT_IDX_EXE_SUPER;
drop index ACT_IDX_TSKASS_TASK;
drop index ACT_IDX_TASK_EXEC;
drop index ACT_IDX_TASK_PROCINST;
drop index ACT_IDX_TASK_PROCDEF;
drop index ACT_IDX_VAR_EXE;
drop index ACT_IDX_VAR_PROCINST;
drop index ACT_IDX_JOB_EXECUTION_ID;
drop index ACT_IDX_JOB_PROC_INST_ID;
drop index ACT_IDX_JOB_PROC_DEF_ID;
drop index ACT_IDX_TJOB_EXECUTION_ID;
drop index ACT_IDX_TJOB_PROC_INST_ID;
drop index ACT_IDX_TJOB_PROC_DEF_ID;
drop index ACT_IDX_SJOB_EXECUTION_ID;
drop index ACT_IDX_SJOB_PROC_INST_ID;
drop index ACT_IDX_SJOB_PROC_DEF_ID;
drop index ACT_IDX_DJOB_EXECUTION_ID;
drop index ACT_IDX_DJOB_PROC_INST_ID;
drop index ACT_IDX_DJOB_PROC_DEF_ID;
drop index ACT_IDX_MODEL_SOURCE;
drop index ACT_IDX_MODEL_SOURCE_EXTRA;
drop index ACT_IDX_MODEL_DEPLOYMENT;
drop index ACT_IDX_PROCDEF_INFO_JSON;

drop index ACT_IDX_EXEC_BUSKEY;
drop index ACT_IDX_VARIABLE_TASK_ID;

drop index ACT_IDX_RU_ACTI_START;
drop index ACT_IDX_RU_ACTI_END;
drop index ACT_IDX_RU_ACTI_PROC;
drop index ACT_IDX_RU_ACTI_PROC_ACT;
drop index ACT_IDX_RU_ACTI_EXEC;
drop index ACT_IDX_RU_ACTI_EXEC_ACT;

alter table ACT_GE_BYTEARRAY
    drop CONSTRAINT ACT_FK_BYTEARR_DEPL;

alter table ACT_RU_EXECUTION
    drop CONSTRAINT ACT_FK_EXE_PROCINST;

alter table ACT_RU_EXECUTION
    drop CONSTRAINT ACT_FK_EXE_PARENT;

alter table ACT_RU_EXECUTION
    drop CONSTRAINT ACT_FK_EXE_SUPER;

alter table ACT_RU_EXECUTION
    drop CONSTRAINT ACT_FK_EXE_PROCDEF;

alter table ACT_RU_IDENTITYLINK
    drop CONSTRAINT ACT_FK_TSKASS_TASK;

alter table ACT_RU_IDENTITYLINK
    drop CONSTRAINT ACT_FK_IDL_PROCINST;

alter table ACT_RU_IDENTITYLINK
    drop CONSTRAINT ACT_FK_ATHRZ_PROCEDEF;

alter table ACT_RU_TASK
    drop CONSTRAINT ACT_FK_TASK_EXE;

alter table ACT_RU_TASK
    drop CONSTRAINT ACT_FK_TASK_PROCINST;

alter table ACT_RU_TASK
    drop CONSTRAINT ACT_FK_TASK_PROCDEF;

alter table ACT_RU_VARIABLE
    drop CONSTRAINT ACT_FK_VAR_EXE;

alter table ACT_RU_VARIABLE
    drop CONSTRAINT ACT_FK_VAR_PROCINST;

alter table ACT_RU_JOB
    drop CONSTRAINT ACT_FK_JOB_EXECUTION;

alter table ACT_RU_JOB
    drop CONSTRAINT ACT_FK_JOB_PROCESS_INSTANCE;

alter table ACT_RU_JOB
    drop CONSTRAINT ACT_FK_JOB_PROC_DEF;

alter table ACT_RU_TIMER_JOB
    drop CONSTRAINT ACT_FK_TJOB_EXECUTION;

alter table ACT_RU_TIMER_JOB
    drop CONSTRAINT ACT_FK_TJOB_PROCESS_INSTANCE;

alter table ACT_RU_TIMER_JOB
    drop CONSTRAINT ACT_FK_TJOB_PROC_DEF;

alter table ACT_RU_SUSPENDED_JOB
    drop CONSTRAINT ACT_FK_SJOB_EXECUTION;

alter table ACT_RU_SUSPENDED_JOB
    drop CONSTRAINT ACT_FK_SJOB_PROCESS_INSTANCE;

alter table ACT_RU_SUSPENDED_JOB
    drop CONSTRAINT ACT_FK_SJOB_PROC_DEF;

alter table ACT_RU_DEADLETTER_JOB
    drop CONSTRAINT ACT_FK_DJOB_EXECUTION;

alter table ACT_RU_DEADLETTER_JOB
    drop CONSTRAINT ACT_FK_DJOB_PROCESS_INSTANCE;

alter table ACT_RU_DEADLETTER_JOB
    drop CONSTRAINT ACT_FK_DJOB_PROC_DEF;

alter table ACT_RU_EVENT_SUBSCR
    drop CONSTRAINT ACT_FK_EVENT_EXEC;

alter table ACT_RE_PROCDEF
    drop CONSTRAINT ACT_UNIQ_PROCDEF;

alter table ACT_RE_MODEL
    drop CONSTRAINT ACT_FK_MODEL_SOURCE;

alter table ACT_RE_MODEL
    drop CONSTRAINT ACT_FK_MODEL_SOURCE_EXTRA;

alter table ACT_RE_MODEL
    drop CONSTRAINT ACT_FK_MODEL_DEPLOYMENT;

alter table ACT_PROCDEF_INFO
    drop CONSTRAINT ACT_UNIQ_INFO_PROCDEF;

alter table ACT_PROCDEF_INFO
    drop CONSTRAINT ACT_FK_INFO_JSON_BA;

alter table ACT_PROCDEF_INFO
    drop CONSTRAINT ACT_FK_INFO_PROCDEF;

drop index ACT_IDX_ATHRZ_PROCEDEF;
drop index ACT_IDX_PROCDEF_INFO_PROC;

drop table ACT_RU_ACTINST;
drop table ACT_RE_DEPLOYMENT;
drop table ACT_RE_MODEL;
drop table ACT_RE_PROCDEF;
drop table ACT_RU_EXECUTION;

drop sequence act_evt_log_seq;
drop table ACT_EVT_LOG;
drop table ACT_PROCDEF_INFO;
@@ -0,0 +1,23 @@
drop index ACT_IDX_HI_PRO_INST_END;
drop index ACT_IDX_HI_PRO_I_BUSKEY;
drop index ACT_IDX_HI_ACT_INST_START;
drop index ACT_IDX_HI_ACT_INST_END;
drop index ACT_IDX_HI_DETAIL_PROC_INST;
drop index ACT_IDX_HI_DETAIL_ACT_INST;
drop index ACT_IDX_HI_DETAIL_TIME;
drop index ACT_IDX_HI_DETAIL_NAME;
drop index ACT_IDX_HI_DETAIL_TASK_ID;
drop index ACT_IDX_HI_PROCVAR_PROC_INST;
drop index ACT_IDX_HI_PROCVAR_TASK_ID;
drop index ACT_IDX_HI_PROCVAR_EXE;
drop index ACT_IDX_HI_ACT_INST_PROCINST;
drop index ACT_IDX_HI_IDENT_LNK_TASK;
drop index ACT_IDX_HI_IDENT_LNK_PROCINST;
drop index ACT_IDX_HI_TASK_INST_PROCINST;

drop table ACT_HI_PROCINST;
drop table ACT_HI_ACTINST;
drop table ACT_HI_DETAIL;
drop table ACT_HI_COMMENT;
drop table ACT_HI_ATTACHMENT;

@@ -0,0 +1,23 @@
create table ACT_HI_ENTITYLINK (
    ID_ VARCHAR2(64),
    LINK_TYPE_ VARCHAR2(255),
    CREATE_TIME_ TIMESTAMP(6),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    PARENT_ELEMENT_ID_ VARCHAR2(255),
    REF_SCOPE_ID_ VARCHAR2(255),
    REF_SCOPE_TYPE_ VARCHAR2(255),
    REF_SCOPE_DEFINITION_ID_ VARCHAR2(255),
    ROOT_SCOPE_ID_ VARCHAR2(255),
    ROOT_SCOPE_TYPE_ VARCHAR2(255),
    HIERARCHY_TYPE_ VARCHAR2(255),
    primary key (ID_)
);

create index ACT_IDX_HI_ENT_LNK_SCOPE on ACT_HI_ENTITYLINK(SCOPE_ID_, SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_HI_ENT_LNK_REF_SCOPE on ACT_HI_ENTITYLINK(REF_SCOPE_ID_, REF_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_HI_ENT_LNK_ROOT_SCOPE on ACT_HI_ENTITYLINK(ROOT_SCOPE_ID_, ROOT_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_HI_ENT_LNK_SCOPE_DEF on ACT_HI_ENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_, LINK_TYPE_);

@@ -0,0 +1,26 @@
create table ACT_RU_ENTITYLINK (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    CREATE_TIME_ TIMESTAMP(6),
    LINK_TYPE_ VARCHAR2(255),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    PARENT_ELEMENT_ID_ VARCHAR2(255),
    REF_SCOPE_ID_ VARCHAR2(255),
    REF_SCOPE_TYPE_ VARCHAR2(255),
    REF_SCOPE_DEFINITION_ID_ VARCHAR2(255),
    ROOT_SCOPE_ID_ VARCHAR2(255),
    ROOT_SCOPE_TYPE_ VARCHAR2(255),
    HIERARCHY_TYPE_ VARCHAR2(255),
    primary key (ID_)
);

create index ACT_IDX_ENT_LNK_SCOPE on ACT_RU_ENTITYLINK(SCOPE_ID_, SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_ENT_LNK_REF_SCOPE on ACT_RU_ENTITYLINK(REF_SCOPE_ID_, REF_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_ENT_LNK_ROOT_SCOPE on ACT_RU_ENTITYLINK(ROOT_SCOPE_ID_, ROOT_SCOPE_TYPE_, LINK_TYPE_);
create index ACT_IDX_ENT_LNK_SCOPE_DEF on ACT_RU_ENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_, LINK_TYPE_);

insert into ACT_GE_PROPERTY values ('entitylink.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,4 @@
drop index ACT_IDX_HI_ENT_LNK_SCOPE;
drop index ACT_IDX_HI_ENT_LNK_SCOPE_DEF;

drop table ACT_HI_ENTITYLINK;
@@ -0,0 +1,4 @@
drop index ACT_IDX_ENT_LNK_SCOPE;
drop index ACT_IDX_ENT_LNK_SCOPE_DEF;

drop table ACT_RU_ENTITYLINK;
@@ -0,0 +1,28 @@
create table ACT_RU_EVENT_SUBSCR (
    ID_ VARCHAR2(64) not null,
    REV_ integer,
    EVENT_TYPE_ VARCHAR2(255) not null,
    EVENT_NAME_ VARCHAR2(255),
    EXECUTION_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    ACTIVITY_ID_ VARCHAR2(64),
    CONFIGURATION_ VARCHAR2(255),
    CREATED_ TIMESTAMP(6) not null,
    PROC_DEF_ID_ VARCHAR2(64),
    SUB_SCOPE_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(64),
    SCOPE_DEFINITION_ID_ VARCHAR2(64),
    SCOPE_DEFINITION_KEY_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(64),
    LOCK_TIME_ TIMESTAMP(6),
    LOCK_OWNER_ VARCHAR2(255),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_);
create index ACT_IDX_EVENT_SUBSCR on ACT_RU_EVENT_SUBSCR(EXECUTION_ID_);
create index ACT_IDX_EVENT_SUBSCR_SCOPEREF_ on ACT_RU_EVENT_SUBSCR(SCOPE_ID_, SCOPE_TYPE_);

insert into ACT_GE_PROPERTY values ('eventsubscription.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,5 @@
drop index ACT_IDX_EVENT_SUBSCR_CONFIG_;
drop index ACT_IDX_EVENT_SUBSCR;
drop index ACT_IDX_EVENT_SUBSCR_SCOPEREF_;

drop table ACT_RU_EVENT_SUBSCR;
@@ -0,0 +1,20 @@
create table ACT_HI_IDENTITYLINK (
    ID_ VARCHAR2(64),
    GROUP_ID_ VARCHAR2(255),
    TYPE_ VARCHAR2(255),
    USER_ID_ VARCHAR2(255),
    TASK_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    PROC_INST_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    primary key (ID_)
);

create index ACT_IDX_HI_IDENT_LNK_USER on ACT_HI_IDENTITYLINK(USER_ID_);
create index ACT_IDX_HI_IDENT_LNK_SCOPE on ACT_HI_IDENTITYLINK(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_IDENT_LNK_SUB_SCOPE on ACT_HI_IDENTITYLINK(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_IDENT_LNK_SCOPE_DEF on ACT_HI_IDENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

@@ -0,0 +1,24 @@
create table ACT_RU_IDENTITYLINK (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    GROUP_ID_ VARCHAR2(255),
    TYPE_ VARCHAR2(255),
    USER_ID_ VARCHAR2(255),
    TASK_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    primary key (ID_)
);

create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_);
create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_);
create index ACT_IDX_IDENT_LNK_SCOPE on ACT_RU_IDENTITYLINK(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_IDENT_LNK_SUB_SCOPE on ACT_RU_IDENTITYLINK(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_IDENT_LNK_SCOPE_DEF on ACT_RU_IDENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

insert into ACT_GE_PROPERTY values ('identitylink.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,6 @@
drop index ACT_IDX_HI_IDENT_LNK_USER;
drop index ACT_IDX_HI_IDENT_LNK_SCOPE;
drop index ACT_IDX_HI_IDENT_LNK_SUB_SCOPE;
drop index ACT_IDX_HI_IDENT_LNK_SCOPE_DEF;

drop table ACT_HI_IDENTITYLINK;
@@ -0,0 +1,7 @@
drop index ACT_IDX_IDENT_LNK_USER;
drop index ACT_IDX_IDENT_LNK_GROUP;
drop index ACT_IDX_IDENT_LNK_SCOPE;
drop index ACT_IDX_IDENT_LNK_SUB_SCOPE;
drop index ACT_IDX_IDENT_LNK_SCOPE_DEF;

drop table ACT_RU_IDENTITYLINK;
@@ -0,0 +1,108 @@
create table ACT_ID_PROPERTY (
    NAME_ VARCHAR2(64),
    VALUE_ VARCHAR2(300),
    REV_ INTEGER,
    primary key (NAME_)
);

insert into ACT_ID_PROPERTY
values ('schema.version', '7.0.1.1', 1);

create table ACT_ID_BYTEARRAY (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    NAME_ VARCHAR2(255),
    BYTES_ BLOB,
    primary key (ID_)
);

create table ACT_ID_GROUP (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    NAME_ VARCHAR2(255),
    TYPE_ VARCHAR2(255),
    primary key (ID_)
);

create table ACT_ID_MEMBERSHIP (
    USER_ID_ VARCHAR2(64),
    GROUP_ID_ VARCHAR2(64),
    primary key (USER_ID_, GROUP_ID_)
);

create table ACT_ID_USER (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    FIRST_ VARCHAR2(255),
    LAST_ VARCHAR2(255),
    DISPLAY_NAME_ VARCHAR2(255),
    EMAIL_ VARCHAR2(255),
    PWD_ VARCHAR2(255),
    PICTURE_ID_ VARCHAR2(64),
    TENANT_ID_ VARCHAR2(255) default '',
    primary key (ID_)
);

create table ACT_ID_INFO (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    USER_ID_ VARCHAR2(64),
    TYPE_ VARCHAR2(64),
    KEY_ VARCHAR2(255),
    VALUE_ VARCHAR2(255),
    PASSWORD_ BLOB,
    PARENT_ID_ VARCHAR2(255),
    primary key (ID_)
);

create table ACT_ID_TOKEN (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER,
    TOKEN_VALUE_ VARCHAR2(255),
    TOKEN_DATE_ TIMESTAMP(6),
    IP_ADDRESS_ VARCHAR2(255),
    USER_AGENT_ VARCHAR2(255),
    USER_ID_ VARCHAR2(255),
    TOKEN_DATA_ VARCHAR2(2000),
    primary key (ID_)
);

create table ACT_ID_PRIV (
    ID_ VARCHAR2(64) not null,
    NAME_ VARCHAR2(255) not null,
    primary key (ID_)
);

create table ACT_ID_PRIV_MAPPING (
    ID_ VARCHAR2(64) not null,
    PRIV_ID_ VARCHAR2(64) not null,
    USER_ID_ VARCHAR2(255),
    GROUP_ID_ VARCHAR2(255),
    primary key (ID_)
);

create index ACT_IDX_MEMB_GROUP on ACT_ID_MEMBERSHIP(GROUP_ID_);
alter table ACT_ID_MEMBERSHIP
    add constraint ACT_FK_MEMB_GROUP
    foreign key (GROUP_ID_)
    references ACT_ID_GROUP (ID_);

create index ACT_IDX_MEMB_USER on ACT_ID_MEMBERSHIP(USER_ID_);
alter table ACT_ID_MEMBERSHIP
    add constraint ACT_FK_MEMB_USER
    foreign key (USER_ID_)
    references ACT_ID_USER (ID_);

create index ACT_IDX_PRIV_MAPPING on ACT_ID_PRIV_MAPPING(PRIV_ID_);
alter table ACT_ID_PRIV_MAPPING
    add constraint ACT_FK_PRIV_MAPPING
    foreign key (PRIV_ID_)
    references ACT_ID_PRIV (ID_);

create index ACT_IDX_PRIV_USER on ACT_ID_PRIV_MAPPING(USER_ID_);
create index ACT_IDX_PRIV_GROUP on ACT_ID_PRIV_MAPPING(GROUP_ID_);

alter table ACT_ID_PRIV
    add constraint ACT_UNIQ_PRIV_NAME
    unique (NAME_);

@@ -0,0 +1,22 @@
alter table ACT_ID_MEMBERSHIP
    drop CONSTRAINT ACT_FK_MEMB_GROUP;

alter table ACT_ID_MEMBERSHIP
    drop CONSTRAINT ACT_FK_MEMB_USER;

alter table ACT_ID_PRIV_MAPPING
    drop CONSTRAINT ACT_FK_PRIV_MAPPING;

drop index ACT_IDX_MEMB_GROUP;
drop index ACT_IDX_MEMB_USER;
drop index ACT_IDX_PRIV_MAPPING;

drop table ACT_ID_PROPERTY;
drop table ACT_ID_BYTEARRAY;
drop table ACT_ID_INFO;
drop table ACT_ID_MEMBERSHIP;
drop table ACT_ID_GROUP;
drop table ACT_ID_USER;
drop table ACT_ID_TOKEN;
drop table ACT_ID_PRIV;
drop table ACT_ID_PRIV_MAPPING;
@@ -0,0 +1,261 @@
create table ACT_RU_JOB (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    CATEGORY_ VARCHAR2(255),
    TYPE_ VARCHAR2(255) NOT NULL,
    LOCK_EXP_TIME_ TIMESTAMP(6),
    LOCK_OWNER_ VARCHAR2(255),
    EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
    EXECUTION_ID_ VARCHAR2(64),
    PROCESS_INSTANCE_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    ELEMENT_ID_ VARCHAR2(255),
    ELEMENT_NAME_ VARCHAR2(255),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    CORRELATION_ID_ VARCHAR2(255),
    RETRIES_ INTEGER,
    EXCEPTION_STACK_ID_ VARCHAR2(64),
    EXCEPTION_MSG_ VARCHAR2(2000),
    DUEDATE_ TIMESTAMP(6),
    REPEAT_ VARCHAR2(255),
    HANDLER_TYPE_ VARCHAR2(255),
    HANDLER_CFG_ VARCHAR2(2000),
    CUSTOM_VALUES_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create table ACT_RU_TIMER_JOB (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    CATEGORY_ VARCHAR2(255),
    TYPE_ VARCHAR2(255) NOT NULL,
    LOCK_EXP_TIME_ TIMESTAMP(6),
    LOCK_OWNER_ VARCHAR2(255),
    EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
    EXECUTION_ID_ VARCHAR2(64),
    PROCESS_INSTANCE_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    ELEMENT_ID_ VARCHAR2(255),
    ELEMENT_NAME_ VARCHAR2(255),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    CORRELATION_ID_ VARCHAR2(255),
    RETRIES_ INTEGER,
    EXCEPTION_STACK_ID_ VARCHAR2(64),
    EXCEPTION_MSG_ VARCHAR2(2000),
    DUEDATE_ TIMESTAMP(6),
    REPEAT_ VARCHAR2(255),
    HANDLER_TYPE_ VARCHAR2(255),
    HANDLER_CFG_ VARCHAR2(2000),
    CUSTOM_VALUES_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create table ACT_RU_SUSPENDED_JOB (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    CATEGORY_ VARCHAR2(255),
    TYPE_ VARCHAR2(255) NOT NULL,
    EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
    EXECUTION_ID_ VARCHAR2(64),
    PROCESS_INSTANCE_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    ELEMENT_ID_ VARCHAR2(255),
    ELEMENT_NAME_ VARCHAR2(255),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    CORRELATION_ID_ VARCHAR2(255),
    RETRIES_ INTEGER,
    EXCEPTION_STACK_ID_ VARCHAR2(64),
    EXCEPTION_MSG_ VARCHAR2(2000),
    DUEDATE_ TIMESTAMP(6),
    REPEAT_ VARCHAR2(255),
    HANDLER_TYPE_ VARCHAR2(255),
    HANDLER_CFG_ VARCHAR2(2000),
    CUSTOM_VALUES_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create table ACT_RU_DEADLETTER_JOB (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    CATEGORY_ VARCHAR2(255),
    TYPE_ VARCHAR2(255) NOT NULL,
    EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
    EXECUTION_ID_ VARCHAR2(64),
    PROCESS_INSTANCE_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    ELEMENT_ID_ VARCHAR2(255),
    ELEMENT_NAME_ VARCHAR2(255),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    CORRELATION_ID_ VARCHAR2(255),
    EXCEPTION_STACK_ID_ VARCHAR2(64),
    EXCEPTION_MSG_ VARCHAR2(2000),
    DUEDATE_ TIMESTAMP(6),
    REPEAT_ VARCHAR2(255),
    HANDLER_TYPE_ VARCHAR2(255),
    HANDLER_CFG_ VARCHAR2(2000),
    CUSTOM_VALUES_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create table ACT_RU_HISTORY_JOB (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    LOCK_EXP_TIME_ TIMESTAMP(6),
    LOCK_OWNER_ VARCHAR2(255),
    RETRIES_ INTEGER,
    EXCEPTION_STACK_ID_ VARCHAR2(64),
    EXCEPTION_MSG_ VARCHAR2(2000),
    HANDLER_TYPE_ VARCHAR2(255),
    HANDLER_CFG_ VARCHAR2(2000),
    CUSTOM_VALUES_ID_ VARCHAR2(64),
    ADV_HANDLER_CFG_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    SCOPE_TYPE_ VARCHAR2(255),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create table ACT_RU_EXTERNAL_JOB (
    ID_ VARCHAR2(64) NOT NULL,
    REV_ INTEGER,
    CATEGORY_ VARCHAR2(255),
    TYPE_ VARCHAR2(255) NOT NULL,
    LOCK_EXP_TIME_ TIMESTAMP(6),
    LOCK_OWNER_ VARCHAR2(255),
    EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)),
    EXECUTION_ID_ VARCHAR2(64),
    PROCESS_INSTANCE_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    ELEMENT_ID_ VARCHAR2(255),
    ELEMENT_NAME_ VARCHAR2(255),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    CORRELATION_ID_ VARCHAR2(255),
    RETRIES_ INTEGER,
    EXCEPTION_STACK_ID_ VARCHAR2(64),
    EXCEPTION_MSG_ VARCHAR2(2000),
    DUEDATE_ TIMESTAMP(6),
    REPEAT_ VARCHAR2(255),
    HANDLER_TYPE_ VARCHAR2(255),
    HANDLER_CFG_ VARCHAR2(2000),
    CUSTOM_VALUES_ID_ VARCHAR2(64),
    CREATE_TIME_ TIMESTAMP(6),
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    primary key (ID_)
);

create index ACT_IDX_JOB_EXCEPTION on ACT_RU_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_JOB_CUSTOM_VAL_ID on ACT_RU_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_JOB_CORRELATION_ID on ACT_RU_JOB(CORRELATION_ID_);

create index ACT_IDX_TJOB_EXCEPTION on ACT_RU_TIMER_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_TJOB_CUSTOM_VAL_ID on ACT_RU_TIMER_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_TJOB_CORRELATION_ID on ACT_RU_TIMER_JOB(CORRELATION_ID_);
create index ACT_IDX_TJOB_DUEDATE on ACT_RU_TIMER_JOB(DUEDATE_);

create index ACT_IDX_SJOB_EXCEPTION on ACT_RU_SUSPENDED_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_SJOB_CUSTOM_VAL_ID on ACT_RU_SUSPENDED_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_SJOB_CORRELATION_ID on ACT_RU_SUSPENDED_JOB(CORRELATION_ID_);

create index ACT_IDX_DJOB_EXCEPTION on ACT_RU_DEADLETTER_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_DJOB_CUSTOM_VAL_ID on ACT_RU_DEADLETTER_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_DJOB_CORRELATION_ID on ACT_RU_DEADLETTER_JOB(CORRELATION_ID_);

create index ACT_IDX_EJOB_EXCEPTION on ACT_RU_EXTERNAL_JOB(EXCEPTION_STACK_ID_);
create index ACT_IDX_EJOB_CUSTOM_VAL_ID on ACT_RU_EXTERNAL_JOB(CUSTOM_VALUES_ID_);
create index ACT_IDX_EJOB_CORRELATION_ID on ACT_RU_EXTERNAL_JOB(CORRELATION_ID_);

alter table ACT_RU_JOB
    add constraint ACT_FK_JOB_EXCEPTION
    foreign key (EXCEPTION_STACK_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_JOB
    add constraint ACT_FK_JOB_CUSTOM_VAL
    foreign key (CUSTOM_VALUES_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_TIMER_JOB
    add constraint ACT_FK_TJOB_EXCEPTION
    foreign key (EXCEPTION_STACK_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_TIMER_JOB
    add constraint ACT_FK_TJOB_CUSTOM_VAL
    foreign key (CUSTOM_VALUES_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_SUSPENDED_JOB
    add constraint ACT_FK_SJOB_EXCEPTION
    foreign key (EXCEPTION_STACK_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_SUSPENDED_JOB
    add constraint ACT_FK_SJOB_CUSTOM_VAL
    foreign key (CUSTOM_VALUES_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_DEADLETTER_JOB
    add constraint ACT_FK_DJOB_EXCEPTION
    foreign key (EXCEPTION_STACK_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_DEADLETTER_JOB
    add constraint ACT_FK_DJOB_CUSTOM_VAL
    foreign key (CUSTOM_VALUES_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_EXTERNAL_JOB
    add constraint ACT_FK_EJOB_EXCEPTION
    foreign key (EXCEPTION_STACK_ID_)
    references ACT_GE_BYTEARRAY (ID_);

alter table ACT_RU_EXTERNAL_JOB
    add constraint ACT_FK_EJOB_CUSTOM_VAL
    foreign key (CUSTOM_VALUES_ID_)
    references ACT_GE_BYTEARRAY (ID_);

create index ACT_IDX_JOB_SCOPE on ACT_RU_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_JOB_SUB_SCOPE on ACT_RU_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_JOB_SCOPE_DEF on ACT_RU_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

create index ACT_IDX_TJOB_SCOPE on ACT_RU_TIMER_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TJOB_SUB_SCOPE on ACT_RU_TIMER_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TJOB_SCOPE_DEF on ACT_RU_TIMER_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

create index ACT_IDX_SJOB_SCOPE on ACT_RU_SUSPENDED_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_SJOB_SUB_SCOPE on ACT_RU_SUSPENDED_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_SJOB_SCOPE_DEF on ACT_RU_SUSPENDED_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

create index ACT_IDX_DJOB_SCOPE on ACT_RU_DEADLETTER_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_DJOB_SUB_SCOPE on ACT_RU_DEADLETTER_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_DJOB_SCOPE_DEF on ACT_RU_DEADLETTER_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

create index ACT_IDX_EJOB_SCOPE on ACT_RU_EXTERNAL_JOB(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_EJOB_SUB_SCOPE on ACT_RU_EXTERNAL_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_EJOB_SCOPE_DEF on ACT_RU_EXTERNAL_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

insert into ACT_GE_PROPERTY values ('job.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,74 @@
drop index ACT_IDX_JOB_SCOPE;
drop index ACT_IDX_JOB_SUB_SCOPE;
drop index ACT_IDX_JOB_SCOPE_DEF;
drop index ACT_IDX_TJOB_SCOPE;
drop index ACT_IDX_TJOB_SUB_SCOPE;
drop index ACT_IDX_TJOB_SCOPE_DEF;
drop index ACT_IDX_SJOB_SCOPE;
drop index ACT_IDX_SJOB_SUB_SCOPE;
drop index ACT_IDX_SJOB_SCOPE_DEF;
drop index ACT_IDX_DJOB_SCOPE;
drop index ACT_IDX_DJOB_SUB_SCOPE;
drop index ACT_IDX_DJOB_SCOPE_DEF;
drop index ACT_IDX_EJOB_SCOPE;
drop index ACT_IDX_EJOB_SUB_SCOPE;
drop index ACT_IDX_EJOB_SCOPE_DEF;

drop index ACT_IDX_JOB_EXCEPTION;
drop index ACT_IDX_JOB_CUSTOM_VAL_ID;
drop index ACT_IDX_JOB_CORRELATION_ID;

drop index ACT_IDX_TJOB_EXCEPTION;
drop index ACT_IDX_TJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_TJOB_CORRELATION_ID;
drop index ACT_IDX_TJOB_DUEDATE;

drop index ACT_IDX_SJOB_EXCEPTION;
drop index ACT_IDX_SJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_SJOB_CORRELATION_ID;

drop index ACT_IDX_DJOB_EXCEPTION;
drop index ACT_IDX_DJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_DJOB_CORRELATION_ID;

drop index ACT_IDX_EJOB_EXCEPTION;
drop index ACT_IDX_EJOB_CUSTOM_VAL_ID;
drop index ACT_IDX_EJOB_CORRELATION_ID;

alter table ACT_RU_JOB
    drop CONSTRAINT ACT_FK_JOB_EXCEPTION;

alter table ACT_RU_JOB
    drop CONSTRAINT ACT_FK_JOB_CUSTOM_VAL;

alter table ACT_RU_TIMER_JOB
    drop CONSTRAINT ACT_FK_TJOB_EXCEPTION;

alter table ACT_RU_TIMER_JOB
    drop CONSTRAINT ACT_FK_TJOB_CUSTOM_VAL;

alter table ACT_RU_SUSPENDED_JOB
    drop CONSTRAINT ACT_FK_SJOB_EXCEPTION;

alter table ACT_RU_SUSPENDED_JOB
    drop CONSTRAINT ACT_FK_SJOB_CUSTOM_VAL;

alter table ACT_RU_DEADLETTER_JOB
    drop CONSTRAINT ACT_FK_DJOB_EXCEPTION;

alter table ACT_RU_DEADLETTER_JOB
    drop CONSTRAINT ACT_FK_DJOB_CUSTOM_VAL;

alter table ACT_RU_EXTERNAL_JOB
    drop CONSTRAINT ACT_FK_EJOB_EXCEPTION;

alter table ACT_RU_EXTERNAL_JOB
    drop CONSTRAINT ACT_FK_EJOB_CUSTOM_VAL;

drop table ACT_RU_JOB;
drop table ACT_RU_TIMER_JOB;
drop table ACT_RU_SUSPENDED_JOB;
drop table ACT_RU_DEADLETTER_JOB;
drop table ACT_RU_HISTORY_JOB;
drop table ACT_RU_EXTERNAL_JOB;

@@ -0,0 +1,64 @@
create table ACT_HI_TASKINST (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER default 1,
    PROC_DEF_ID_ VARCHAR2(64),
    TASK_DEF_ID_ VARCHAR2(64),
    TASK_DEF_KEY_ VARCHAR2(255),
    PROC_INST_ID_ VARCHAR2(64),
    EXECUTION_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
    PARENT_TASK_ID_ VARCHAR2(64),
    STATE_ VARCHAR2(255),
    NAME_ VARCHAR2(255),
    DESCRIPTION_ VARCHAR2(2000),
    OWNER_ VARCHAR2(255),
    ASSIGNEE_ VARCHAR2(255),
    START_TIME_ TIMESTAMP(6) not null,
    IN_PROGRESS_TIME_ TIMESTAMP(6),
    IN_PROGRESS_STARTED_BY_ VARCHAR2(255),
    CLAIM_TIME_ TIMESTAMP(6),
    CLAIMED_BY_ VARCHAR2(255),
    SUSPENDED_TIME_ TIMESTAMP(6),
    SUSPENDED_BY_ VARCHAR2(255),
    END_TIME_ TIMESTAMP(6),
    COMPLETED_BY_ VARCHAR2(255),
    DURATION_ NUMBER(19,0),
    DELETE_REASON_ VARCHAR2(2000),
    PRIORITY_ INTEGER,
    IN_PROGRESS_DUE_DATE_ TIMESTAMP(6),
    DUE_DATE_ TIMESTAMP(6),
    FORM_KEY_ VARCHAR2(255),
    CATEGORY_ VARCHAR2(255),
    TENANT_ID_ VARCHAR2(255) default '',
    LAST_UPDATED_TIME_ TIMESTAMP(6),
    primary key (ID_)
);

create table ACT_HI_TSK_LOG (
    ID_ NUMBER(19),
    TYPE_ VARCHAR2(64),
    TASK_ID_ VARCHAR2(64) not null,
    TIME_STAMP_ TIMESTAMP(6) not null,
    USER_ID_ VARCHAR2(255),
    DATA_ VARCHAR2(2000),
    EXECUTION_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    TENANT_ID_ VARCHAR2(255) default '',
    primary key (ID_)
);

create sequence act_hi_task_evt_log_seq start with 1 increment by 1;

create index ACT_IDX_HI_TASK_SCOPE on ACT_HI_TASKINST(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_TASK_SUB_SCOPE on ACT_HI_TASKINST(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_TASK_SCOPE_DEF on ACT_HI_TASKINST(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

@@ -0,0 +1,48 @@
create table ACT_RU_TASK (
    ID_ VARCHAR2(64),
    REV_ INTEGER,
    EXECUTION_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    PROC_DEF_ID_ VARCHAR2(64),
    TASK_DEF_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    SCOPE_DEFINITION_ID_ VARCHAR2(255),
    PROPAGATED_STAGE_INST_ID_ VARCHAR2(255),
    STATE_ VARCHAR2(255),
    NAME_ VARCHAR2(255),
    PARENT_TASK_ID_ VARCHAR2(64),
    DESCRIPTION_ VARCHAR2(2000),
    TASK_DEF_KEY_ VARCHAR2(255),
    OWNER_ VARCHAR2(255),
    ASSIGNEE_ VARCHAR2(255),
    DELEGATION_ VARCHAR2(64),
    PRIORITY_ INTEGER,
    CREATE_TIME_ TIMESTAMP(6),
    IN_PROGRESS_TIME_ TIMESTAMP(6),
    IN_PROGRESS_STARTED_BY_ VARCHAR2(255),
    CLAIM_TIME_ TIMESTAMP(6),
    CLAIMED_BY_ VARCHAR2(255),
    SUSPENDED_TIME_ TIMESTAMP(6),
    SUSPENDED_BY_ VARCHAR2(255),
    IN_PROGRESS_DUE_DATE_ TIMESTAMP(6),
    DUE_DATE_ TIMESTAMP(6),
    CATEGORY_ VARCHAR2(255),
    SUSPENSION_STATE_ INTEGER,
    TENANT_ID_ VARCHAR2(255) DEFAULT '',
    FORM_KEY_ VARCHAR2(255),
    IS_COUNT_ENABLED_ NUMBER(1) CHECK (IS_COUNT_ENABLED_ IN (1,0)),
    VAR_COUNT_ INTEGER,
    ID_LINK_COUNT_ INTEGER,
    SUB_TASK_COUNT_ INTEGER,
    primary key (ID_)
);

create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_);
create index ACT_IDX_TASK_SCOPE on ACT_RU_TASK(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TASK_SUB_SCOPE on ACT_RU_TASK(SUB_SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_TASK_SCOPE_DEF on ACT_RU_TASK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_);

insert into ACT_GE_PROPERTY values ('task.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,8 @@
drop index ACT_IDX_HI_TASK_SCOPE;
drop index ACT_IDX_HI_TASK_SUB_SCOPE;
drop index ACT_IDX_HI_TASK_SCOPE_DEF;

drop sequence act_hi_task_evt_log_seq;

drop table ACT_HI_TASKINST;
drop table ACT_HI_TSK_LOG;
@@ -0,0 +1,6 @@
drop index ACT_IDX_TASK_CREATE;
drop index ACT_IDX_TASK_SCOPE;
drop index ACT_IDX_TASK_SUB_SCOPE;
drop index ACT_IDX_TASK_SCOPE_DEF;

drop table ACT_RU_TASK;
@@ -0,0 +1,26 @@
create table ACT_HI_VARINST (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER default 1,
    PROC_INST_ID_ VARCHAR2(64),
    EXECUTION_ID_ VARCHAR2(64),
    TASK_ID_ VARCHAR2(64),
    NAME_ VARCHAR2(255) not null,
    VAR_TYPE_ VARCHAR2(100),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    BYTEARRAY_ID_ VARCHAR2(64),
    DOUBLE_ NUMBER(38,10),
    LONG_ NUMBER(19,0),
    TEXT_ VARCHAR2(2000),
    TEXT2_ VARCHAR2(2000),
    META_INFO_ VARCHAR2(2000),
    CREATE_TIME_ TIMESTAMP(6),
    LAST_UPDATED_TIME_ TIMESTAMP(6),
    primary key (ID_)
);

create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_);
create index ACT_IDX_HI_VAR_SCOPE_ID_TYPE on ACT_HI_VARINST(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_HI_VAR_SUB_ID_TYPE on ACT_HI_VARINST(SUB_SCOPE_ID_, SCOPE_TYPE_);

@@ -0,0 +1,31 @@
create table ACT_RU_VARIABLE (
    ID_ VARCHAR2(64) not null,
    REV_ INTEGER,
    TYPE_ VARCHAR2(255) not null,
    NAME_ VARCHAR2(255) not null,
    EXECUTION_ID_ VARCHAR2(64),
    PROC_INST_ID_ VARCHAR2(64),
    TASK_ID_ VARCHAR2(64),
    SCOPE_ID_ VARCHAR2(255),
    SUB_SCOPE_ID_ VARCHAR2(255),
    SCOPE_TYPE_ VARCHAR2(255),
    BYTEARRAY_ID_ VARCHAR2(64),
    DOUBLE_ NUMBER(38,10),
    LONG_ NUMBER(19,0),
    TEXT_ VARCHAR2(2000),
    TEXT2_ VARCHAR2(2000),
    META_INFO_ VARCHAR2(2000),
    primary key (ID_)
);

create index ACT_IDX_RU_VAR_SCOPE_ID_TYPE on ACT_RU_VARIABLE(SCOPE_ID_, SCOPE_TYPE_);
create index ACT_IDX_RU_VAR_SUB_ID_TYPE on ACT_RU_VARIABLE(SUB_SCOPE_ID_, SCOPE_TYPE_);

create index ACT_IDX_VAR_BYTEARRAY on ACT_RU_VARIABLE(BYTEARRAY_ID_);
alter table ACT_RU_VARIABLE
    add constraint ACT_FK_VAR_BYTEARRAY
    foreign key (BYTEARRAY_ID_)
    references ACT_GE_BYTEARRAY (ID_);

insert into ACT_GE_PROPERTY values ('variable.schema.version', '7.0.1.1', 1);

@@ -0,0 +1,6 @@
drop index ACT_IDX_HI_PROCVAR_NAME_TYPE;
drop index ACT_IDX_HI_VAR_SCOPE_ID_TYPE;
drop index ACT_IDX_HI_VAR_SUB_ID_TYPE;

drop table ACT_HI_VARINST;

@@ -0,0 +1,9 @@
drop index ACT_IDX_VAR_BYTEARRAY;
drop index ACT_IDX_RU_VAR_SCOPE_ID_TYPE;
drop index ACT_IDX_RU_VAR_SUB_ID_TYPE;

alter table ACT_RU_VARIABLE
    drop CONSTRAINT ACT_FK_VAR_BYTEARRAY;

drop table ACT_RU_VARIABLE;

@@ -7,17 +7,23 @@ import com.zt.plat.module.databus.controller.admin.gateway.vo.accesslog.ApiAccessLogPageReqVO;
import com.zt.plat.module.databus.controller.admin.gateway.vo.accesslog.ApiAccessLogRespVO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiAccessLogDO;
import com.zt.plat.module.databus.service.gateway.ApiAccessLogService;
import com.zt.plat.module.databus.service.gateway.ApiDefinitionService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import jakarta.validation.Valid;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.zt.plat.framework.common.pojo.CommonResult.success;

@@ -33,13 +39,18 @@ public class ApiAccessLogController {
    @Resource
    private ApiAccessLogService apiAccessLogService;

    @Resource
    private ApiDefinitionService apiDefinitionService;

    @GetMapping("/get")
    @Operation(summary = "获取访问日志详情")
    @Parameter(name = "id", description = "日志编号", required = true, example = "1024")
    @PreAuthorize("@ss.hasPermission('databus:gateway:access-log:query')")
    public CommonResult<ApiAccessLogRespVO> get(@RequestParam("id") Long id) {
        ApiAccessLogDO logDO = apiAccessLogService.get(id);
        return success(ApiAccessLogConvert.INSTANCE.convert(logDO));
        ApiAccessLogRespVO respVO = ApiAccessLogConvert.INSTANCE.convert(logDO);
        enrichDefinitionInfo(respVO);
        return success(respVO);
    }

    @GetMapping("/page")
@@ -47,6 +58,51 @@ public class ApiAccessLogController {
    @PreAuthorize("@ss.hasPermission('databus:gateway:access-log:query')")
    public CommonResult<PageResult<ApiAccessLogRespVO>> page(@Valid ApiAccessLogPageReqVO pageReqVO) {
        PageResult<ApiAccessLogDO> pageResult = apiAccessLogService.getPage(pageReqVO);
        return success(ApiAccessLogConvert.INSTANCE.convertPage(pageResult));
        PageResult<ApiAccessLogRespVO> result = ApiAccessLogConvert.INSTANCE.convertPage(pageResult);
        enrichDefinitionInfo(result.getList());
        return success(result);
    }

    private void enrichDefinitionInfo(List<ApiAccessLogRespVO> list) {
        // Batch-fill API descriptions for the page result, using a local cache to avoid duplicate lookups
        if (CollectionUtils.isEmpty(list)) {
            return;
        }
        Map<String, String> cache = new HashMap<>(list.size());
        list.forEach(item -> {
            if (item == null) {
                return;
            }
            String cacheKey = buildCacheKey(item.getApiCode(), item.getApiVersion());
            if (!cache.containsKey(cacheKey)) {
                cache.put(cacheKey, resolveApiDescription(item.getApiCode(), item.getApiVersion()));
            }
            item.setApiDescription(cache.get(cacheKey));
        });
    }

    private void enrichDefinitionInfo(ApiAccessLogRespVO item) {
        // A single record needs its description filled in as well
        if (item == null) {
            return;
        }
        item.setApiDescription(resolveApiDescription(item.getApiCode(), item.getApiVersion()));
    }

    private String resolveApiDescription(String apiCode, String apiVersion) {
        if (!StringUtils.hasText(apiCode)) {
            return null;
        }
        String normalizedVersion = StringUtils.hasText(apiVersion) ? apiVersion.trim() : apiVersion;
        // Resolve the API description via the gateway definition service to make the page more readable
        return apiDefinitionService.findByCodeAndVersionIncludingInactive(apiCode, normalizedVersion)
                .map(aggregate -> aggregate.getDefinition() != null ? aggregate.getDefinition().getDescription() : null)
                .filter(StringUtils::hasText)
                .orElse(null);
    }

    private String buildCacheKey(String apiCode, String apiVersion) {
        // Compose a unique key so the same API description is never looked up twice
        return (apiCode == null ? "" : apiCode) + "#" + (apiVersion == null ? "" : apiVersion);
    }
}

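A note on the cache shape in enrichDefinitionInfo above: the explicit containsKey/put pair, rather than Map.computeIfAbsent, is what lets the cache remember misses too, since computeIfAbsent records no mapping when the function returns null. A minimal self-contained sketch of the difference (the key value is hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    class NullMissCacheSketch {
        public static void main(String[] args) {
            Map<String, String> cache = new HashMap<>();
            // computeIfAbsent stores nothing when the function returns null,
            // so an API without a description would be re-resolved for every row:
            cache.computeIfAbsent("user.query#v1", key -> null);
            System.out.println(cache.containsKey("user.query#v1")); // false
            // The explicit containsKey/put pair caches the null miss as well,
            // so the lookup runs at most once per code#version key:
            if (!cache.containsKey("user.query#v1")) {
                cache.put("user.query#v1", null);
            }
            System.out.println(cache.containsKey("user.query#v1")); // true
        }
    }
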
@@ -4,7 +4,9 @@ import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.module.databus.controller.admin.gateway.vo.accesslog.ApiAccessLogRespVO;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiAccessLogDO;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.factory.Mappers;
import org.springframework.http.HttpStatus;

import java.util.List;

@@ -13,6 +15,8 @@ public interface ApiAccessLogConvert {

    ApiAccessLogConvert INSTANCE = Mappers.getMapper(ApiAccessLogConvert.class);

    @Mapping(target = "statusDesc", expression = "java(statusDesc(bean.getStatus()))")
    @Mapping(target = "responseStatusText", expression = "java(resolveHttpStatusText(bean.getResponseStatus()))")
    ApiAccessLogRespVO convert(ApiAccessLogDO bean);

    List<ApiAccessLogRespVO> convertList(List<ApiAccessLogDO> list);
@@ -26,4 +30,26 @@ public interface ApiAccessLogConvert {
        result.setTotal(page.getTotal());
        return result;
    }

    default String statusDesc(Integer status) {
        // Convert the numeric status code into a Chinese label the frontend can display directly
        if (status == null) {
            return "未知";
        }
        return switch (status) {
            case 0 -> "成功";
            case 1 -> "客户端错误";
            case 2 -> "服务端错误";
            default -> "未知";
        };
    }

    default String resolveHttpStatusText(Integer status) {
        // Resolve the standard reason phrase consistently via Spring's HttpStatus
        if (status == null) {
            return null;
        }
        HttpStatus resolved = HttpStatus.resolve(status);
        return resolved != null ? resolved.getReasonPhrase() : null;
    }
}

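Worth noting for resolveHttpStatusText: HttpStatus.resolve returns null for any code that is not a standard HTTP status, so non-standard upstream codes yield a null responseStatusText rather than an exception. A quick sketch (the 599 value is hypothetical):

    import org.springframework.http.HttpStatus;

    class ReasonPhraseSketch {
        public static void main(String[] args) {
            HttpStatus ok = HttpStatus.resolve(200);          // HttpStatus.OK
            HttpStatus nonStandard = HttpStatus.resolve(599); // null: 599 is not a standard code
            System.out.println(ok != null ? ok.getReasonPhrase() : null);                   // "OK"
            System.out.println(nonStandard != null ? nonStandard.getReasonPhrase() : null); // null
        }
    }
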
@@ -21,6 +21,9 @@ public class ApiAccessLogRespVO {
    @Schema(description = "API 编码", example = "user.query")
    private String apiCode;

    @Schema(description = "API 描述", example = "用户查询服务")
    private String apiDescription;

    @Schema(description = "API 版本", example = "v1")
    private String apiVersion;

@@ -42,6 +45,9 @@ public class ApiAccessLogRespVO {
    @Schema(description = "响应 HTTP 状态", example = "200")
    private Integer responseStatus;

    @Schema(description = "响应 HTTP 状态说明", example = "OK")
    private String responseStatusText;

    @Schema(description = "响应提示", example = "OK")
    private String responseMessage;

@@ -51,6 +57,9 @@ public class ApiAccessLogRespVO {
    @Schema(description = "访问状态", example = "0")
    private Integer status;

    @Schema(description = "访问状态展示文案", example = "成功")
    private String statusDesc;

    @Schema(description = "错误码", example = "DAT-001")
    private String errorCode;

@@ -28,7 +28,7 @@ public class ApiAccessLogDO extends TenantBaseDO {
    private Long id;

    /**
     * Request trace identifier; corresponds to {@link com.zt.plat.module.databus.framework.integration.gateway.model.ApiInvocationContext#getRequestId()}
     * Request trace identifier; corresponds to {@link com.zt.plat.framework.common.util.monitor.TracerUtils#getTraceId()}
     */
    private String traceId;

@@ -1,5 +1,7 @@
package com.zt.plat.module.databus.framework.integration.gateway.config;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.reactive.function.client.WebClientCustomizer;
import org.springframework.context.annotation.Bean;
@@ -17,33 +19,43 @@ public class GatewayWebClientConfiguration {
    private final int maxInMemorySize;
    private final long maxIdleTimeMillis;
    private final long evictInBackgroundMillis;
    private final boolean connectionPoolEnabled;
    private final ReactorClientHttpConnector httpConnector;
    private static final Logger log = LoggerFactory.getLogger(GatewayWebClientConfiguration.class);

    public GatewayWebClientConfiguration(
            @Value("${databus.gateway.web-client.max-in-memory-size:20971520}") int maxInMemorySize,
            @Value("${databus.gateway.web-client.max-idle-time:45000}") long maxIdleTimeMillis,
            @Value("${databus.gateway.web-client.evict-in-background-interval:20000}") long evictInBackgroundMillis) {
            @Value("${databus.gateway.web-client.evict-in-background-interval:20000}") long evictInBackgroundMillis,
            @Value("${databus.gateway.web-client.connection-pool-enabled:true}") boolean connectionPoolEnabled) {
        this.maxInMemorySize = maxInMemorySize;
        this.maxIdleTimeMillis = maxIdleTimeMillis > 0 ? maxIdleTimeMillis : 45000L;
        this.evictInBackgroundMillis = Math.max(evictInBackgroundMillis, 0L);
        this.maxIdleTimeMillis = maxIdleTimeMillis;
        this.evictInBackgroundMillis = evictInBackgroundMillis;
        this.connectionPoolEnabled = connectionPoolEnabled;
        this.httpConnector = buildConnector();
    }

    @Bean
    public WebClientCustomizer gatewayWebClientCustomizer() {
        // Configure the WebClient connector and in-memory limit in one place to avoid duplicated setup
        return builder -> builder
                .clientConnector(httpConnector)
                .codecs(configurer -> configurer.defaultCodecs().maxInMemorySize(maxInMemorySize));
    }

    private ReactorClientHttpConnector buildConnector() {
        ConnectionProvider.Builder providerBuilder = ConnectionProvider.builder("databus-gateway")
                .maxIdleTime(Duration.ofMillis(maxIdleTimeMillis));
        if (evictInBackgroundMillis > 0) {
            providerBuilder.evictInBackground(Duration.ofMillis(evictInBackgroundMillis));
        if (connectionPoolEnabled) {
            // Pool enabled: apply the idle-eviction parameters from configuration
            ConnectionProvider provider = ConnectionProvider.builder("databus-gateway")
                    .maxIdleTime(Duration.ofMillis(maxIdleTimeMillis))
                    .evictInBackground(Duration.ofMillis(evictInBackgroundMillis))
                    .build();
            log.info("Databus gateway WebClient 已启用连接池 (maxIdleTime={}ms, evictInterval={}ms)",
                    maxIdleTimeMillis, evictInBackgroundMillis);
            return new ReactorClientHttpConnector(HttpClient.create(provider).compress(true));
        }
        ConnectionProvider provider = providerBuilder.build();
        HttpClient httpClient = HttpClient.create(provider).compress(true);
        return new ReactorClientHttpConnector(httpClient);
        // Pool disabled: every request establishes a brand-new TCP connection
        log.info("Databus gateway WebClient 已禁用连接池,所有请求将使用全新连接");
        return new ReactorClientHttpConnector(HttpClient.create().compress(true));
    }
}

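The four properties bound by the @Value annotations above can be tuned per environment; a sketch of the corresponding YAML, with the code's own defaults as values:

    databus:
      gateway:
        web-client:
          max-in-memory-size: 20971520        # codec buffer limit in bytes (20 MiB)
          max-idle-time: 45000                # pooled connection idle timeout, ms
          evict-in-background-interval: 20000 # background eviction interval, ms
          connection-pool-enabled: true       # set false to force a fresh connection per request
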
@@ -2,12 +2,14 @@ package com.zt.plat.module.databus.framework.integration.gateway.core;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.zt.plat.framework.common.util.monitor.TracerUtils;
import com.zt.plat.module.databus.dal.dataobject.gateway.ApiAccessLogDO;
import com.zt.plat.module.databus.framework.integration.gateway.model.ApiInvocationContext;
import com.zt.plat.module.databus.service.gateway.ApiAccessLogService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
@@ -42,8 +44,9 @@ public class ApiGatewayAccessLogger {
     */
    public void onRequest(ApiInvocationContext context) {
        try {
            String traceId = TracerUtils.getTraceId();
            ApiAccessLogDO logDO = new ApiAccessLogDO();
            logDO.setTraceId(context.getRequestId());
            logDO.setTraceId(traceId);
            logDO.setApiCode(context.getApiCode());
            logDO.setApiVersion(context.getApiVersion());
            logDO.setRequestMethod(context.getHttpMethod());
@@ -60,7 +63,7 @@ public class ApiGatewayAccessLogger {
            Long logId = apiAccessLogService.create(logDO);
            context.getAttributes().put(ATTR_LOG_ID, logId);
        } catch (Exception ex) {
            log.warn("记录 API 访问日志开始阶段失败, traceId={}", context.getRequestId(), ex);
            log.warn("记录 API 访问日志开始阶段失败, traceId={}", TracerUtils.getTraceId(), ex);
        }
    }

@@ -85,12 +88,18 @@ public class ApiGatewayAccessLogger {
        try {
            ApiAccessLogDO update = new ApiAccessLogDO();
            update.setId(logId);
            update.setResponseStatus(context.getResponseStatus());
            update.setResponseMessage(context.getResponseMessage());
            int responseStatus = resolveHttpStatus(context);
            context.setResponseStatus(responseStatus);
            update.setResponseStatus(responseStatus);
            String responseMessage = resolveResponseMessage(context, responseStatus);
            update.setResponseMessage(responseMessage);
            if (!StringUtils.hasText(context.getResponseMessage()) && StringUtils.hasText(responseMessage)) {
                context.setResponseMessage(responseMessage);
            }
            update.setResponseBody(toJson(context.getResponseBody()));
            update.setStatus(resolveStatus(context.getResponseStatus()));
            update.setErrorCode(extractErrorCode(context.getResponseBody()));
            update.setErrorMessage(resolveErrorMessage(context));
            update.setStatus(resolveStatus(responseStatus));
            update.setErrorCode(extractErrorCode(context.getResponseBody(), responseStatus));
            update.setErrorMessage(resolveErrorMessage(context, responseStatus));
            update.setExceptionStack((String) context.getAttributes().get(ATTR_EXCEPTION_STACK));
            update.setStepResults(toJson(context.getStepResults()));
            update.setExtra(toJson(buildExtra(context)));
@@ -98,7 +107,7 @@ public class ApiGatewayAccessLogger {
            update.setDuration(calculateDuration(context));
            apiAccessLogService.update(update);
        } catch (Exception ex) {
            log.warn("记录 API 访问日志结束阶段失败, traceId={}, logId={}", context.getRequestId(), logId, ex);
            log.warn("记录 API 访问日志结束阶段失败, traceId={}, logId={}", TracerUtils.getTraceId(), logId, ex);
        }
    }

@@ -137,7 +146,10 @@ public class ApiGatewayAccessLogger {
        return 3;
    }

    private String resolveErrorMessage(ApiInvocationContext context) {
    private String resolveErrorMessage(ApiInvocationContext context, int responseStatus) {
        if (!isErrorStatus(responseStatus)) {
            return null;
        }
        if (StringUtils.hasText(context.getResponseMessage())) {
            return truncate(context.getResponseMessage());
        }
@@ -151,7 +163,10 @@ public class ApiGatewayAccessLogger {
        return null;
    }

    private String extractErrorCode(Object responseBody) {
    private String extractErrorCode(Object responseBody, int responseStatus) {
        if (!isErrorStatus(responseStatus)) {
            return null;
        }
        if (responseBody instanceof Map<?, ?> map) {
            Object errorCode = firstNonNull(map.get("errorCode"), map.get("code"));
            return errorCode == null ? null : truncate(String.valueOf(errorCode));
@@ -159,6 +174,27 @@ public class ApiGatewayAccessLogger {
        return null;
    }

    private int resolveHttpStatus(ApiInvocationContext context) {
        Integer status = context.getResponseStatus();
        if (status != null) {
            return status;
        }
        // Fall back to 200 so the access log never records an empty HTTP status code
        return HttpStatus.OK.value();
    }

    private String resolveResponseMessage(ApiInvocationContext context, int responseStatus) {
        if (StringUtils.hasText(context.getResponseMessage())) {
            return truncate(context.getResponseMessage());
        }
        HttpStatus resolved = HttpStatus.resolve(responseStatus);
        return resolved != null ? resolved.getReasonPhrase() : null;
    }

    private boolean isErrorStatus(int responseStatus) {
        return responseStatus >= 400;
    }

    private Map<String, Object> buildExtra(ApiInvocationContext context) {
        Map<String, Object> extra = new HashMap<>();
        if (!CollectionUtils.isEmpty(context.getVariables())) {

|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.zt.plat.framework.common.exception.ServiceException;
|
||||
import com.zt.plat.framework.common.exception.util.ServiceExceptionUtil;
|
||||
import com.zt.plat.framework.common.util.monitor.TracerUtils;
|
||||
import com.zt.plat.module.databus.controller.admin.gateway.vo.ApiGatewayInvokeReqVO;
|
||||
import com.zt.plat.module.databus.framework.integration.config.ApiGatewayProperties;
|
||||
import com.zt.plat.module.databus.framework.integration.gateway.domain.ApiDefinitionAggregate;
|
||||
@@ -236,12 +237,12 @@ public class ApiGatewayExecutionService {
|
||||
String message = StringUtils.hasText(context.getResponseMessage())
|
||||
? context.getResponseMessage()
|
||||
: HttpStatus.valueOf(status).getReasonPhrase();
|
||||
return ApiGatewayResponse.builder()
|
||||
return ApiGatewayResponse.builder()
|
||||
.code(status)
|
||||
.message(message)
|
||||
.response(context.getResponseBody())
|
||||
.traceId(context.getRequestId())
|
||||
.build();
|
||||
.message(message)
|
||||
.response(context.getResponseBody())
|
||||
.traceId(TracerUtils.getTraceId())
|
||||
.build();
|
||||
}
|
||||
|
||||
private String normalizeBasePath(String basePath) {
|
||||
|
||||
@@ -4,6 +4,7 @@ import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.zt.plat.framework.common.util.json.JsonUtils;
import com.zt.plat.framework.common.util.monitor.TracerUtils;
import com.zt.plat.framework.common.util.security.CryptoSignatureUtils;
import com.zt.plat.framework.common.util.servlet.ServletUtils;
import com.zt.plat.framework.security.core.LoginUser;
@@ -464,11 +465,12 @@ public class GatewaySecurityFilter extends OncePerRequestFilter {
        response.resetBuffer();
        response.setStatus(status.value());
        String resolvedMessage = StringUtils.hasText(message) ? message : status.getReasonPhrase();
        ApiGatewayResponse envelope = ApiGatewayResponse.builder()
        String traceId = TracerUtils.getTraceId();
        ApiGatewayResponse envelope = ApiGatewayResponse.builder()
                .code(status.value())
                .message(resolvedMessage)
                .response(null)
                .traceId(null)
                .traceId(traceId)
                .build();
        if (shouldEncryptErrorResponse(security, credential)) {
            String encryptionKey = credential.getEncryptionKey();

@@ -131,4 +131,9 @@ zt:
    ignore-tables:
      - databus_api_client_credential

databus:
  gateway:
    web-client:
      connection-pool-enabled: false # The pool is enabled by default; disable it temporarily when troubleshooting long-lived connection issues

debug: false

@@ -0,0 +1,360 @@
package com.zt.plat.module.databus.framework.integration.gateway.step.impl;

import com.zt.plat.framework.common.exception.ServiceException;
import org.junit.jupiter.api.Test;
import org.springframework.http.MediaType;
import org.springframework.http.client.reactive.ReactorClientHttpConnector;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Mono;
import reactor.netty.http.client.HttpClient;
import reactor.netty.http.client.PrematureCloseException;
import reactor.netty.resources.ConnectionProvider;
import reactor.util.retry.Retry;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.*;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Demonstrates the stale-connection scenario using the legacy vs the deferred retry pipeline.
 */
class HttpStepHandlerConnectionResetScenarioTest {

    private static final Duration RETRY_DELAY = Duration.ofMillis(200);
    private static final int RETRY_ATTEMPTS = 3;
    private static final Duration BLOCK_TIMEOUT = Duration.ofSeconds(5);
    private static final Duration RESET_WAIT = Duration.ofMillis(300);

    @Test
    void legacyPipelineLosesSuccessfulRetry() throws Exception {
        try (ResetOnceHttpServer server = new ResetOnceHttpServer()) {
            WebClient webClient = createWebClient();
            URI uri = server.uri("/demo");

            warmUp(server, webClient, uri);
            server.awaitWarmupConnectionReset(RESET_WAIT);

            legacyInvoke(webClient, uri, Map.of("mode", "legacy"));

            server.awaitFreshResponses(1, Duration.ofSeconds(2));
            assertThat(server.getFreshResponseCount()).isEqualTo(1);
            assertThat(server.getServedBodies()).contains("reset", "fresh");
        }
    }

    @Test
    void deferredPipelinePropagatesSuccessfulRetry() throws Exception {
        try (ResetOnceHttpServer server = new ResetOnceHttpServer()) {
            WebClient webClient = createWebClient();
            URI uri = server.uri("/demo");

            warmUp(server, webClient, uri);
            server.awaitWarmupConnectionReset(RESET_WAIT);

            Object result = deferredInvoke(webClient, uri, Map.of("mode", "defer"));
            assertThat(result).isInstanceOf(Map.class);
            Map<?, ?> resultMap = (Map<?, ?>) result;
            assertThat(resultMap.get("stage")).isEqualTo("fresh");

            server.awaitFreshResponses(1, Duration.ofSeconds(2));
            assertThat(server.getFreshResponseCount()).isEqualTo(1);
            assertThat(server.getServedBodies()).contains("reset", "fresh");
        }
    }

    private WebClient createWebClient() {
        ConnectionProvider provider = ConnectionProvider.builder("http-step-handler-demo")
                .maxConnections(1)
                .pendingAcquireMaxCount(-1)
                .maxIdleTime(Duration.ofSeconds(5))
                .build();
        HttpClient httpClient = HttpClient.create(provider).compress(true);
        return WebClient.builder()
                .clientConnector(new ReactorClientHttpConnector(httpClient))
                .build();
    }

    private void warmUp(ResetOnceHttpServer server, WebClient webClient, URI uri) {
        webClient.post()
                .uri(uri)
                .contentType(MediaType.APPLICATION_JSON)
                .accept(MediaType.APPLICATION_JSON)
                .bodyValue(Map.of("warm", true))
                .retrieve()
                .bodyToMono(Object.class)
                .block(BLOCK_TIMEOUT);
        server.awaitWarmupResponse(Duration.ofSeconds(2));
    }

    private Object legacyInvoke(WebClient webClient, URI uri, Object body) {
        WebClient.RequestHeadersSpec<?> spec = webClient.post()
                .uri(uri)
                .contentType(MediaType.APPLICATION_JSON)
                .accept(MediaType.APPLICATION_JSON)
                .bodyValue(body);
        Mono<Object> responseMono = spec.retrieve()
                .bodyToMono(Object.class)
                // Simulates the production scenario where the failed result is cached after the first subscription
                .cache();
        return responseMono.retryWhen(Retry.fixedDelay(RETRY_ATTEMPTS, RETRY_DELAY)
                        .filter(this::isRetryableException)
                        .onRetryExhaustedThrow((specification, signal) -> signal.failure()))
                .block(BLOCK_TIMEOUT);
    }

    private Object deferredInvoke(WebClient webClient, URI uri, Object body) {
        Mono<Object> responseMono = Mono.defer(() -> webClient.post()
                .uri(uri)
                .contentType(MediaType.APPLICATION_JSON)
                .accept(MediaType.APPLICATION_JSON)
                .bodyValue(body)
                .retrieve()
                .bodyToMono(Object.class)
                // With defer, every retry re-creates the cached response Mono from scratch
                .cache());
        return responseMono.retryWhen(Retry.fixedDelay(RETRY_ATTEMPTS, RETRY_DELAY)
                        .filter(this::isRetryableException))
                .block(BLOCK_TIMEOUT);
    }

    private boolean isRetryableException(Throwable throwable) {
        if (throwable == null) {
            return false;
        }
        Throwable cursor = throwable;
        while (cursor != null) {
            if (cursor instanceof ServiceException) {
                return false;
            }
            if (cursor instanceof PrematureCloseException) {
                return true;
            }
            if (cursor instanceof IOException) {
                return true;
            }
            cursor = cursor.getCause();
        }
        return false;
    }

    private static final class ResetOnceHttpServer implements AutoCloseable {

        private static final Duration RESET_DELAY = Duration.ofMillis(250);

        private final ServerSocket serverSocket;
        private final ExecutorService acceptExecutor;
        private final ScheduledExecutorService scheduler;
        private final AtomicInteger connectionCount = new AtomicInteger();
        private final AtomicInteger freshResponses = new AtomicInteger();
        private final CountDownLatch warmupResponseSent = new CountDownLatch(1);
        private final CountDownLatch warmupReset = new CountDownLatch(1);
        private final List<String> servedBodies = new CopyOnWriteArrayList<>();
        private volatile boolean running = true;
        private volatile Socket warmupSocket;

        ResetOnceHttpServer() throws IOException {
            this.serverSocket = new ServerSocket(0, 50, InetAddress.getByName("127.0.0.1"));
            this.serverSocket.setReuseAddress(true);
            this.acceptExecutor = Executors.newSingleThreadExecutor(r -> {
                Thread t = new Thread(r, "reset-once-http-accept");
                t.setDaemon(true);
                return t;
            });
            this.scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
                Thread t = new Thread(r, "reset-once-http-scheduler");
                t.setDaemon(true);
                return t;
            });
            acceptExecutor.submit(this::acceptLoop);
        }

        URI uri(String path) {
            Objects.requireNonNull(path, "path");
            if (!path.startsWith("/")) {
                path = "/" + path;
            }
            return URI.create("http://127.0.0.1:" + serverSocket.getLocalPort() + path);
        }

        List<String> getServedBodies() {
            return new ArrayList<>(servedBodies);
        }

        int getFreshResponseCount() {
            return freshResponses.get();
        }

        void awaitWarmupResponse(Duration timeout) {
            awaitLatch(warmupResponseSent, timeout);
        }

        void awaitWarmupConnectionReset(Duration timeout) {
            awaitLatch(warmupReset, timeout);
        }

        void awaitFreshResponses(int expected, Duration timeout) {
            long deadline = System.nanoTime() + timeout.toNanos();
            while (freshResponses.get() < expected && System.nanoTime() < deadline) {
                try {
                    Thread.sleep(10);
                } catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }

        private void awaitLatch(CountDownLatch latch, Duration timeout) {
            try {
                if (!latch.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
                    throw new IllegalStateException("Timed out waiting for latch");
                }
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                throw new IllegalStateException(ex);
            }
        }

        private void acceptLoop() {
            try {
                while (running) {
                    Socket socket = serverSocket.accept();
                    int index = connectionCount.incrementAndGet();
                    handle(socket, index);
                }
            } catch (SocketException ex) {
                if (running) {
                    throw new IllegalStateException("Unexpected server socket error", ex);
                }
            } catch (IOException ex) {
                if (running) {
                    throw new IllegalStateException("I/O error in server", ex);
                }
            }
        }

        private void handle(Socket socket, int index) {
            try {
                socket.setTcpNoDelay(true);
                RequestMetadata metadata = readRequest(socket);
                if (index == 1) {
                    warmupSocket = socket;
                    String body = "{\"stage\":\"warmup\",\"path\":\"" + metadata.path() + "\"}";
                    writeResponse(socket, body, true);
                    servedBodies.add("warmup");
                    warmupResponseSent.countDown();
                    scheduler.schedule(() -> forceReset(socket), RESET_DELAY.toMillis(), TimeUnit.MILLISECONDS);
                } else if (index == 2) {
                    // Simulates the client reusing a stale pooled connection that the server resets right after the request arrives.
                    servedBodies.add("reset");
                    scheduler.schedule(() -> closeWithReset(socket), 10, TimeUnit.MILLISECONDS);
                } else {
                    String body = "{\"stage\":\"fresh\",\"attempt\":" + index + "}";
                    writeResponse(socket, body, false);
                    servedBodies.add("fresh");
                    freshResponses.incrementAndGet();
                    socket.close();
                }
            } catch (IOException ex) {
                // ignore for the purpose of the test
            }
        }

        private void forceReset(Socket socket) {
            try {
                if (!socket.isClosed()) {
                    servedBodies.add("reset");
                    closeWithReset(socket);
                }
            } finally {
                warmupReset.countDown();
            }
        }

        private void closeWithReset(Socket socket) {
            try {
                if (!socket.isClosed()) {
                    socket.setSoLinger(true, 0);
                    socket.close();
                }
            } catch (IOException ignored) {
                // ignore
            }
        }

        private RequestMetadata readRequest(Socket socket) throws IOException {
            BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.US_ASCII));
            String requestLine = reader.readLine();
            if (requestLine == null) {
                return new RequestMetadata("unknown", 0);
            }
            String path = requestLine.split(" ", 3)[1];
            int contentLength = 0;
            String line;
            while ((line = reader.readLine()) != null && !line.isEmpty()) {
                if (line.toLowerCase(Locale.ROOT).startsWith("content-length:")) {
                    contentLength = Integer.parseInt(line.substring(line.indexOf(':') + 1).trim());
                }
            }
            if (contentLength > 0) {
                char[] buffer = new char[contentLength];
                int read = 0;
                while (read < contentLength) {
                    int r = reader.read(buffer, read, contentLength - read);
                    if (r < 0) {
                        break;
                    }
                    read += r;
                }
            }
            return new RequestMetadata(path, contentLength);
        }

        private void writeResponse(Socket socket, String body, boolean keepAlive) throws IOException {
            byte[] bodyBytes = body.getBytes(StandardCharsets.UTF_8);
            StringBuilder builder = new StringBuilder()
                    .append("HTTP/1.1 200 OK\r\n")
                    .append("Content-Type: application/json\r\n")
                    .append("Content-Length: ").append(bodyBytes.length).append("\r\n");
            if (keepAlive) {
                builder.append("Connection: keep-alive\r\n");
            } else {
                builder.append("Connection: close\r\n");
            }
            builder.append("\r\n");
            OutputStream outputStream = socket.getOutputStream();
            outputStream.write(builder.toString().getBytes(StandardCharsets.US_ASCII));
            outputStream.write(bodyBytes);
            outputStream.flush();
        }

        @Override
        public void close() throws Exception {
            running = false;
            try {
                serverSocket.close();
            } catch (IOException ignored) {
            }
            if (warmupSocket != null && !warmupSocket.isClosed()) {
                try {
                    warmupSocket.close();
                } catch (IOException ignored) {
                }
            }
            scheduler.shutdownNow();
            acceptExecutor.shutdownNow();
        }

        private record RequestMetadata(String path, int contentLength) {
        }
    }
}

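The core difference the two tests exercise can be reduced to where cache() sits relative to the retry boundary; a minimal sketch of both compositions (class and method names hypothetical, WebClient wiring as in createWebClient above):

    import org.springframework.web.reactive.function.client.WebClient;
    import reactor.core.publisher.Mono;
    import reactor.util.retry.Retry;

    import java.net.URI;
    import java.time.Duration;

    class RetryCompositionSketch {

        // Legacy shape: cache() sits outside the retry boundary, so every retry
        // re-subscribes to the already-cached terminal signal instead of sending
        // a new HTTP request; a transient connection reset is therefore replayed.
        static Mono<Object> legacy(WebClient client, URI uri) {
            Mono<Object> cached = client.post().uri(uri).retrieve().bodyToMono(Object.class).cache();
            return cached.retryWhen(Retry.fixedDelay(3, Duration.ofMillis(200)));
        }

        // Deferred shape: Mono.defer rebuilds the cached Mono for each subscription,
        // so every retry attempt issues a fresh request over a fresh connection.
        static Mono<Object> deferred(WebClient client, URI uri) {
            return Mono.defer(() -> client.post().uri(uri).retrieve().bodyToMono(Object.class).cache())
                    .retryWhen(Retry.fixedDelay(3, Duration.ofMillis(200)));
        }
    }
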
@@ -92,7 +92,7 @@ public class BusinessFileController {

    @GetMapping("/page")
    @Operation(summary = "获得业务附件关联分页")
    @PreAuthorize("@ss.hasPermission('infra:business-file:query')")
    @PreAuthorize("@ss.hasAnyPermissions({'infra:business-file:query','supply:purchase-credit-granting-form-template:query-list','supply:purchase-amount-request-form-template:query-list'})")
    public CommonResult<PageResult<BusinessFileRespVO>> getBusinessFilePage(@Valid BusinessFilePageReqVO pageReqVO) {
        PageResult<BusinessFileDO> pageResult = businessFileService.getBusinessFilePage(pageReqVO);
        return success(BeanUtils.toBean(pageResult, BusinessFileRespVO.class));

@@ -29,6 +29,9 @@ import java.time.Duration;
 * @author ZT
 */
public class S3FileClient extends AbstractFileClient<S3FileClientConfig> {

    private static final Duration DEFAULT_PRESIGNED_EXPIRATION = Duration.ofHours(24);
    private static final String PRESIGN_EXPIRE_SECONDS_PROPERTY = "zt.file.download-expire-seconds";

    /**
     * Generate a temporary download address (presigned download URL)
     * @param path file path
@@ -37,17 +40,7 @@ public class S3FileClient extends AbstractFileClient<S3FileClientConfig> {
     */
    @Override
    public String getPresignedDownloadUrl(String path, Duration expiration) {
        Duration realExpiration = expiration;
        if (realExpiration == null) {
            long expireSeconds = 30; // default 30 seconds
            try {
                String val = SpringUtils.getProperty("zt.file.download-expire-seconds");
                if (val != null && !val.isEmpty()) {
                    expireSeconds = Long.parseLong(val);
                }
            } catch (Exception ignored) {}
            realExpiration = Duration.ofSeconds(expireSeconds);
        }
        Duration realExpiration = expiration != null ? expiration : resolveDefaultExpiration();
        if (path == null) {
            return StringUtils.EMPTY;
        }
@@ -135,10 +128,25 @@ public class S3FileClient extends AbstractFileClient<S3FileClientConfig> {

    @Override
    public FilePresignedUrlRespDTO getPresignedObjectUrl(String path) {
        Duration expiration = Duration.ofHours(24);
        Duration expiration = resolveDefaultExpiration();
        return new FilePresignedUrlRespDTO(getPresignedUrl(path, expiration), config.getDomain() + "/" + path);
    }

    private Duration resolveDefaultExpiration() {
        String propertyValue = SpringUtils.getProperty(PRESIGN_EXPIRE_SECONDS_PROPERTY);
        if (StringUtils.isNotEmpty(propertyValue)) {
            try {
                long seconds = Long.parseLong(propertyValue);
                if (seconds > 0) {
                    return Duration.ofSeconds(seconds);
                }
            } catch (NumberFormatException ignored) {
                // ignore invalid config values and fall back to default
            }
        }
        return DEFAULT_PRESIGNED_EXPIRATION;
    }

    /**
     * Generate a dynamic presigned upload URL
     *

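With resolveDefaultExpiration in place, callers no longer need to know the fallback chain; a hedged usage sketch (the s3FileClient instance and object key are hypothetical):

    import java.time.Duration;

    // Null expiration -> zt.file.download-expire-seconds when set and positive, else 24 hours.
    String defaultUrl = s3FileClient.getPresignedDownloadUrl("2024/10/report.pdf", null);
    // An explicit expiration always overrides the configured default.
    String shortLived = s3FileClient.getPresignedDownloadUrl("2024/10/report.pdf", Duration.ofMinutes(5));
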
@@ -148,6 +148,8 @@ zt:
      key: "0123456789abcdef0123456789abcdef"
  # Attachment preview
  kkfile: "http://172.16.46.63:30012/onlinePreview?url="
  file:
    download-expire-seconds: 86400 # Default validity period (seconds) for object-storage presigned URLs
  info:
    version: 1.0.0
    base-package: com.zt.plat.module.infra

@@ -5,6 +5,10 @@
    <springProperty scope="context" name="zt.info.base-package" source="zt.info.base-package"/>
    <!-- Formatted output: %d is the date, %X{tid} the SkyWalking trace id, %thread the thread name, %-5level pads the level to 5 characters, %msg the log message, %n a newline -->
    <property name="PATTERN_DEFAULT" value="%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} | %highlight(${LOG_LEVEL_PATTERN:-%5p} ${PID:- }) | %boldYellow(%thread [%tid]) %boldGreen(%-40.40logger{39}) | %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
    <!-- Application name -->
    <springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
    <!-- Log output path -->
    <property name="LOG_DIR" value="${user.home}/logs/${spring.application.name}"/>

    <!-- Console appender -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
@@ -31,7 +35,7 @@
            <!-- Whether to clean historical logs on service start; generally not recommended -->
            <cleanHistoryOnStart>${LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START:-false}</cleanHistoryOnStart>
            <!-- Roll the log file once it reaches this size -->
            <maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-10MB}</maxFileSize>
            <maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-50MB}</maxFileSize>
            <!-- Total size cap for log files; 0 means unlimited -->
            <totalSizeCap>${LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP:-0}</totalSizeCap>
            <!-- Number of days to retain log files -->
@@ -56,18 +60,44 @@
        </encoder>
    </appender>

    <!-- ERROR-level log -->
    <appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}-error.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}-error.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>30</maxHistory> <!-- keep 30 days of logs -->
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Logback log levels: FATAL > ERROR > WARN > INFO > DEBUG -->
    <!-- Local environment -->
    <springProfile name="local">
        <root level="INFO">
    <springProfile name="local,dev">
        <root level="WARN">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="ERROR"/>
            <appender-ref ref="GRPC"/> <!-- In the local environment, comment this line out if you don't want SkyWalking log collection -->
            <appender-ref ref="ASYNC"/> <!-- In the local environment, comment this line out if you don't want to write logs -->
        </root>

        <!-- Per business package, set the DAO-layer SQL logging level to DEBUG -->
        <logger name="com.zt.plat.module.infra.dal.mysql" level="DEBUG" additivity="false">
            <appender-ref ref="STDOUT"/>
        </logger>
    </springProfile>

    <!-- Other environments -->
    <springProfile name="dev,test,stage,prod,default">
        <root level="INFO">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="ERROR"/>
            <appender-ref ref="ASYNC"/>
            <appender-ref ref="GRPC"/>
        </root>

@@ -6,6 +6,7 @@ import cn.hutool.core.util.ObjUtil;
import cn.hutool.core.util.StrUtil;
import com.zt.plat.framework.common.pojo.PageResult;
import com.zt.plat.module.infra.api.file.FileApi;
import com.zt.plat.module.infra.api.file.dto.FileCreateReqDTO;
import com.zt.plat.module.mp.controller.admin.material.vo.MpMaterialPageReqVO;
import com.zt.plat.module.mp.controller.admin.material.vo.MpMaterialUploadNewsImageReqVO;
import com.zt.plat.module.mp.controller.admin.material.vo.MpMaterialUploadPermanentReqVO;
@@ -218,7 +219,8 @@ public class MpMaterialServiceImpl implements MpMaterialService {

    private String uploadFile(String mediaId, File file) {
        String path = mediaId + "." + FileTypeUtil.getType(file);
        return fileApi.createFile(FileUtil.readBytes(file), path);
        FileCreateReqDTO createReqDTO = new FileCreateReqDTO()
                .setName(file.getName())
                .setDirectory(path)
                .setType(FileTypeUtil.getType(file))
                .setContent(FileUtil.readBytes(file));
        return fileApi.createFile(createReqDTO).getData();
    }

}

@@ -5,6 +5,10 @@
    <springProperty scope="context" name="zt.info.base-package" source="zt.info.base-package"/>
    <!-- Formatted output: %d is the date, %X{tid} the SkyWalking trace id, %thread the thread name, %-5level pads the level to 5 characters, %msg the log message, %n a newline -->
    <property name="PATTERN_DEFAULT" value="%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} | %highlight(${LOG_LEVEL_PATTERN:-%5p} ${PID:- }) | %boldYellow(%thread [%tid]) %boldGreen(%-40.40logger{39}) | %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
    <!-- Application name -->
    <springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
    <!-- Log output path -->
    <property name="LOG_DIR" value="${user.home}/logs/${spring.application.name}"/>

    <!-- Console appender -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
@@ -31,7 +35,7 @@
            <!-- Whether to clean historical logs on service start; generally not recommended -->
            <cleanHistoryOnStart>${LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START:-false}</cleanHistoryOnStart>
            <!-- Roll the log file once it reaches this size -->
            <maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-10MB}</maxFileSize>
            <maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-50MB}</maxFileSize>
            <!-- Total size cap for log files; 0 means unlimited -->
            <totalSizeCap>${LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP:-0}</totalSizeCap>
            <!-- Number of days to retain log files -->
@@ -56,18 +60,44 @@
        </encoder>
    </appender>

    <!-- ERROR-level log -->
    <appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}-error.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}-error.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>30</maxHistory> <!-- keep 30 days of logs -->
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Logback log levels: FATAL > ERROR > WARN > INFO > DEBUG -->
    <!-- Local environment -->
    <springProfile name="local">
        <root level="INFO">
    <springProfile name="local,dev">
        <root level="WARN">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="ERROR"/>
            <appender-ref ref="GRPC"/> <!-- In the local environment, comment this line out if you don't want SkyWalking log collection -->
            <appender-ref ref="ASYNC"/> <!-- In the local environment, comment this line out if you don't want to write logs -->
        </root>

        <!-- Per business package, set the DAO-layer SQL logging level to DEBUG -->
        <logger name="com.zt.plat.module.mp.dal.mysql" level="DEBUG" additivity="false">
            <appender-ref ref="STDOUT"/>
        </logger>
    </springProfile>

    <!-- Other environments -->
    <springProfile name="dev,test,stage,prod,default">
        <root level="INFO">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="ERROR"/>
            <appender-ref ref="ASYNC"/>
            <appender-ref ref="GRPC"/>
        </root>

@@ -5,6 +5,10 @@
    <springProperty scope="context" name="zt.info.base-package" source="zt.info.base-package"/>
    <!-- Formatted output: %d is the date, %X{tid} the SkyWalking trace id, %thread the thread name, %-5level pads the level to 5 characters, %msg the log message, %n a newline -->
    <property name="PATTERN_DEFAULT" value="%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} | %highlight(${LOG_LEVEL_PATTERN:-%5p} ${PID:- }) | %boldYellow(%thread [%tid]) %boldGreen(%-40.40logger{39}) | %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
    <!-- Application name -->
    <springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
    <!-- Log output path -->
    <property name="LOG_DIR" value="${user.home}/logs/${spring.application.name}"/>

    <!-- Console appender -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
@@ -31,7 +35,7 @@
            <!-- Whether to clean historical logs on service start; generally not recommended -->
            <cleanHistoryOnStart>${LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START:-false}</cleanHistoryOnStart>
            <!-- Roll the log file once it reaches this size -->
            <maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-10MB}</maxFileSize>
            <maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-50MB}</maxFileSize>
            <!-- Total size cap for log files; 0 means unlimited -->
            <totalSizeCap>${LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP:-0}</totalSizeCap>
            <!-- Number of days to retain log files -->
@@ -56,18 +60,44 @@
        </encoder>
    </appender>

    <!-- ERROR-level log -->
    <appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}-error.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}-error.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>30</maxHistory> <!-- keep 30 days of logs -->
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Logback log levels: FATAL > ERROR > WARN > INFO > DEBUG -->
    <!-- Local environment -->
    <springProfile name="local">
        <root level="INFO">
    <springProfile name="local,dev">
        <root level="WARN">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="ERROR"/>
            <appender-ref ref="GRPC"/> <!-- In the local environment, comment this line out if you don't want SkyWalking log collection -->
            <appender-ref ref="ASYNC"/> <!-- In the local environment, comment this line out if you don't want to write logs -->
        </root>

        <!-- Per business package, set the DAO-layer SQL logging level to DEBUG -->
        <logger name="com.zt.plat.module.report.dal.mysql" level="DEBUG" additivity="false">
            <appender-ref ref="STDOUT"/>
        </logger>
    </springProfile>

    <!-- Other environments -->
    <springProfile name="dev,test,stage,prod,default">
        <root level="INFO">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="ERROR"/>
            <appender-ref ref="ASYNC"/>
            <appender-ref ref="GRPC"/>
        </root>

@@ -0,0 +1,63 @@
package com.zt.plat.module.system.api.iwork;

import com.zt.plat.framework.common.pojo.CommonResult;
import com.zt.plat.module.system.api.iwork.dto.*;
import com.zt.plat.module.system.enums.ApiConstants;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;

/**
 * RPC service - iWork integration
 */
@FeignClient(name = ApiConstants.NAME, contextId = "iWorkIntegrationApi")
@Tag(name = "RPC 服务 - iWork 集成")
public interface IWorkIntegrationApi {

    String PREFIX = ApiConstants.PREFIX + "/integration/iwork";

    // ----------------- Authentication / session -----------------

    @PostMapping(PREFIX + "/auth/register")
    @Operation(summary = "注册 iWork 凭证,获取服务端公钥与 secret")
    CommonResult<IWorkAuthRegisterRespDTO> register(@RequestBody IWorkAuthRegisterReqDTO reqDTO);

    @PostMapping(PREFIX + "/auth/token")
    @Operation(summary = "申请 iWork Token(独立接口)")
    CommonResult<IWorkAuthTokenRespDTO> acquireToken(@RequestBody IWorkAuthTokenReqDTO reqDTO);

    // ----------------- Workflow capabilities -----------------

    @PostMapping(PREFIX + "/user/resolve")
    @Operation(summary = "根据外部标识获取 iWork 用户编号")
    CommonResult<IWorkUserInfoRespDTO> resolveUser(@RequestBody IWorkUserInfoReqDTO reqDTO);

    @PostMapping(PREFIX + "/workflow/create")
    @Operation(summary = "发起 iWork 流程")
    CommonResult<IWorkOperationRespDTO> createWorkflow(@RequestBody IWorkWorkflowCreateReqDTO reqDTO);

    @PostMapping(PREFIX + "/workflow/void")
    @Operation(summary = "作废 / 干预 iWork 流程")
    CommonResult<IWorkOperationRespDTO> voidWorkflow(@RequestBody IWorkWorkflowVoidReqDTO reqDTO);

    // ----------------- HR organization paging APIs -----------------

    @PostMapping(PREFIX + "/hr/subcompany/page")
    @Operation(summary = "获取 iWork 分部列表")
    CommonResult<IWorkHrSubcompanyPageRespDTO> listSubcompanies(@RequestBody IWorkOrgPageReqDTO reqDTO);

    @PostMapping(PREFIX + "/hr/department/page")
    @Operation(summary = "获取 iWork 部门列表")
    CommonResult<IWorkHrDepartmentPageRespDTO> listDepartments(@RequestBody IWorkOrgPageReqDTO reqDTO);

    @PostMapping(PREFIX + "/hr/job-title/page")
    @Operation(summary = "获取 iWork 岗位列表")
    CommonResult<IWorkHrJobTitlePageRespDTO> listJobTitles(@RequestBody IWorkOrgPageReqDTO reqDTO);

    @PostMapping(PREFIX + "/hr/user/page")
    @Operation(summary = "获取 iWork 人员列表")
    CommonResult<IWorkHrUserPageRespDTO> listUsers(@RequestBody IWorkOrgPageReqDTO reqDTO);

}
@@ -0,0 +1,18 @@
package com.zt.plat.module.system.api.iwork.dto;

import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;

/**
 * iWork registration/authorization request DTO (used when other modules call system-server via Feign)
 */
@Data
public class IWorkAuthRegisterReqDTO {

    @Schema(description = "iWork 应用编码", requiredMode = Schema.RequiredMode.REQUIRED)
    private String appCode;

    @Schema(description = "iWork 网关地址", requiredMode = Schema.RequiredMode.NOT_REQUIRED)
    private String baseUrl;

}
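A hedged sketch of consuming the Feign client from another module; only the appCode/baseUrl fields shown in this diff are used, and the values are hypothetical:

    import com.zt.plat.framework.common.pojo.CommonResult;
    import com.zt.plat.module.system.api.iwork.IWorkIntegrationApi;
    import com.zt.plat.module.system.api.iwork.dto.IWorkAuthRegisterReqDTO;
    import com.zt.plat.module.system.api.iwork.dto.IWorkAuthRegisterRespDTO;
    import jakarta.annotation.Resource;
    import org.springframework.stereotype.Service;

    @Service
    public class IWorkRegistrationExample {

        @Resource
        private IWorkIntegrationApi iWorkIntegrationApi;

        public IWorkAuthRegisterRespDTO register() {
            IWorkAuthRegisterReqDTO reqDTO = new IWorkAuthRegisterReqDTO();
            reqDTO.setAppCode("demo-app");             // hypothetical application code
            reqDTO.setBaseUrl("http://iwork.example"); // optional gateway address
            CommonResult<IWorkAuthRegisterRespDTO> result = iWorkIntegrationApi.register(reqDTO);
            return result.getData();
        }
    }
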
Some files were not shown because too many files have changed in this diff.