From cd21239ff27f77510dacf972daed2cfa167ae68a Mon Sep 17 00:00:00 2001 From: chenbowen Date: Thu, 27 Nov 2025 09:58:44 +0800 Subject: [PATCH 1/6] =?UTF-8?q?flowable=20=E8=BE=BE=E6=A2=A6=E8=BF=81?= =?UTF-8?q?=E7=A7=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../config/BpmFlowableConfiguration.java | 41 ++ .../liquibase/database/core/DmDatabase.java | 60 ++- .../datatype/core/DmBooleanType.java | 32 ++ .../services/liquibase.database.Database | 1 + .../liquibase.datatype.LiquibaseDataType | 1 + .../src/main/resources/application-local.yaml | 17 +- .../create/flowable.oracle.create.batch.sql | 41 ++ .../db/drop/flowable.oracle.drop.batch.sql | 4 + .../create/flowable.oracle.create.common.sql | 23 ++ .../db/drop/flowable.oracle.drop.common.sql | 2 + .../create/flowable.oracle.create.engine.sql | 355 ++++++++++++++++++ .../create/flowable.oracle.create.history.sql | 114 ++++++ .../db/drop/flowable.oracle.drop.engine.sql | 148 ++++++++ .../db/drop/flowable.oracle.drop.history.sql | 23 ++ ...wable.oracle.create.entitylink.history.sql | 23 ++ .../flowable.oracle.create.entitylink.sql | 26 ++ ...lowable.oracle.drop.entitylink.history.sql | 4 + .../drop/flowable.oracle.drop.entitylink.sql | 4 + ...owable.oracle.create.eventsubscription.sql | 28 ++ ...flowable.oracle.drop.eventsubscription.sql | 5 + ...ble.oracle.create.identitylink.history.sql | 20 + .../flowable.oracle.create.identitylink.sql | 24 ++ ...wable.oracle.drop.identitylink.history.sql | 6 + .../flowable.oracle.drop.identitylink.sql | 7 + .../flowable.oracle.create.identity.sql | 108 ++++++ .../db/drop/flowable.oracle.drop.identity.sql | 22 ++ .../db/create/flowable.oracle.create.job.sql | 261 +++++++++++++ .../db/drop/flowable.oracle.drop.job.sql | 74 ++++ .../flowable.oracle.create.task.history.sql | 64 ++++ .../db/create/flowable.oracle.create.task.sql | 48 +++ .../flowable.oracle.drop.task.history.sql | 8 + .../db/drop/flowable.oracle.drop.task.sql | 6 + 
...lowable.oracle.create.variable.history.sql | 26 ++ .../flowable.oracle.create.variable.sql | 31 ++ .../flowable.oracle.drop.variable.history.sql | 6 + .../db/drop/flowable.oracle.drop.variable.sql | 9 + 36 files changed, 1649 insertions(+), 23 deletions(-) create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/datatype/core/DmBooleanType.java create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.datatype.LiquibaseDataType create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/create/flowable.oracle.create.batch.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/drop/flowable.oracle.drop.batch.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/create/flowable.oracle.create.common.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/drop/flowable.oracle.drop.common.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.engine.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.engine.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.sql create mode 100644 
zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/create/flowable.oracle.create.eventsubscription.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/drop/flowable.oracle.drop.eventsubscription.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/create/flowable.oracle.create.identity.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/drop/flowable.oracle.drop.identity.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/create/flowable.oracle.create.job.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/drop/flowable.oracle.drop.job.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.history.sql create mode 100644 
zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.history.sql create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.sql diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/java/com/zt/plat/module/bpm/framework/flowable/config/BpmFlowableConfiguration.java b/zt-module-bpm/zt-module-bpm-server/src/main/java/com/zt/plat/module/bpm/framework/flowable/config/BpmFlowableConfiguration.java index f0d5b49f..159cfce0 100644 --- a/zt-module-bpm/zt-module-bpm-server/src/main/java/com/zt/plat/module/bpm/framework/flowable/config/BpmFlowableConfiguration.java +++ b/zt-module-bpm/zt-module-bpm-server/src/main/java/com/zt/plat/module/bpm/framework/flowable/config/BpmFlowableConfiguration.java @@ -8,17 +8,25 @@ import com.zt.plat.module.bpm.framework.flowable.core.event.BpmProcessInstanceEv import com.zt.plat.module.system.api.user.AdminUserApi; import org.flowable.common.engine.api.delegate.FlowableFunctionDelegate; import org.flowable.common.engine.api.delegate.event.FlowableEventListener; +import org.flowable.engine.ProcessEngineConfiguration; import org.flowable.spring.SpringProcessEngineConfiguration; 
import org.flowable.spring.boot.EngineConfigurationConfigurer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.beans.factory.ObjectProvider; import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; import org.springframework.context.ApplicationEventPublisher; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.task.AsyncListenableTaskExecutor; +import org.springframework.jdbc.datasource.DataSourceUtils; import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; import java.util.List; +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.SQLException; /** * BPM 模块的 Flowable 配置类 @@ -28,6 +36,8 @@ import java.util.List; @Configuration(proxyBeanMethods = false) public class BpmFlowableConfiguration { + private static final Logger log = LoggerFactory.getLogger(BpmFlowableConfiguration.class); + /** * 参考 {@link org.flowable.spring.boot.FlowableJobConfiguration} 类,创建对应的 AsyncListenableTaskExecutor Bean * @@ -69,6 +79,37 @@ public class BpmFlowableConfiguration { }; } + @Bean + public EngineConfigurationConfigurer dmProcessEngineConfigurationConfigurer(DataSource dataSource) { + return configuration -> { + try { + configureDmCompatibility(configuration, dataSource); + } catch (SQLException ex) { + log.warn("Failed to inspect datasource for DM compatibility; Flowable will keep default settings", ex); + } + }; + } + + private void configureDmCompatibility(SpringProcessEngineConfiguration configuration, DataSource dataSource) throws SQLException { + Connection connection = null; + try { + connection = DataSourceUtils.getConnection(dataSource); + DatabaseMetaData metaData = connection.getMetaData(); + String productName = metaData.getDatabaseProductName(); + String jdbcUrl = metaData.getURL(); + boolean dmProduct = productName != null && 
productName.toLowerCase().contains("dm"); + boolean dmUrl = jdbcUrl != null && jdbcUrl.toLowerCase().startsWith("jdbc:dm"); + if (!dmProduct && !dmUrl) { + return; + } + log.info("Detected DM database (product='{}'); enabling Flowable Oracle compatibility with automatic schema updates", productName); + configuration.setDatabaseSchemaUpdate(ProcessEngineConfiguration.DB_SCHEMA_UPDATE_TRUE); + configuration.setDatabaseType("oracle"); + } finally { + DataSourceUtils.releaseConnection(connection, dataSource); + } + } + // =========== 审批人相关的 Bean ========== @Bean diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/database/core/DmDatabase.java b/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/database/core/DmDatabase.java index f7558a11..9e2c50ff 100644 --- a/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/database/core/DmDatabase.java +++ b/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/database/core/DmDatabase.java @@ -8,6 +8,7 @@ package liquibase.database.core; import java.lang.reflect.Method; import java.sql.CallableStatement; import java.sql.Connection; +import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -114,6 +115,7 @@ public class DmDatabase extends AbstractJdbcDatabase { public void setConnection(DatabaseConnection conn) { this.reservedWords.addAll(Arrays.asList("GROUP", "USER", "SESSION", "PASSWORD", "RESOURCE", "START", "SIZE", "UID", "DESC", "ORDER")); Connection sqlConn = null; + boolean dmDatabase = false; if (!(conn instanceof OfflineConnection)) { try { if (conn instanceof JdbcConnection) { @@ -140,26 +142,42 @@ public class DmDatabase extends AbstractJdbcDatabase { Scope.getCurrentScope().getLog(this.getClass()).info("Could not set remarks reporting on OracleDatabase: " + e.getMessage()); } - CallableStatement statement = null; - try { - statement = sqlConn.prepareCall("{call DBMS_UTILITY.DB_VERSION(?,?)}"); - 
statement.registerOutParameter(1, 12); - statement.registerOutParameter(2, 12); - statement.execute(); - String compatibleVersion = statement.getString(2); - if (compatibleVersion != null) { - Matcher majorVersionMatcher = VERSION_PATTERN.matcher(compatibleVersion); - if (majorVersionMatcher.matches()) { - this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1)); - this.databaseMinorVersion = Integer.valueOf(majorVersionMatcher.group(2)); + DatabaseMetaData metaData = sqlConn.getMetaData(); + if (metaData != null) { + String productName = metaData.getDatabaseProductName(); + dmDatabase = productName != null && PRODUCT_NAME.equalsIgnoreCase(productName); + if (dmDatabase) { + this.databaseMajorVersion = metaData.getDatabaseMajorVersion(); + this.databaseMinorVersion = metaData.getDatabaseMinorVersion(); } } } catch (SQLException e) { - String message = "Cannot read from DBMS_UTILITY.DB_VERSION: " + e.getMessage(); - Scope.getCurrentScope().getLog(this.getClass()).info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message); - } finally { - JdbcUtil.closeStatement(statement); + Scope.getCurrentScope().getLog(this.getClass()).info("Unable to inspect database metadata for DM version detection: " + e.getMessage()); + } + + if (!dmDatabase) { + CallableStatement statement = null; + + try { + statement = sqlConn.prepareCall("{call DBMS_UTILITY.DB_VERSION(?,?)}"); + statement.registerOutParameter(1, 12); + statement.registerOutParameter(2, 12); + statement.execute(); + String compatibleVersion = statement.getString(2); + if (compatibleVersion != null) { + Matcher majorVersionMatcher = VERSION_PATTERN.matcher(compatibleVersion); + if (majorVersionMatcher.matches()) { + this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1)); + this.databaseMinorVersion = Integer.valueOf(majorVersionMatcher.group(2)); + } + } + } catch (SQLException e) { + String message = "Cannot read 
from DBMS_UTILITY.DB_VERSION: " + e.getMessage(); + Scope.getCurrentScope().getLog(this.getClass()).info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message); + } finally { + JdbcUtil.closeStatement(statement); + } } if (GlobalConfiguration.DDL_LOCK_TIMEOUT.getCurrentValue() != null) { @@ -266,7 +284,15 @@ public class DmDatabase extends AbstractJdbcDatabase { } public boolean isCorrectDatabaseImplementation(DatabaseConnection conn) throws DatabaseException { - return "oracle".equalsIgnoreCase(conn.getDatabaseProductName()); + String databaseProductName = conn == null ? null : conn.getDatabaseProductName(); + if (databaseProductName == null) { + return false; + } + if (PRODUCT_NAME.equalsIgnoreCase(databaseProductName)) { + return true; + } + // Flowable 历史上将 DM 映射为 Oracle 元数据,因此这里同样接受 Oracle 以保持兼容 + return "oracle".equalsIgnoreCase(databaseProductName); } public String getDefaultDriver(String url) { diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/datatype/core/DmBooleanType.java b/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/datatype/core/DmBooleanType.java new file mode 100644 index 00000000..7f662501 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/datatype/core/DmBooleanType.java @@ -0,0 +1,32 @@ +package liquibase.datatype.core; + +import liquibase.database.Database; +import liquibase.database.core.DmDatabase; +import liquibase.datatype.DataTypeInfo; +import liquibase.datatype.DatabaseDataType; + +@DataTypeInfo( + name = "boolean", + aliases = {"java.sql.Types.BOOLEAN", "java.lang.Boolean", "bit", "bool"}, + minParameters = 0, + maxParameters = 0, + priority = 2 +) +public class DmBooleanType extends BooleanType { + + @Override + public boolean supports(Database database) { + if (database instanceof DmDatabase) { + return true; + } + return super.supports(database); + } + + @Override + public DatabaseDataType 
toDatabaseDataType(Database database) { + if (database instanceof DmDatabase) { + return new DatabaseDataType("NUMBER", 1); + } + return super.toDatabaseDataType(database); + } +} diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.database.Database b/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.database.Database index 0ccf2249..765e41ad 100644 --- a/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.database.Database +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.database.Database @@ -13,6 +13,7 @@ liquibase.database.core.MariaDBDatabase liquibase.database.core.MockDatabase liquibase.database.core.MySQLDatabase liquibase.database.core.OracleDatabase +liquibase.database.core.DmDatabase liquibase.database.core.PostgresDatabase liquibase.database.core.SQLiteDatabase liquibase.database.core.SybaseASADatabase diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.datatype.LiquibaseDataType b/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.datatype.LiquibaseDataType new file mode 100644 index 00000000..5be88a34 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/META-INF/services/liquibase.datatype.LiquibaseDataType @@ -0,0 +1 @@ +liquibase.datatype.core.DmBooleanType diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/application-local.yaml b/zt-module-bpm/zt-module-bpm-server/src/main/resources/application-local.yaml index 2bf6df52..1a65166f 100644 --- a/zt-module-bpm/zt-module-bpm-server/src/main/resources/application-local.yaml +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/application-local.yaml @@ -39,14 +39,14 @@ spring: primary: master datasource: master: - url: 
jdbc:mysql://172.16.46.247:4787/ruoyi-vue-pro?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true&rewriteBatchedStatements=true # MySQL Connector/J 8.X 连接的示例 - username: jygk-test - password: Zgty@0527 + url: jdbc:dm://172.16.46.247:1050?schema=BPM + username: SYSDBA + password: pgbsci6ddJ6Sqj@e slave: # 模拟从库,可根据自己需要修改 # 模拟从库,可根据自己需要修改 lazy: true # 开启懒加载,保证启动速度 - url: jdbc:mysql://172.16.46.247:4787/ruoyi-vue-pro?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true&rewriteBatchedStatements=true # MySQL Connector/J 8.X 连接的示例 - username: jygk-test - password: Zgty@0527 + url: jdbc:dm://172.16.46.247:1050?schema=BPM + username: SYSDBA + password: pgbsci6ddJ6Sqj@e # Redis 配置。Redisson 默认的配置足够使用,一般不需要进行调优 data: @@ -56,6 +56,11 @@ spring: database: 0 # 数据库索引 # password: 123456 # 密码,建议生产环境开启 +# Flowable 在 DM 场景下需要识别为 Oracle 并自动升级表结构 +flowable: + database-schema-update: true + database-type: oracle + --- #################### MQ 消息队列相关配置 #################### --- #################### 定时任务相关配置 #################### diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/create/flowable.oracle.create.batch.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/create/flowable.oracle.create.batch.sql new file mode 100644 index 00000000..19dca822 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/create/flowable.oracle.create.batch.sql @@ -0,0 +1,41 @@ +create table FLW_RU_BATCH ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER, + TYPE_ VARCHAR2(64) not null, + SEARCH_KEY_ VARCHAR2(255), + SEARCH_KEY2_ VARCHAR2(255), + CREATE_TIME_ TIMESTAMP(6) not null, + COMPLETE_TIME_ TIMESTAMP(6), + STATUS_ VARCHAR2(255), + BATCH_DOC_ID_ VARCHAR2(64), + TENANT_ID_ VARCHAR2(255) default '', + primary key (ID_) +); + +create table FLW_RU_BATCH_PART ( + ID_ VARCHAR2(64) not null, 
+ REV_ INTEGER, + BATCH_ID_ VARCHAR2(64), + TYPE_ VARCHAR2(64) not null, + SCOPE_ID_ VARCHAR2(64), + SUB_SCOPE_ID_ VARCHAR2(64), + SCOPE_TYPE_ VARCHAR2(64), + SEARCH_KEY_ VARCHAR2(255), + SEARCH_KEY2_ VARCHAR2(255), + CREATE_TIME_ TIMESTAMP(6) not null, + COMPLETE_TIME_ TIMESTAMP(6), + STATUS_ VARCHAR2(255), + RESULT_DOC_ID_ VARCHAR2(64), + TENANT_ID_ VARCHAR2(255) default '', + primary key (ID_) +); + +create index FLW_IDX_BATCH_PART on FLW_RU_BATCH_PART(BATCH_ID_); + +alter table FLW_RU_BATCH_PART + add constraint FLW_FK_BATCH_PART_PARENT + foreign key (BATCH_ID_) + references FLW_RU_BATCH (ID_); + +insert into ACT_GE_PROPERTY values ('batch.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/drop/flowable.oracle.drop.batch.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/drop/flowable.oracle.drop.batch.sql new file mode 100644 index 00000000..d16ba1ce --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/batch/service/db/drop/flowable.oracle.drop.batch.sql @@ -0,0 +1,4 @@ +drop index FLW_IDX_BATCH_PART; + +drop table FLW_RU_BATCH_PART; +drop table FLW_RU_BATCH; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/create/flowable.oracle.create.common.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/create/flowable.oracle.create.common.sql new file mode 100644 index 00000000..4ef0d2e3 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/create/flowable.oracle.create.common.sql @@ -0,0 +1,23 @@ +create table ACT_GE_PROPERTY ( + NAME_ VARCHAR2(64), + VALUE_ VARCHAR2(300), + REV_ INTEGER, + primary key (NAME_) +); + +create table ACT_GE_BYTEARRAY ( + ID_ VARCHAR2(64), + REV_ INTEGER, + NAME_ VARCHAR2(255), + DEPLOYMENT_ID_ VARCHAR2(64), + BYTES_ BLOB, + GENERATED_ NUMBER(1) CHECK (GENERATED_ IN (1,0)), + 
primary key (ID_) +); + +insert into ACT_GE_PROPERTY +values ('common.schema.version', '7.0.1.1', 1); + +insert into ACT_GE_PROPERTY +values ('next.dbid', '1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/drop/flowable.oracle.drop.common.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/drop/flowable.oracle.drop.common.sql new file mode 100644 index 00000000..9019cb9d --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/common/db/drop/flowable.oracle.drop.common.sql @@ -0,0 +1,2 @@ +drop table ACT_GE_BYTEARRAY; +drop table ACT_GE_PROPERTY; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.engine.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.engine.sql new file mode 100644 index 00000000..d0139b78 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.engine.sql @@ -0,0 +1,355 @@ +create table ACT_RE_DEPLOYMENT ( + ID_ VARCHAR2(64), + NAME_ VARCHAR2(255), + CATEGORY_ VARCHAR2(255), + KEY_ VARCHAR2(255), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + DEPLOY_TIME_ TIMESTAMP(6), + DERIVED_FROM_ VARCHAR2(64), + DERIVED_FROM_ROOT_ VARCHAR2(64), + PARENT_DEPLOYMENT_ID_ VARCHAR2(255), + ENGINE_VERSION_ VARCHAR2(255), + primary key (ID_) +); + +create table ACT_RE_MODEL ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER, + NAME_ VARCHAR2(255), + KEY_ VARCHAR2(255), + CATEGORY_ VARCHAR2(255), + CREATE_TIME_ TIMESTAMP(6), + LAST_UPDATE_TIME_ TIMESTAMP(6), + VERSION_ INTEGER, + META_INFO_ VARCHAR2(2000), + DEPLOYMENT_ID_ VARCHAR2(64), + EDITOR_SOURCE_VALUE_ID_ VARCHAR2(64), + EDITOR_SOURCE_EXTRA_VALUE_ID_ VARCHAR2(64), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create table ACT_RU_EXECUTION ( + ID_ VARCHAR2(64), + REV_ INTEGER, + PROC_INST_ID_ VARCHAR2(64), + 
BUSINESS_KEY_ VARCHAR2(255), + PARENT_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + SUPER_EXEC_ VARCHAR2(64), + ROOT_PROC_INST_ID_ VARCHAR2(64), + ACT_ID_ VARCHAR2(255), + IS_ACTIVE_ NUMBER(1) CHECK (IS_ACTIVE_ IN (1,0)), + IS_CONCURRENT_ NUMBER(1) CHECK (IS_CONCURRENT_ IN (1,0)), + IS_SCOPE_ NUMBER(1) CHECK (IS_SCOPE_ IN (1,0)), + IS_EVENT_SCOPE_ NUMBER(1) CHECK (IS_EVENT_SCOPE_ IN (1,0)), + IS_MI_ROOT_ NUMBER(1) CHECK (IS_MI_ROOT_ IN (1,0)), + SUSPENSION_STATE_ INTEGER, + CACHED_ENT_STATE_ INTEGER, + TENANT_ID_ VARCHAR2(255) DEFAULT '', + NAME_ VARCHAR2(255), + START_ACT_ID_ VARCHAR2(255), + START_TIME_ TIMESTAMP(6), + START_USER_ID_ VARCHAR2(255), + LOCK_TIME_ TIMESTAMP(6), + LOCK_OWNER_ VARCHAR2(255), + IS_COUNT_ENABLED_ NUMBER(1) CHECK (IS_COUNT_ENABLED_ IN (1,0)), + EVT_SUBSCR_COUNT_ INTEGER, + TASK_COUNT_ INTEGER, + JOB_COUNT_ INTEGER, + TIMER_JOB_COUNT_ INTEGER, + SUSP_JOB_COUNT_ INTEGER, + DEADLETTER_JOB_COUNT_ INTEGER, + EXTERNAL_WORKER_JOB_COUNT_ INTEGER, + VAR_COUNT_ INTEGER, + ID_LINK_COUNT_ INTEGER, + CALLBACK_ID_ VARCHAR2(255), + CALLBACK_TYPE_ VARCHAR2(255), + REFERENCE_ID_ VARCHAR2(255), + REFERENCE_TYPE_ VARCHAR2(255), + PROPAGATED_STAGE_INST_ID_ VARCHAR2(255), + BUSINESS_STATUS_ VARCHAR2(255), + primary key (ID_) +); + +create table ACT_RE_PROCDEF ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + CATEGORY_ VARCHAR2(255), + NAME_ VARCHAR2(255), + KEY_ VARCHAR2(255) NOT NULL, + VERSION_ INTEGER NOT NULL, + DEPLOYMENT_ID_ VARCHAR2(64), + RESOURCE_NAME_ VARCHAR2(2000), + DGRM_RESOURCE_NAME_ VARCHAR2(4000), + DESCRIPTION_ VARCHAR2(2000), + HAS_START_FORM_KEY_ NUMBER(1) CHECK (HAS_START_FORM_KEY_ IN (1,0)), + HAS_GRAPHICAL_NOTATION_ NUMBER(1) CHECK (HAS_GRAPHICAL_NOTATION_ IN (1,0)), + SUSPENSION_STATE_ INTEGER, + TENANT_ID_ VARCHAR2(255) DEFAULT '', + DERIVED_FROM_ VARCHAR2(64), + DERIVED_FROM_ROOT_ VARCHAR2(64), + DERIVED_VERSION_ INTEGER DEFAULT 0 NOT NULL, + ENGINE_VERSION_ VARCHAR2(255), + primary key (ID_) +); + +create table ACT_EVT_LOG ( + 
LOG_NR_ NUMBER(19), + TYPE_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + EXECUTION_ID_ VARCHAR2(64), + TASK_ID_ VARCHAR2(64), + TIME_STAMP_ TIMESTAMP(6) not null, + USER_ID_ VARCHAR2(255), + DATA_ BLOB, + LOCK_OWNER_ VARCHAR2(255), + LOCK_TIME_ TIMESTAMP(6) null, + IS_PROCESSED_ NUMBER(3) default 0, + primary key (LOG_NR_) +); + +create sequence act_evt_log_seq; + +create table ACT_PROCDEF_INFO ( + ID_ VARCHAR2(64) not null, + PROC_DEF_ID_ VARCHAR2(64) not null, + REV_ integer, + INFO_JSON_ID_ VARCHAR2(64), + primary key (ID_) +); + +create table ACT_RU_ACTINST ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER default 1, + PROC_DEF_ID_ VARCHAR2(64) not null, + PROC_INST_ID_ VARCHAR2(64) not null, + EXECUTION_ID_ VARCHAR2(64) not null, + ACT_ID_ VARCHAR2(255) not null, + TASK_ID_ VARCHAR2(64), + CALL_PROC_INST_ID_ VARCHAR2(64), + ACT_NAME_ VARCHAR2(255), + ACT_TYPE_ VARCHAR2(255) not null, + ASSIGNEE_ VARCHAR2(255), + START_TIME_ TIMESTAMP(6) not null, + END_TIME_ TIMESTAMP(6), + DURATION_ NUMBER(19,0), + TRANSACTION_ORDER_ INTEGER, + DELETE_REASON_ VARCHAR2(2000), + TENANT_ID_ VARCHAR2(255) default '', + primary key (ID_) +); + +create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_); +create index ACT_IDX_EXEC_ROOT on ACT_RU_EXECUTION(ROOT_PROC_INST_ID_); +create index ACT_IDX_EXEC_REF_ID_ on ACT_RU_EXECUTION(REFERENCE_ID_); +create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_); + +create index ACT_IDX_RU_ACTI_START on ACT_RU_ACTINST(START_TIME_); +create index ACT_IDX_RU_ACTI_END on ACT_RU_ACTINST(END_TIME_); +create index ACT_IDX_RU_ACTI_PROC on ACT_RU_ACTINST(PROC_INST_ID_); +create index ACT_IDX_RU_ACTI_PROC_ACT on ACT_RU_ACTINST(PROC_INST_ID_, ACT_ID_); +create index ACT_IDX_RU_ACTI_EXEC on ACT_RU_ACTINST(EXECUTION_ID_); +create index ACT_IDX_RU_ACTI_EXEC_ACT on ACT_RU_ACTINST(EXECUTION_ID_, ACT_ID_); +create index ACT_IDX_RU_ACTI_TASK on ACT_RU_ACTINST(TASK_ID_); + +create index ACT_IDX_BYTEAR_DEPL on 
ACT_GE_BYTEARRAY(DEPLOYMENT_ID_); +alter table ACT_GE_BYTEARRAY + add constraint ACT_FK_BYTEARR_DEPL + foreign key (DEPLOYMENT_ID_) + references ACT_RE_DEPLOYMENT (ID_); + +alter table ACT_RE_PROCDEF + add constraint ACT_UNIQ_PROCDEF + unique (KEY_,VERSION_, DERIVED_VERSION_, TENANT_ID_); + +create index ACT_IDX_EXE_PROCINST on ACT_RU_EXECUTION(PROC_INST_ID_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_EXE_PARENT on ACT_RU_EXECUTION(PARENT_ID_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PARENT + foreign key (PARENT_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_EXE_SUPER on ACT_RU_EXECUTION(SUPER_EXEC_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_SUPER + foreign key (SUPER_EXEC_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_EXE_PROCDEF on ACT_RU_EXECUTION(PROC_DEF_ID_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_TSKASS_TASK on ACT_RU_IDENTITYLINK(TASK_ID_); +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_TSKASS_TASK + foreign key (TASK_ID_) + references ACT_RU_TASK (ID_); + +create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_); +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_ATHRZ_PROCEDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_IDL_PROCINST on ACT_RU_IDENTITYLINK(PROC_INST_ID_); +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_IDL_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TASK_EXEC on ACT_RU_TASK(EXECUTION_ID_); +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TASK_PROCINST on ACT_RU_TASK(PROC_INST_ID_); +alter table ACT_RU_TASK + 
add constraint ACT_FK_TASK_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TASK_PROCDEF on ACT_RU_TASK(PROC_DEF_ID_); +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_VAR_EXE on ACT_RU_VARIABLE(EXECUTION_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_VAR_PROCINST on ACT_RU_VARIABLE(PROC_INST_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION(ID_); + +create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_); +alter table ACT_RU_JOB + add constraint ACT_FK_JOB_EXECUTION + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_JOB_PROC_INST_ID on ACT_RU_JOB(PROCESS_INSTANCE_ID_); +alter table ACT_RU_JOB + add constraint ACT_FK_JOB_PROCESS_INSTANCE + foreign key (PROCESS_INSTANCE_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_JOB_PROC_DEF_ID on ACT_RU_JOB(PROC_DEF_ID_); +alter table ACT_RU_JOB + add constraint ACT_FK_JOB_PROC_DEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_TJOB_EXECUTION_ID on ACT_RU_TIMER_JOB(EXECUTION_ID_); +alter table ACT_RU_TIMER_JOB + add constraint ACT_FK_TJOB_EXECUTION + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TJOB_PROC_INST_ID on ACT_RU_TIMER_JOB(PROCESS_INSTANCE_ID_); +alter table ACT_RU_TIMER_JOB + add constraint ACT_FK_TJOB_PROCESS_INSTANCE + foreign key (PROCESS_INSTANCE_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TJOB_PROC_DEF_ID on ACT_RU_TIMER_JOB(PROC_DEF_ID_); +alter table ACT_RU_TIMER_JOB + add constraint ACT_FK_TJOB_PROC_DEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index 
ACT_IDX_SJOB_EXECUTION_ID on ACT_RU_SUSPENDED_JOB(EXECUTION_ID_); +alter table ACT_RU_SUSPENDED_JOB + add constraint ACT_FK_SJOB_EXECUTION + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_SJOB_PROC_INST_ID on ACT_RU_SUSPENDED_JOB(PROCESS_INSTANCE_ID_); +alter table ACT_RU_SUSPENDED_JOB + add constraint ACT_FK_SJOB_PROCESS_INSTANCE + foreign key (PROCESS_INSTANCE_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_SJOB_PROC_DEF_ID on ACT_RU_SUSPENDED_JOB(PROC_DEF_ID_); +alter table ACT_RU_SUSPENDED_JOB + add constraint ACT_FK_SJOB_PROC_DEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_DJOB_EXECUTION_ID on ACT_RU_DEADLETTER_JOB(EXECUTION_ID_); +alter table ACT_RU_DEADLETTER_JOB + add constraint ACT_FK_DJOB_EXECUTION + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_DJOB_PROC_INST_ID on ACT_RU_DEADLETTER_JOB(PROCESS_INSTANCE_ID_); +alter table ACT_RU_DEADLETTER_JOB + add constraint ACT_FK_DJOB_PROCESS_INSTANCE + foreign key (PROCESS_INSTANCE_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_DJOB_PROC_DEF_ID on ACT_RU_DEADLETTER_JOB(PROC_DEF_ID_); +alter table ACT_RU_DEADLETTER_JOB + add constraint ACT_FK_DJOB_PROC_DEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +alter table ACT_RU_EVENT_SUBSCR + add constraint ACT_FK_EVENT_EXEC + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION(ID_); + +create index ACT_IDX_MODEL_SOURCE on ACT_RE_MODEL(EDITOR_SOURCE_VALUE_ID_); +alter table ACT_RE_MODEL + add constraint ACT_FK_MODEL_SOURCE + foreign key (EDITOR_SOURCE_VALUE_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_MODEL_SOURCE_EXTRA on ACT_RE_MODEL(EDITOR_SOURCE_EXTRA_VALUE_ID_); +alter table ACT_RE_MODEL + add constraint ACT_FK_MODEL_SOURCE_EXTRA + foreign key (EDITOR_SOURCE_EXTRA_VALUE_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_MODEL_DEPLOYMENT on 
ACT_RE_MODEL(DEPLOYMENT_ID_); +alter table ACT_RE_MODEL + add constraint ACT_FK_MODEL_DEPLOYMENT + foreign key (DEPLOYMENT_ID_) + references ACT_RE_DEPLOYMENT (ID_); + +create index ACT_IDX_PROCDEF_INFO_JSON on ACT_PROCDEF_INFO(INFO_JSON_ID_); +alter table ACT_PROCDEF_INFO + add constraint ACT_FK_INFO_JSON_BA + foreign key (INFO_JSON_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_PROCDEF_INFO_PROC on ACT_PROCDEF_INFO(PROC_DEF_ID_); +alter table ACT_PROCDEF_INFO + add constraint ACT_FK_INFO_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +alter table ACT_PROCDEF_INFO + add constraint ACT_UNIQ_INFO_PROCDEF + unique (PROC_DEF_ID_); + +insert into ACT_GE_PROPERTY +values ('schema.version', '7.0.1.1', 1); + +insert into ACT_GE_PROPERTY +values ('schema.history', 'create(7.0.1.1)', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.history.sql new file mode 100644 index 00000000..75782f46 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/create/flowable.oracle.create.history.sql @@ -0,0 +1,114 @@ +create table ACT_HI_PROCINST ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER default 1, + PROC_INST_ID_ VARCHAR2(64) not null, + BUSINESS_KEY_ VARCHAR2(255), + PROC_DEF_ID_ VARCHAR2(64) not null, + START_TIME_ TIMESTAMP(6) not null, + END_TIME_ TIMESTAMP(6), + DURATION_ NUMBER(19,0), + START_USER_ID_ VARCHAR2(255), + START_ACT_ID_ VARCHAR2(255), + END_ACT_ID_ VARCHAR2(255), + SUPER_PROCESS_INSTANCE_ID_ VARCHAR2(64), + DELETE_REASON_ VARCHAR2(2000), + TENANT_ID_ VARCHAR2(255) default '', + NAME_ VARCHAR2(255), + CALLBACK_ID_ VARCHAR2(255), + CALLBACK_TYPE_ VARCHAR2(255), + REFERENCE_ID_ VARCHAR2(255), + REFERENCE_TYPE_ VARCHAR2(255), + PROPAGATED_STAGE_INST_ID_ VARCHAR2(255), + BUSINESS_STATUS_ VARCHAR2(255), + primary 
key (ID_), + unique (PROC_INST_ID_) +); + +create table ACT_HI_ACTINST ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER default 1, + PROC_DEF_ID_ VARCHAR2(64) not null, + PROC_INST_ID_ VARCHAR2(64) not null, + EXECUTION_ID_ VARCHAR2(64) not null, + ACT_ID_ VARCHAR2(255) not null, + TASK_ID_ VARCHAR2(64), + CALL_PROC_INST_ID_ VARCHAR2(64), + ACT_NAME_ VARCHAR2(255), + ACT_TYPE_ VARCHAR2(255) not null, + ASSIGNEE_ VARCHAR2(255), + START_TIME_ TIMESTAMP(6) not null, + END_TIME_ TIMESTAMP(6), + TRANSACTION_ORDER_ INTEGER, + DURATION_ NUMBER(19,0), + DELETE_REASON_ VARCHAR2(2000), + TENANT_ID_ VARCHAR2(255) default '', + primary key (ID_) +); + +create table ACT_HI_DETAIL ( + ID_ VARCHAR2(64) not null, + TYPE_ VARCHAR2(255) not null, + PROC_INST_ID_ VARCHAR2(64), + EXECUTION_ID_ VARCHAR2(64), + TASK_ID_ VARCHAR2(64), + ACT_INST_ID_ VARCHAR2(64), + NAME_ VARCHAR2(255) not null, + VAR_TYPE_ VARCHAR2(64), + REV_ INTEGER, + TIME_ TIMESTAMP(6) not null, + BYTEARRAY_ID_ VARCHAR2(64), + DOUBLE_ NUMBER(38,10), + LONG_ NUMBER(19,0), + TEXT_ VARCHAR2(2000), + TEXT2_ VARCHAR2(2000), + primary key (ID_) +); + +create table ACT_HI_COMMENT ( + ID_ VARCHAR2(64) not null, + TYPE_ VARCHAR2(255), + TIME_ TIMESTAMP(6) not null, + USER_ID_ VARCHAR2(255), + TASK_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + ACTION_ VARCHAR2(255), + MESSAGE_ VARCHAR2(2000), + FULL_MSG_ BLOB, + primary key (ID_) +); + +create table ACT_HI_ATTACHMENT ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER, + USER_ID_ VARCHAR2(255), + NAME_ VARCHAR2(255), + DESCRIPTION_ VARCHAR2(2000), + TYPE_ VARCHAR2(255), + TASK_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + URL_ VARCHAR2(2000), + CONTENT_ID_ VARCHAR2(64), + TIME_ TIMESTAMP(6), + primary key (ID_) +); + +create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_); +create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_); +create index ACT_IDX_HI_PRO_SUPER_PROCINST on ACT_HI_PROCINST(SUPER_PROCESS_INSTANCE_ID_); +create index 
ACT_IDX_HI_ACT_INST_START on ACT_HI_ACTINST(START_TIME_); +create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_); +create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_); +create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_); +create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_); +create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_); +create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_); +create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_); +create index ACT_IDX_HI_PROCVAR_TASK_ID on ACT_HI_VARINST(TASK_ID_); +create index ACT_IDX_HI_PROCVAR_EXE on ACT_HI_VARINST(EXECUTION_ID_); +create index ACT_IDX_HI_IDENT_LNK_TASK on ACT_HI_IDENTITYLINK(TASK_ID_); +create index ACT_IDX_HI_IDENT_LNK_PROCINST on ACT_HI_IDENTITYLINK(PROC_INST_ID_); + +create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_); +create index ACT_IDX_HI_ACT_INST_EXEC on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_); +create index ACT_IDX_HI_TASK_INST_PROCINST on ACT_HI_TASKINST(PROC_INST_ID_); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.engine.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.engine.sql new file mode 100644 index 00000000..58537bab --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.engine.sql @@ -0,0 +1,148 @@ +drop index ACT_IDX_BYTEAR_DEPL; +drop index ACT_IDX_EXE_PROCINST; +drop index ACT_IDX_EXE_PARENT; +drop index ACT_IDX_EXE_SUPER; +drop index ACT_IDX_TSKASS_TASK; +drop index ACT_IDX_TASK_EXEC; +drop index ACT_IDX_TASK_PROCINST; +drop index ACT_IDX_TASK_PROCDEF; +drop index ACT_IDX_VAR_EXE; +drop index ACT_IDX_VAR_PROCINST; +drop index ACT_IDX_JOB_EXECUTION_ID; +drop index ACT_IDX_JOB_PROC_INST_ID; +drop index ACT_IDX_JOB_PROC_DEF_ID; +drop index ACT_IDX_TJOB_EXECUTION_ID; +drop 
index ACT_IDX_TJOB_PROC_INST_ID; +drop index ACT_IDX_TJOB_PROC_DEF_ID; +drop index ACT_IDX_SJOB_EXECUTION_ID; +drop index ACT_IDX_SJOB_PROC_INST_ID; +drop index ACT_IDX_SJOB_PROC_DEF_ID; +drop index ACT_IDX_DJOB_EXECUTION_ID; +drop index ACT_IDX_DJOB_PROC_INST_ID; +drop index ACT_IDX_DJOB_PROC_DEF_ID; +drop index ACT_IDX_MODEL_SOURCE; +drop index ACT_IDX_MODEL_SOURCE_EXTRA; +drop index ACT_IDX_MODEL_DEPLOYMENT; +drop index ACT_IDX_PROCDEF_INFO_JSON; + +drop index ACT_IDX_EXEC_BUSKEY; +drop index ACT_IDX_VARIABLE_TASK_ID; + +drop index ACT_IDX_RU_ACTI_START; +drop index ACT_IDX_RU_ACTI_END; +drop index ACT_IDX_RU_ACTI_PROC; +drop index ACT_IDX_RU_ACTI_PROC_ACT; +drop index ACT_IDX_RU_ACTI_EXEC; +drop index ACT_IDX_RU_ACTI_EXEC_ACT; + +alter table ACT_GE_BYTEARRAY + drop CONSTRAINT ACT_FK_BYTEARR_DEPL; + +alter table ACT_RU_EXECUTION + drop CONSTRAINT ACT_FK_EXE_PROCINST; + +alter table ACT_RU_EXECUTION + drop CONSTRAINT ACT_FK_EXE_PARENT; + +alter table ACT_RU_EXECUTION + drop CONSTRAINT ACT_FK_EXE_SUPER; + +alter table ACT_RU_EXECUTION + drop CONSTRAINT ACT_FK_EXE_PROCDEF; + +alter table ACT_RU_IDENTITYLINK + drop CONSTRAINT ACT_FK_TSKASS_TASK; + +alter table ACT_RU_IDENTITYLINK + drop CONSTRAINT ACT_FK_IDL_PROCINST; + +alter table ACT_RU_IDENTITYLINK + drop CONSTRAINT ACT_FK_ATHRZ_PROCEDEF; + +alter table ACT_RU_TASK + drop CONSTRAINT ACT_FK_TASK_EXE; + +alter table ACT_RU_TASK + drop CONSTRAINT ACT_FK_TASK_PROCINST; + +alter table ACT_RU_TASK + drop CONSTRAINT ACT_FK_TASK_PROCDEF; + +alter table ACT_RU_VARIABLE + drop CONSTRAINT ACT_FK_VAR_EXE; + +alter table ACT_RU_VARIABLE + drop CONSTRAINT ACT_FK_VAR_PROCINST; + +alter table ACT_RU_JOB + drop CONSTRAINT ACT_FK_JOB_EXECUTION; + +alter table ACT_RU_JOB + drop CONSTRAINT ACT_FK_JOB_PROCESS_INSTANCE; + +alter table ACT_RU_JOB + drop CONSTRAINT ACT_FK_JOB_PROC_DEF; + +alter table ACT_RU_TIMER_JOB + drop CONSTRAINT ACT_FK_TJOB_EXECUTION; + +alter table ACT_RU_TIMER_JOB + drop CONSTRAINT ACT_FK_TJOB_PROCESS_INSTANCE; 
+ +alter table ACT_RU_TIMER_JOB + drop CONSTRAINT ACT_FK_TJOB_PROC_DEF; + +alter table ACT_RU_SUSPENDED_JOB + drop CONSTRAINT ACT_FK_SJOB_EXECUTION; + +alter table ACT_RU_SUSPENDED_JOB + drop CONSTRAINT ACT_FK_SJOB_PROCESS_INSTANCE; + +alter table ACT_RU_SUSPENDED_JOB + drop CONSTRAINT ACT_FK_SJOB_PROC_DEF; + +alter table ACT_RU_DEADLETTER_JOB + drop CONSTRAINT ACT_FK_DJOB_EXECUTION; + +alter table ACT_RU_DEADLETTER_JOB + drop CONSTRAINT ACT_FK_DJOB_PROCESS_INSTANCE; + +alter table ACT_RU_DEADLETTER_JOB + drop CONSTRAINT ACT_FK_DJOB_PROC_DEF; + +alter table ACT_RU_EVENT_SUBSCR + drop CONSTRAINT ACT_FK_EVENT_EXEC; + +alter table ACT_RE_PROCDEF + drop CONSTRAINT ACT_UNIQ_PROCDEF; + +alter table ACT_RE_MODEL + drop CONSTRAINT ACT_FK_MODEL_SOURCE; + +alter table ACT_RE_MODEL + drop CONSTRAINT ACT_FK_MODEL_SOURCE_EXTRA; + +alter table ACT_RE_MODEL + drop CONSTRAINT ACT_FK_MODEL_DEPLOYMENT; + +alter table ACT_PROCDEF_INFO + drop CONSTRAINT ACT_UNIQ_INFO_PROCDEF; + +alter table ACT_PROCDEF_INFO + drop CONSTRAINT ACT_FK_INFO_JSON_BA; + +alter table ACT_PROCDEF_INFO + drop CONSTRAINT ACT_FK_INFO_PROCDEF; + +drop index ACT_IDX_ATHRZ_PROCEDEF; +drop index ACT_IDX_PROCDEF_INFO_PROC; + +drop table ACT_RU_ACTINST; +drop table ACT_RE_DEPLOYMENT; +drop table ACT_RE_MODEL; +drop table ACT_RE_PROCDEF; +drop table ACT_RU_EXECUTION; + +drop sequence act_evt_log_seq; +drop table ACT_EVT_LOG; +drop table ACT_PROCDEF_INFO; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.history.sql new file mode 100644 index 00000000..2a31cc4b --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/db/drop/flowable.oracle.drop.history.sql @@ -0,0 +1,23 @@ +drop index ACT_IDX_HI_PRO_INST_END; +drop index ACT_IDX_HI_PRO_I_BUSKEY; +drop index ACT_IDX_HI_ACT_INST_START; +drop index ACT_IDX_HI_ACT_INST_END; 
+drop index ACT_IDX_HI_DETAIL_PROC_INST; +drop index ACT_IDX_HI_DETAIL_ACT_INST; +drop index ACT_IDX_HI_DETAIL_TIME; +drop index ACT_IDX_HI_DETAIL_NAME; +drop index ACT_IDX_HI_DETAIL_TASK_ID; +drop index ACT_IDX_HI_PROCVAR_PROC_INST; +drop index ACT_IDX_HI_PROCVAR_TASK_ID; +drop index ACT_IDX_HI_PROCVAR_EXE; +drop index ACT_IDX_HI_ACT_INST_PROCINST; +drop index ACT_IDX_HI_IDENT_LNK_TASK; +drop index ACT_IDX_HI_IDENT_LNK_PROCINST; +drop index ACT_IDX_HI_TASK_INST_PROCINST; + +drop table ACT_HI_PROCINST; +drop table ACT_HI_ACTINST; +drop table ACT_HI_DETAIL; +drop table ACT_HI_COMMENT; +drop table ACT_HI_ATTACHMENT; + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.history.sql new file mode 100644 index 00000000..55c5dbec --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.history.sql @@ -0,0 +1,23 @@ +create table ACT_HI_ENTITYLINK ( + ID_ VARCHAR2(64), + LINK_TYPE_ VARCHAR2(255), + CREATE_TIME_ TIMESTAMP(6), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + PARENT_ELEMENT_ID_ VARCHAR2(255), + REF_SCOPE_ID_ VARCHAR2(255), + REF_SCOPE_TYPE_ VARCHAR2(255), + REF_SCOPE_DEFINITION_ID_ VARCHAR2(255), + ROOT_SCOPE_ID_ VARCHAR2(255), + ROOT_SCOPE_TYPE_ VARCHAR2(255), + HIERARCHY_TYPE_ VARCHAR2(255), + primary key (ID_) +); + +create index ACT_IDX_HI_ENT_LNK_SCOPE on ACT_HI_ENTITYLINK(SCOPE_ID_, SCOPE_TYPE_, LINK_TYPE_); +create index ACT_IDX_HI_ENT_LNK_REF_SCOPE on ACT_HI_ENTITYLINK(REF_SCOPE_ID_, REF_SCOPE_TYPE_, LINK_TYPE_); +create index ACT_IDX_HI_ENT_LNK_ROOT_SCOPE on ACT_HI_ENTITYLINK(ROOT_SCOPE_ID_, ROOT_SCOPE_TYPE_, LINK_TYPE_); +create index 
ACT_IDX_HI_ENT_LNK_SCOPE_DEF on ACT_HI_ENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_, LINK_TYPE_); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.sql new file mode 100644 index 00000000..de084516 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/create/flowable.oracle.create.entitylink.sql @@ -0,0 +1,26 @@ +create table ACT_RU_ENTITYLINK ( + ID_ VARCHAR2(64), + REV_ INTEGER, + CREATE_TIME_ TIMESTAMP(6), + LINK_TYPE_ VARCHAR2(255), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + PARENT_ELEMENT_ID_ VARCHAR2(255), + REF_SCOPE_ID_ VARCHAR2(255), + REF_SCOPE_TYPE_ VARCHAR2(255), + REF_SCOPE_DEFINITION_ID_ VARCHAR2(255), + ROOT_SCOPE_ID_ VARCHAR2(255), + ROOT_SCOPE_TYPE_ VARCHAR2(255), + HIERARCHY_TYPE_ VARCHAR2(255), + primary key (ID_) +); + +create index ACT_IDX_ENT_LNK_SCOPE on ACT_RU_ENTITYLINK(SCOPE_ID_, SCOPE_TYPE_, LINK_TYPE_); +create index ACT_IDX_ENT_LNK_REF_SCOPE on ACT_RU_ENTITYLINK(REF_SCOPE_ID_, REF_SCOPE_TYPE_, LINK_TYPE_); +create index ACT_IDX_ENT_LNK_ROOT_SCOPE on ACT_RU_ENTITYLINK(ROOT_SCOPE_ID_, ROOT_SCOPE_TYPE_, LINK_TYPE_); +create index ACT_IDX_ENT_LNK_SCOPE_DEF on ACT_RU_ENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_, LINK_TYPE_); + +insert into ACT_GE_PROPERTY values ('entitylink.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.history.sql new file mode 100644 index 00000000..a908877e --- /dev/null +++ 
b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.history.sql @@ -0,0 +1,4 @@ +drop index ACT_IDX_HI_ENT_LNK_SCOPE; +drop index ACT_IDX_HI_ENT_LNK_SCOPE_DEF; + +drop table ACT_HI_ENTITYLINK; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.sql new file mode 100644 index 00000000..aedbacd9 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/entitylink/service/db/drop/flowable.oracle.drop.entitylink.sql @@ -0,0 +1,4 @@ +drop index ACT_IDX_ENT_LNK_SCOPE; +drop index ACT_IDX_ENT_LNK_SCOPE_DEF; + +drop table ACT_RU_ENTITYLINK; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/create/flowable.oracle.create.eventsubscription.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/create/flowable.oracle.create.eventsubscription.sql new file mode 100644 index 00000000..eb22164c --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/create/flowable.oracle.create.eventsubscription.sql @@ -0,0 +1,28 @@ +create table ACT_RU_EVENT_SUBSCR ( + ID_ VARCHAR2(64) not null, + REV_ integer, + EVENT_TYPE_ VARCHAR2(255) not null, + EVENT_NAME_ VARCHAR2(255), + EXECUTION_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + ACTIVITY_ID_ VARCHAR2(64), + CONFIGURATION_ VARCHAR2(255), + CREATED_ TIMESTAMP(6) not null, + PROC_DEF_ID_ VARCHAR2(64), + SUB_SCOPE_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(64), + SCOPE_DEFINITION_ID_ VARCHAR2(64), + SCOPE_DEFINITION_KEY_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(64), + LOCK_TIME_ TIMESTAMP(6), + LOCK_OWNER_ VARCHAR2(255), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + 
primary key (ID_) +); + +create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_); +create index ACT_IDX_EVENT_SUBSCR on ACT_RU_EVENT_SUBSCR(EXECUTION_ID_); +create index ACT_IDX_EVENT_SUBSCR_SCOPEREF_ on ACT_RU_EVENT_SUBSCR(SCOPE_ID_, SCOPE_TYPE_); + +insert into ACT_GE_PROPERTY values ('eventsubscription.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/drop/flowable.oracle.drop.eventsubscription.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/drop/flowable.oracle.drop.eventsubscription.sql new file mode 100644 index 00000000..c85ad74a --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/eventsubscription/service/db/drop/flowable.oracle.drop.eventsubscription.sql @@ -0,0 +1,5 @@ +drop index ACT_IDX_EVENT_SUBSCR_CONFIG_; +drop index ACT_IDX_EVENT_SUBSCR; +drop index ACT_IDX_EVENT_SUBSCR_SCOPEREF_; + +drop table ACT_RU_EVENT_SUBSCR; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.history.sql new file mode 100644 index 00000000..2305f0ae --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.history.sql @@ -0,0 +1,20 @@ +create table ACT_HI_IDENTITYLINK ( + ID_ VARCHAR2(64), + GROUP_ID_ VARCHAR2(255), + TYPE_ VARCHAR2(255), + USER_ID_ VARCHAR2(255), + TASK_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + PROC_INST_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + primary key (ID_) +); + +create index ACT_IDX_HI_IDENT_LNK_USER on 
ACT_HI_IDENTITYLINK(USER_ID_); +create index ACT_IDX_HI_IDENT_LNK_SCOPE on ACT_HI_IDENTITYLINK(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_HI_IDENT_LNK_SUB_SCOPE on ACT_HI_IDENTITYLINK(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_HI_IDENT_LNK_SCOPE_DEF on ACT_HI_IDENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.sql new file mode 100644 index 00000000..e2908794 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/create/flowable.oracle.create.identitylink.sql @@ -0,0 +1,24 @@ +create table ACT_RU_IDENTITYLINK ( + ID_ VARCHAR2(64), + REV_ INTEGER, + GROUP_ID_ VARCHAR2(255), + TYPE_ VARCHAR2(255), + USER_ID_ VARCHAR2(255), + TASK_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + primary key (ID_) +); + +create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_); +create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_); +create index ACT_IDX_IDENT_LNK_SCOPE on ACT_RU_IDENTITYLINK(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_IDENT_LNK_SUB_SCOPE on ACT_RU_IDENTITYLINK(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_IDENT_LNK_SCOPE_DEF on ACT_RU_IDENTITYLINK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +insert into ACT_GE_PROPERTY values ('identitylink.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.history.sql 
b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.history.sql new file mode 100644 index 00000000..7cff665a --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.history.sql @@ -0,0 +1,6 @@ +drop index ACT_IDX_HI_IDENT_LNK_USER; +drop index ACT_IDX_HI_IDENT_LNK_SCOPE; +drop index ACT_IDX_HI_IDENT_LNK_SUB_SCOPE; +drop index ACT_IDX_HI_IDENT_LNK_SCOPE_DEF; + +drop table ACT_HI_IDENTITYLINK; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.sql new file mode 100644 index 00000000..485344aa --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/identitylink/service/db/drop/flowable.oracle.drop.identitylink.sql @@ -0,0 +1,7 @@ +drop index ACT_IDX_IDENT_LNK_USER; +drop index ACT_IDX_IDENT_LNK_GROUP; +drop index ACT_IDX_IDENT_LNK_SCOPE; +drop index ACT_IDX_IDENT_LNK_SUB_SCOPE; +drop index ACT_IDX_IDENT_LNK_SCOPE_DEF; + +drop table ACT_RU_IDENTITYLINK; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/create/flowable.oracle.create.identity.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/create/flowable.oracle.create.identity.sql new file mode 100644 index 00000000..562f45ed --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/create/flowable.oracle.create.identity.sql @@ -0,0 +1,108 @@ +create table ACT_ID_PROPERTY ( + NAME_ VARCHAR2(64), + VALUE_ VARCHAR2(300), + REV_ INTEGER, + primary key (NAME_) +); + +insert into ACT_ID_PROPERTY +values ('schema.version', '7.0.1.1', 1); + +create table ACT_ID_BYTEARRAY ( + ID_ VARCHAR2(64), + 
REV_ INTEGER, + NAME_ VARCHAR2(255), + BYTES_ BLOB, + primary key (ID_) +); + +create table ACT_ID_GROUP ( + ID_ VARCHAR2(64), + REV_ INTEGER, + NAME_ VARCHAR2(255), + TYPE_ VARCHAR2(255), + primary key (ID_) +); + +create table ACT_ID_MEMBERSHIP ( + USER_ID_ VARCHAR2(64), + GROUP_ID_ VARCHAR2(64), + primary key (USER_ID_, GROUP_ID_) +); + +create table ACT_ID_USER ( + ID_ VARCHAR2(64), + REV_ INTEGER, + FIRST_ VARCHAR2(255), + LAST_ VARCHAR2(255), + DISPLAY_NAME_ VARCHAR2(255), + EMAIL_ VARCHAR2(255), + PWD_ VARCHAR2(255), + PICTURE_ID_ VARCHAR2(64), + TENANT_ID_ VARCHAR2(255) default '', + primary key (ID_) +); + +create table ACT_ID_INFO ( + ID_ VARCHAR2(64), + REV_ INTEGER, + USER_ID_ VARCHAR2(64), + TYPE_ VARCHAR2(64), + KEY_ VARCHAR2(255), + VALUE_ VARCHAR2(255), + PASSWORD_ BLOB, + PARENT_ID_ VARCHAR2(255), + primary key (ID_) +); + +create table ACT_ID_TOKEN ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER, + TOKEN_VALUE_ VARCHAR2(255), + TOKEN_DATE_ TIMESTAMP(6), + IP_ADDRESS_ VARCHAR2(255), + USER_AGENT_ VARCHAR2(255), + USER_ID_ VARCHAR2(255), + TOKEN_DATA_ VARCHAR2(2000), + primary key (ID_) +); + +create table ACT_ID_PRIV ( + ID_ VARCHAR2(64) not null, + NAME_ VARCHAR2(255) not null, + primary key (ID_) +); + +create table ACT_ID_PRIV_MAPPING ( + ID_ VARCHAR2(64) not null, + PRIV_ID_ VARCHAR2(64) not null, + USER_ID_ VARCHAR2(255), + GROUP_ID_ VARCHAR2(255), + primary key (ID_) +); + +create index ACT_IDX_MEMB_GROUP on ACT_ID_MEMBERSHIP(GROUP_ID_); +alter table ACT_ID_MEMBERSHIP + add constraint ACT_FK_MEMB_GROUP + foreign key (GROUP_ID_) + references ACT_ID_GROUP (ID_); + +create index ACT_IDX_MEMB_USER on ACT_ID_MEMBERSHIP(USER_ID_); +alter table ACT_ID_MEMBERSHIP + add constraint ACT_FK_MEMB_USER + foreign key (USER_ID_) + references ACT_ID_USER (ID_); + +create index ACT_IDX_PRIV_MAPPING on ACT_ID_PRIV_MAPPING(PRIV_ID_); +alter table ACT_ID_PRIV_MAPPING + add constraint ACT_FK_PRIV_MAPPING + foreign key (PRIV_ID_) + references ACT_ID_PRIV (ID_); + 
+create index ACT_IDX_PRIV_USER on ACT_ID_PRIV_MAPPING(USER_ID_); +create index ACT_IDX_PRIV_GROUP on ACT_ID_PRIV_MAPPING(GROUP_ID_); + +alter table ACT_ID_PRIV + add constraint ACT_UNIQ_PRIV_NAME + unique (NAME_); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/drop/flowable.oracle.drop.identity.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/drop/flowable.oracle.drop.identity.sql new file mode 100644 index 00000000..5cac418f --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/idm/db/drop/flowable.oracle.drop.identity.sql @@ -0,0 +1,22 @@ +alter table ACT_ID_MEMBERSHIP + drop CONSTRAINT ACT_FK_MEMB_GROUP; + +alter table ACT_ID_MEMBERSHIP + drop CONSTRAINT ACT_FK_MEMB_USER; + +alter table ACT_ID_PRIV_MAPPING + drop CONSTRAINT ACT_FK_PRIV_MAPPING; + +drop index ACT_IDX_MEMB_GROUP; +drop index ACT_IDX_MEMB_USER; +drop index ACT_IDX_PRIV_MAPPING; + +drop table ACT_ID_PROPERTY; +drop table ACT_ID_BYTEARRAY; +drop table ACT_ID_INFO; +drop table ACT_ID_MEMBERSHIP; +drop table ACT_ID_GROUP; +drop table ACT_ID_USER; +drop table ACT_ID_TOKEN; +drop table ACT_ID_PRIV; +drop table ACT_ID_PRIV_MAPPING; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/create/flowable.oracle.create.job.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/create/flowable.oracle.create.job.sql new file mode 100644 index 00000000..8b3e79bf --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/create/flowable.oracle.create.job.sql @@ -0,0 +1,261 @@ +create table ACT_RU_JOB ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + CATEGORY_ VARCHAR2(255), + TYPE_ VARCHAR2(255) NOT NULL, + LOCK_EXP_TIME_ TIMESTAMP(6), + LOCK_OWNER_ VARCHAR2(255), + EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)), + EXECUTION_ID_ VARCHAR2(64), + PROCESS_INSTANCE_ID_ VARCHAR2(64), + PROC_DEF_ID_ 
VARCHAR2(64), + ELEMENT_ID_ VARCHAR2(255), + ELEMENT_NAME_ VARCHAR2(255), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + CORRELATION_ID_ VARCHAR2(255), + RETRIES_ INTEGER, + EXCEPTION_STACK_ID_ VARCHAR2(64), + EXCEPTION_MSG_ VARCHAR2(2000), + DUEDATE_ TIMESTAMP(6), + REPEAT_ VARCHAR2(255), + HANDLER_TYPE_ VARCHAR2(255), + HANDLER_CFG_ VARCHAR2(2000), + CUSTOM_VALUES_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create table ACT_RU_TIMER_JOB ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + CATEGORY_ VARCHAR2(255), + TYPE_ VARCHAR2(255) NOT NULL, + LOCK_EXP_TIME_ TIMESTAMP(6), + LOCK_OWNER_ VARCHAR2(255), + EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)), + EXECUTION_ID_ VARCHAR2(64), + PROCESS_INSTANCE_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + ELEMENT_ID_ VARCHAR2(255), + ELEMENT_NAME_ VARCHAR2(255), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + CORRELATION_ID_ VARCHAR2(255), + RETRIES_ INTEGER, + EXCEPTION_STACK_ID_ VARCHAR2(64), + EXCEPTION_MSG_ VARCHAR2(2000), + DUEDATE_ TIMESTAMP(6), + REPEAT_ VARCHAR2(255), + HANDLER_TYPE_ VARCHAR2(255), + HANDLER_CFG_ VARCHAR2(2000), + CUSTOM_VALUES_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create table ACT_RU_SUSPENDED_JOB ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + CATEGORY_ VARCHAR2(255), + TYPE_ VARCHAR2(255) NOT NULL, + EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)), + EXECUTION_ID_ VARCHAR2(64), + PROCESS_INSTANCE_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + ELEMENT_ID_ VARCHAR2(255), + ELEMENT_NAME_ VARCHAR2(255), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + CORRELATION_ID_ VARCHAR2(255), + RETRIES_ INTEGER, + 
EXCEPTION_STACK_ID_ VARCHAR2(64), + EXCEPTION_MSG_ VARCHAR2(2000), + DUEDATE_ TIMESTAMP(6), + REPEAT_ VARCHAR2(255), + HANDLER_TYPE_ VARCHAR2(255), + HANDLER_CFG_ VARCHAR2(2000), + CUSTOM_VALUES_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create table ACT_RU_DEADLETTER_JOB ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + CATEGORY_ VARCHAR2(255), + TYPE_ VARCHAR2(255) NOT NULL, + EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)), + EXECUTION_ID_ VARCHAR2(64), + PROCESS_INSTANCE_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + ELEMENT_ID_ VARCHAR2(255), + ELEMENT_NAME_ VARCHAR2(255), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + CORRELATION_ID_ VARCHAR2(255), + EXCEPTION_STACK_ID_ VARCHAR2(64), + EXCEPTION_MSG_ VARCHAR2(2000), + DUEDATE_ TIMESTAMP(6), + REPEAT_ VARCHAR2(255), + HANDLER_TYPE_ VARCHAR2(255), + HANDLER_CFG_ VARCHAR2(2000), + CUSTOM_VALUES_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create table ACT_RU_HISTORY_JOB ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + LOCK_EXP_TIME_ TIMESTAMP(6), + LOCK_OWNER_ VARCHAR2(255), + RETRIES_ INTEGER, + EXCEPTION_STACK_ID_ VARCHAR2(64), + EXCEPTION_MSG_ VARCHAR2(2000), + HANDLER_TYPE_ VARCHAR2(255), + HANDLER_CFG_ VARCHAR2(2000), + CUSTOM_VALUES_ID_ VARCHAR2(64), + ADV_HANDLER_CFG_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + SCOPE_TYPE_ VARCHAR2(255), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create table ACT_RU_EXTERNAL_JOB ( + ID_ VARCHAR2(64) NOT NULL, + REV_ INTEGER, + CATEGORY_ VARCHAR2(255), + TYPE_ VARCHAR2(255) NOT NULL, + LOCK_EXP_TIME_ TIMESTAMP(6), + LOCK_OWNER_ VARCHAR2(255), + EXCLUSIVE_ NUMBER(1) CHECK (EXCLUSIVE_ IN (1,0)), + EXECUTION_ID_ VARCHAR2(64), + PROCESS_INSTANCE_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + ELEMENT_ID_ VARCHAR2(255), + 
ELEMENT_NAME_ VARCHAR2(255), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + CORRELATION_ID_ VARCHAR2(255), + RETRIES_ INTEGER, + EXCEPTION_STACK_ID_ VARCHAR2(64), + EXCEPTION_MSG_ VARCHAR2(2000), + DUEDATE_ TIMESTAMP(6), + REPEAT_ VARCHAR2(255), + HANDLER_TYPE_ VARCHAR2(255), + HANDLER_CFG_ VARCHAR2(2000), + CUSTOM_VALUES_ID_ VARCHAR2(64), + CREATE_TIME_ TIMESTAMP(6), + TENANT_ID_ VARCHAR2(255) DEFAULT '', + primary key (ID_) +); + +create index ACT_IDX_JOB_EXCEPTION on ACT_RU_JOB(EXCEPTION_STACK_ID_); +create index ACT_IDX_JOB_CUSTOM_VAL_ID on ACT_RU_JOB(CUSTOM_VALUES_ID_); +create index ACT_IDX_JOB_CORRELATION_ID on ACT_RU_JOB(CORRELATION_ID_); + +create index ACT_IDX_TJOB_EXCEPTION on ACT_RU_TIMER_JOB(EXCEPTION_STACK_ID_); +create index ACT_IDX_TJOB_CUSTOM_VAL_ID on ACT_RU_TIMER_JOB(CUSTOM_VALUES_ID_); +create index ACT_IDX_TJOB_CORRELATION_ID on ACT_RU_TIMER_JOB(CORRELATION_ID_); +create index ACT_IDX_TJOB_DUEDATE on ACT_RU_TIMER_JOB(DUEDATE_); + +create index ACT_IDX_SJOB_EXCEPTION on ACT_RU_SUSPENDED_JOB(EXCEPTION_STACK_ID_); +create index ACT_IDX_SJOB_CUSTOM_VAL_ID on ACT_RU_SUSPENDED_JOB(CUSTOM_VALUES_ID_); +create index ACT_IDX_SJOB_CORRELATION_ID on ACT_RU_SUSPENDED_JOB(CORRELATION_ID_); + +create index ACT_IDX_DJOB_EXCEPTION on ACT_RU_DEADLETTER_JOB(EXCEPTION_STACK_ID_); +create index ACT_IDX_DJOB_CUSTOM_VAL_ID on ACT_RU_DEADLETTER_JOB(CUSTOM_VALUES_ID_); +create index ACT_IDX_DJOB_CORRELATION_ID on ACT_RU_DEADLETTER_JOB(CORRELATION_ID_); + +create index ACT_IDX_EJOB_EXCEPTION on ACT_RU_EXTERNAL_JOB(EXCEPTION_STACK_ID_); +create index ACT_IDX_EJOB_CUSTOM_VAL_ID on ACT_RU_EXTERNAL_JOB(CUSTOM_VALUES_ID_); +create index ACT_IDX_EJOB_CORRELATION_ID on ACT_RU_EXTERNAL_JOB(CORRELATION_ID_); + +alter table ACT_RU_JOB + add constraint ACT_FK_JOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_JOB + add constraint 
ACT_FK_JOB_CUSTOM_VAL + foreign key (CUSTOM_VALUES_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_TIMER_JOB + add constraint ACT_FK_TJOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_TIMER_JOB + add constraint ACT_FK_TJOB_CUSTOM_VAL + foreign key (CUSTOM_VALUES_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_SUSPENDED_JOB + add constraint ACT_FK_SJOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_SUSPENDED_JOB + add constraint ACT_FK_SJOB_CUSTOM_VAL + foreign key (CUSTOM_VALUES_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_DEADLETTER_JOB + add constraint ACT_FK_DJOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_DEADLETTER_JOB + add constraint ACT_FK_DJOB_CUSTOM_VAL + foreign key (CUSTOM_VALUES_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_EXTERNAL_JOB + add constraint ACT_FK_EJOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_EXTERNAL_JOB + add constraint ACT_FK_EJOB_CUSTOM_VAL + foreign key (CUSTOM_VALUES_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_JOB_SCOPE on ACT_RU_JOB(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_JOB_SUB_SCOPE on ACT_RU_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_JOB_SCOPE_DEF on ACT_RU_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +create index ACT_IDX_TJOB_SCOPE on ACT_RU_TIMER_JOB(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_TJOB_SUB_SCOPE on ACT_RU_TIMER_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_TJOB_SCOPE_DEF on ACT_RU_TIMER_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +create index ACT_IDX_SJOB_SCOPE on ACT_RU_SUSPENDED_JOB(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_SJOB_SUB_SCOPE on ACT_RU_SUSPENDED_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_SJOB_SCOPE_DEF on 
ACT_RU_SUSPENDED_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +create index ACT_IDX_DJOB_SCOPE on ACT_RU_DEADLETTER_JOB(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_DJOB_SUB_SCOPE on ACT_RU_DEADLETTER_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_DJOB_SCOPE_DEF on ACT_RU_DEADLETTER_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +create index ACT_IDX_EJOB_SCOPE on ACT_RU_EXTERNAL_JOB(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_EJOB_SUB_SCOPE on ACT_RU_EXTERNAL_JOB(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_EJOB_SCOPE_DEF on ACT_RU_EXTERNAL_JOB(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +insert into ACT_GE_PROPERTY values ('job.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/drop/flowable.oracle.drop.job.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/drop/flowable.oracle.drop.job.sql new file mode 100644 index 00000000..a219e97a --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/job/service/db/drop/flowable.oracle.drop.job.sql @@ -0,0 +1,74 @@ +drop index ACT_IDX_JOB_SCOPE; +drop index ACT_IDX_JOB_SUB_SCOPE; +drop index ACT_IDX_JOB_SCOPE_DEF; +drop index ACT_IDX_TJOB_SCOPE; +drop index ACT_IDX_TJOB_SUB_SCOPE; +drop index ACT_IDX_TJOB_SCOPE_DEF; +drop index ACT_IDX_SJOB_SCOPE; +drop index ACT_IDX_SJOB_SUB_SCOPE; +drop index ACT_IDX_SJOB_SCOPE_DEF; +drop index ACT_IDX_DJOB_SCOPE; +drop index ACT_IDX_DJOB_SUB_SCOPE; +drop index ACT_IDX_DJOB_SCOPE_DEF; +drop index ACT_IDX_EJOB_SCOPE; +drop index ACT_IDX_EJOB_SUB_SCOPE; +drop index ACT_IDX_EJOB_SCOPE_DEF; + +drop index ACT_IDX_JOB_EXCEPTION; +drop index ACT_IDX_JOB_CUSTOM_VAL_ID; +drop index ACT_IDX_JOB_CORRELATION_ID; + +drop index ACT_IDX_TJOB_EXCEPTION; +drop index ACT_IDX_TJOB_CUSTOM_VAL_ID; +drop index ACT_IDX_TJOB_CORRELATION_ID; +drop index ACT_IDX_TJOB_DUEDATE; + +drop index ACT_IDX_SJOB_EXCEPTION; +drop index ACT_IDX_SJOB_CUSTOM_VAL_ID; +drop index 
ACT_IDX_SJOB_CORRELATION_ID; + +drop index ACT_IDX_DJOB_EXCEPTION; +drop index ACT_IDX_DJOB_CUSTOM_VAL; +drop index ACT_IDX_DJOB_CORRELATION_ID; + +drop index ACT_IDX_EJOB_EXCEPTION; +drop index ACT_IDX_EJOB_CUSTOM_VAL; +drop index ACT_IDX_EJOB_CORRELATION_ID; + +alter table ACT_RU_JOB + drop CONSTRAINT ACT_FK_JOB_EXCEPTION; + +alter table ACT_RU_JOB + drop CONSTRAINT ACT_FK_JOB_CUSTOM_VAL; + +alter table ACT_RU_TIMER_JOB + drop CONSTRAINT ACT_FK_TJOB_EXCEPTION; + +alter table ACT_RU_TIMER_JOB + drop CONSTRAINT ACT_FK_TJOB_CUSTOM_VAL; + +alter table ACT_RU_SUSPENDED_JOB + drop CONSTRAINT ACT_FK_SJOB_EXCEPTION; + +alter table ACT_RU_SUSPENDED_JOB + drop CONSTRAINT ACT_FK_SJOB_CUSTOM_VAL; + +alter table ACT_RU_DEADLETTER_JOB + drop CONSTRAINT ACT_FK_DJOB_EXCEPTION; + +alter table ACT_RU_DEADLETTER_JOB + drop CONSTRAINT ACT_FK_DJOB_CUSTOM_VAL; + +alter table ACT_RU_EXTERNAL_JOB + drop CONSTRAINT ACT_FK_EJOB_EXCEPTION; + +alter table ACT_RU_EXTERNAL_JOB + drop CONSTRAINT ACT_FK_EJOB_CUSTOM_VAL; + +drop table ACT_RU_JOB; +drop table ACT_RU_TIMER_JOB; +drop table ACT_RU_SUSPENDED_JOB; +drop table ACT_RU_DEADLETTER_JOB; +drop table ACT_RU_HISTORY_JOB; +drop table ACT_RU_EXTERNAL_JOB; + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.history.sql new file mode 100644 index 00000000..1651c0cf --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.history.sql @@ -0,0 +1,64 @@ +create table ACT_HI_TASKINST ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER default 1, + PROC_DEF_ID_ VARCHAR2(64), + TASK_DEF_ID_ VARCHAR2(64), + TASK_DEF_KEY_ VARCHAR2(255), + PROC_INST_ID_ VARCHAR2(64), + EXECUTION_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ 
VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + PROPAGATED_STAGE_INST_ID_ VARCHAR2(255), + PARENT_TASK_ID_ VARCHAR2(64), + STATE_ VARCHAR2(255), + NAME_ VARCHAR2(255), + DESCRIPTION_ VARCHAR2(2000), + OWNER_ VARCHAR2(255), + ASSIGNEE_ VARCHAR2(255), + START_TIME_ TIMESTAMP(6) not null, + IN_PROGRESS_TIME_ TIMESTAMP(6), + IN_PROGRESS_STARTED_BY_ VARCHAR2(255), + CLAIM_TIME_ TIMESTAMP(6), + CLAIMED_BY_ VARCHAR2(255), + SUSPENDED_TIME_ TIMESTAMP(6), + SUSPENDED_BY_ VARCHAR2(255), + END_TIME_ TIMESTAMP(6), + COMPLETED_BY_ VARCHAR2(255), + DURATION_ NUMBER(19,0), + DELETE_REASON_ VARCHAR2(2000), + PRIORITY_ INTEGER, + IN_PROGRESS_DUE_DATE_ TIMESTAMP(6), + DUE_DATE_ TIMESTAMP(6), + FORM_KEY_ VARCHAR2(255), + CATEGORY_ VARCHAR2(255), + TENANT_ID_ VARCHAR2(255) default '', + LAST_UPDATED_TIME_ TIMESTAMP(6), + primary key (ID_) +); + +create table ACT_HI_TSK_LOG ( + ID_ NUMBER(19), + TYPE_ VARCHAR2(64), + TASK_ID_ VARCHAR2(64) not null, + TIME_STAMP_ TIMESTAMP(6) not null, + USER_ID_ VARCHAR2(255), + DATA_ VARCHAR2(2000), + EXECUTION_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + TENANT_ID_ VARCHAR2(255) default '', + primary key (ID_) +); + +create sequence act_hi_task_evt_log_seq start with 1 increment by 1; + +create index ACT_IDX_HI_TASK_SCOPE on ACT_HI_TASKINST(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_HI_TASK_SUB_SCOPE on ACT_HI_TASKINST(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_HI_TASK_SCOPE_DEF on ACT_HI_TASKINST(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.sql new file mode 100644 index 00000000..9430a1cb --- /dev/null +++ 
b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/create/flowable.oracle.create.task.sql @@ -0,0 +1,48 @@ +create table ACT_RU_TASK ( + ID_ VARCHAR2(64), + REV_ INTEGER, + EXECUTION_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + PROC_DEF_ID_ VARCHAR2(64), + TASK_DEF_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + SCOPE_DEFINITION_ID_ VARCHAR2(255), + PROPAGATED_STAGE_INST_ID_ VARCHAR2(255), + STATE_ VARCHAR2(255), + NAME_ VARCHAR2(255), + PARENT_TASK_ID_ VARCHAR2(64), + DESCRIPTION_ VARCHAR2(2000), + TASK_DEF_KEY_ VARCHAR2(255), + OWNER_ VARCHAR2(255), + ASSIGNEE_ VARCHAR2(255), + DELEGATION_ VARCHAR2(64), + PRIORITY_ INTEGER, + CREATE_TIME_ TIMESTAMP(6), + IN_PROGRESS_TIME_ TIMESTAMP(6), + IN_PROGRESS_STARTED_BY_ VARCHAR2(255), + CLAIM_TIME_ TIMESTAMP(6), + CLAIMED_BY_ VARCHAR2(255), + SUSPENDED_TIME_ TIMESTAMP(6), + SUSPENDED_BY_ VARCHAR2(255), + IN_PROGRESS_DUE_DATE_ TIMESTAMP(6), + DUE_DATE_ TIMESTAMP(6), + CATEGORY_ VARCHAR2(255), + SUSPENSION_STATE_ INTEGER, + TENANT_ID_ VARCHAR2(255) DEFAULT '', + FORM_KEY_ VARCHAR2(255), + IS_COUNT_ENABLED_ NUMBER(1) CHECK (IS_COUNT_ENABLED_ IN (1,0)), + VAR_COUNT_ INTEGER, + ID_LINK_COUNT_ INTEGER, + SUB_TASK_COUNT_ INTEGER, + primary key (ID_) +); + +create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_); +create index ACT_IDX_TASK_SCOPE on ACT_RU_TASK(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_TASK_SUB_SCOPE on ACT_RU_TASK(SUB_SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_TASK_SCOPE_DEF on ACT_RU_TASK(SCOPE_DEFINITION_ID_, SCOPE_TYPE_); + +insert into ACT_GE_PROPERTY values ('task.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.history.sql new file mode 100644 index 
00000000..c5ce7bbf --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.history.sql @@ -0,0 +1,8 @@ +drop index ACT_IDX_HI_TASK_SCOPE; +drop index ACT_IDX_HI_TASK_SUB_SCOPE; +drop index ACT_IDX_HI_TASK_SCOPE_DEF; + +drop sequence act_hi_task_evt_log_seq; + +drop table ACT_HI_TASKINST; +drop table ACT_HI_TSK_LOG; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.sql new file mode 100644 index 00000000..9ecd1e94 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/task/service/db/drop/flowable.oracle.drop.task.sql @@ -0,0 +1,6 @@ +drop index ACT_IDX_TASK_CREATE; +drop index ACT_IDX_TASK_SCOPE; +drop index ACT_IDX_TASK_SUB_SCOPE; +drop index ACT_IDX_TASK_SCOPE_DEF; + +drop table ACT_RU_TASK; diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.history.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.history.sql new file mode 100644 index 00000000..3d9b50fd --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.history.sql @@ -0,0 +1,26 @@ +create table ACT_HI_VARINST ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER default 1, + PROC_INST_ID_ VARCHAR2(64), + EXECUTION_ID_ VARCHAR2(64), + TASK_ID_ VARCHAR2(64), + NAME_ VARCHAR2(255) not null, + VAR_TYPE_ VARCHAR2(100), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + BYTEARRAY_ID_ VARCHAR2(64), + DOUBLE_ NUMBER(38,10), + LONG_ NUMBER(19,0), + TEXT_ VARCHAR2(2000), + TEXT2_ VARCHAR2(2000), + META_INFO_ VARCHAR2(2000), + 
CREATE_TIME_ TIMESTAMP(6), + LAST_UPDATED_TIME_ TIMESTAMP(6), + primary key (ID_) +); + +create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_); +create index ACT_IDX_HI_VAR_SCOPE_ID_TYPE on ACT_HI_VARINST(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_HI_VAR_SUB_ID_TYPE on ACT_HI_VARINST(SUB_SCOPE_ID_, SCOPE_TYPE_); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.sql new file mode 100644 index 00000000..7c02f7f6 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/create/flowable.oracle.create.variable.sql @@ -0,0 +1,31 @@ +create table ACT_RU_VARIABLE ( + ID_ VARCHAR2(64) not null, + REV_ INTEGER, + TYPE_ VARCHAR2(255) not null, + NAME_ VARCHAR2(255) not null, + EXECUTION_ID_ VARCHAR2(64), + PROC_INST_ID_ VARCHAR2(64), + TASK_ID_ VARCHAR2(64), + SCOPE_ID_ VARCHAR2(255), + SUB_SCOPE_ID_ VARCHAR2(255), + SCOPE_TYPE_ VARCHAR2(255), + BYTEARRAY_ID_ VARCHAR2(64), + DOUBLE_ NUMBER(38,10), + LONG_ NUMBER(19,0), + TEXT_ VARCHAR2(2000), + TEXT2_ VARCHAR2(2000), + META_INFO_ VARCHAR2(2000), + primary key (ID_) +); + +create index ACT_IDX_RU_VAR_SCOPE_ID_TYPE on ACT_RU_VARIABLE(SCOPE_ID_, SCOPE_TYPE_); +create index ACT_IDX_RU_VAR_SUB_ID_TYPE on ACT_RU_VARIABLE(SUB_SCOPE_ID_, SCOPE_TYPE_); + +create index ACT_IDX_VAR_BYTEARRAY on ACT_RU_VARIABLE(BYTEARRAY_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_BYTEARRAY + foreign key (BYTEARRAY_ID_) + references ACT_GE_BYTEARRAY (ID_); + +insert into ACT_GE_PROPERTY values ('variable.schema.version', '7.0.1.1', 1); + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.history.sql 
b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.history.sql new file mode 100644 index 00000000..efcc68d4 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.history.sql @@ -0,0 +1,6 @@ +drop index ACT_IDX_HI_PROCVAR_NAME_TYPE; +drop index ACT_IDX_HI_VAR_SCOPE_ID_TYPE; +drop index ACT_IDX_HI_VAR_SUB_ID_TYPE; + +drop table ACT_HI_VARINST; + diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.sql b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.sql new file mode 100644 index 00000000..0d0a95fc --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/org/flowable/variable/service/db/drop/flowable.oracle.drop.variable.sql @@ -0,0 +1,9 @@ +drop index ACT_IDX_VAR_BYTEARRAY; +drop index ACT_IDX_RU_VAR_SCOPE_ID_TYPE; +drop index ACT_IDX_RU_VAR_SUB_ID_TYPE; + +alter table ACT_RU_VARIABLE + drop CONSTRAINT ACT_FK_VAR_BYTEARRAY; + +drop table ACT_RU_VARIABLE; + From 0ab550123ff0acf86cc725c6a1118f79e0ff5a5e Mon Sep 17 00:00:00 2001 From: chenbowen Date: Thu, 27 Nov 2025 10:27:30 +0800 Subject: [PATCH 2/6] =?UTF-8?q?=E5=85=B3=E9=97=AD=20databus=20web=20?= =?UTF-8?q?=E8=AF=B7=E6=B1=82=E8=BF=9E=E6=8E=A5=E6=B1=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../config/GatewayWebClientConfiguration.java | 32 +++++++++++++------ .../src/main/resources/application.yml | 5 +++ 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/zt-module-databus/zt-module-databus-server/src/main/java/com/zt/plat/module/databus/framework/integration/gateway/config/GatewayWebClientConfiguration.java 
b/zt-module-databus/zt-module-databus-server/src/main/java/com/zt/plat/module/databus/framework/integration/gateway/config/GatewayWebClientConfiguration.java index 67bfe6ef..24811a53 100644 --- a/zt-module-databus/zt-module-databus-server/src/main/java/com/zt/plat/module/databus/framework/integration/gateway/config/GatewayWebClientConfiguration.java +++ b/zt-module-databus/zt-module-databus-server/src/main/java/com/zt/plat/module/databus/framework/integration/gateway/config/GatewayWebClientConfiguration.java @@ -1,5 +1,7 @@ package com.zt.plat.module.databus.framework.integration.gateway.config; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.web.reactive.function.client.WebClientCustomizer; import org.springframework.context.annotation.Bean; @@ -17,33 +19,43 @@ public class GatewayWebClientConfiguration { private final int maxInMemorySize; private final long maxIdleTimeMillis; private final long evictInBackgroundMillis; + private final boolean connectionPoolEnabled; private final ReactorClientHttpConnector httpConnector; + private static final Logger log = LoggerFactory.getLogger(GatewayWebClientConfiguration.class); public GatewayWebClientConfiguration( @Value("${databus.gateway.web-client.max-in-memory-size:20971520}") int maxInMemorySize, @Value("${databus.gateway.web-client.max-idle-time:45000}") long maxIdleTimeMillis, - @Value("${databus.gateway.web-client.evict-in-background-interval:20000}") long evictInBackgroundMillis) { + @Value("${databus.gateway.web-client.evict-in-background-interval:20000}") long evictInBackgroundMillis, + @Value("${databus.gateway.web-client.connection-pool-enabled:true}") boolean connectionPoolEnabled) { this.maxInMemorySize = maxInMemorySize; - this.maxIdleTimeMillis = maxIdleTimeMillis > 0 ? 
maxIdleTimeMillis : 45000L; - this.evictInBackgroundMillis = Math.max(evictInBackgroundMillis, 0L); + this.maxIdleTimeMillis = maxIdleTimeMillis > 0 ? maxIdleTimeMillis : 45000L; + this.evictInBackgroundMillis = Math.max(evictInBackgroundMillis, 0L); + this.connectionPoolEnabled = connectionPoolEnabled; this.httpConnector = buildConnector(); } @Bean public WebClientCustomizer gatewayWebClientCustomizer() { + // 统一设置 WebClient 连接器与内存限制,避免各处重复配置 return builder -> builder .clientConnector(httpConnector) .codecs(configurer -> configurer.defaultCodecs().maxInMemorySize(maxInMemorySize)); } private ReactorClientHttpConnector buildConnector() { - ConnectionProvider.Builder providerBuilder = ConnectionProvider.builder("databus-gateway") - .maxIdleTime(Duration.ofMillis(maxIdleTimeMillis)); - if (evictInBackgroundMillis > 0) { - providerBuilder.evictInBackground(Duration.ofMillis(evictInBackgroundMillis)); + if (connectionPoolEnabled) { + // 启用连接池,基于配置设置空闲回收参数 + ConnectionProvider provider = ConnectionProvider.builder("databus-gateway") + .maxIdleTime(Duration.ofMillis(maxIdleTimeMillis)) + .evictInBackground(Duration.ofMillis(evictInBackgroundMillis)) + .build(); + log.info("Databus gateway WebClient 已启用连接池 (maxIdleTime={}ms, evictInterval={}ms)", + maxIdleTimeMillis, evictInBackgroundMillis); + return new ReactorClientHttpConnector(HttpClient.create(provider).compress(true)); } - ConnectionProvider provider = providerBuilder.build(); - HttpClient httpClient = HttpClient.create(provider).compress(true); - return new ReactorClientHttpConnector(httpClient); + // 关闭连接池,每次请求都会重新建立 TCP 连接 + log.info("Databus gateway WebClient 已禁用连接池,所有请求将使用全新连接"); + return new ReactorClientHttpConnector(HttpClient.create().compress(true)); } } diff --git a/zt-module-databus/zt-module-databus-server/src/main/resources/application.yml b/zt-module-databus/zt-module-databus-server/src/main/resources/application.yml index d355c7a9..c40ecc83 100644 --- a/zt-module-databus/zt-module-databus-server/src/main/resources/application.yml +++ 
b/zt-module-databus/zt-module-databus-server/src/main/resources/application.yml @@ -131,4 +131,9 @@ zt: ignore-tables: - databus_api_client_credential +databus: + gateway: + web-client: + connection-pool-enabled: false # 默认开启连接池,排查长连接问题时可临时关闭 + debug: false From 4bd0402dde2d4d3aae9aa2de39571dd466d27826 Mon Sep 17 00:00:00 2001 From: chenbowen Date: Thu, 27 Nov 2025 11:16:49 +0800 Subject: [PATCH 3/6] =?UTF-8?q?=E7=A6=81=E6=AD=A2=E4=BA=8B=E4=BB=B6?= =?UTF-8?q?=E5=BC=95=E6=93=8E=E9=87=8D=E5=A4=8D=E8=87=AA=E5=8A=A8=E5=BB=BA?= =?UTF-8?q?=E8=A1=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../zt-module-bpm-server/src/main/resources/application.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml b/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml index adfbf58f..37f2d24f 100644 --- a/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml @@ -82,6 +82,9 @@ flowable: db-history-used: true # flowable6 默认 true 生成信息表,无需手动设置 check-process-definitions: false # 设置为 false,禁用 /resources/processes 自动部署 BPMN XML 流程 history-level: audit # full:保存历史数据的最高级别,可保存全部流程相关细节,包括流程流转各节点参数 + eventregistry: + enabled: true # 默认开启事件引擎,这里显式声明,便于阅读 + database-schema-update: false # 禁止事件引擎重复自动建表,防止 FLW_EV_* 表冲突 # MyBatis Plus 的配置项 mybatis-plus: From 28a49ce45ad5d348bd87fd68ce34cf6b06e72a9f Mon Sep 17 00:00:00 2001 From: chenbowen Date: Thu, 27 Nov 2025 13:26:30 +0800 Subject: [PATCH 4/6] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20dm=20jdbc=20=E4=B8=8D?= =?UTF-8?q?=E5=85=BC=E5=AE=B9=20flowable=20=E8=BD=AC=E4=B9=89=20sql=20?= =?UTF-8?q?=E7=9A=84=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../snapshot/JdbcDatabaseSnapshot.java | 1957 +++++++++++++++++ .../src/main/resources/application.yaml | 3 - 2 files 
changed, 1957 insertions(+), 3 deletions(-) create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/snapshot/JdbcDatabaseSnapshot.java diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/snapshot/JdbcDatabaseSnapshot.java b/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/snapshot/JdbcDatabaseSnapshot.java new file mode 100644 index 00000000..1e0a40ec --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/java/liquibase/snapshot/JdbcDatabaseSnapshot.java @@ -0,0 +1,1957 @@ +package liquibase.snapshot; + +import liquibase.CatalogAndSchema; +import liquibase.Scope; +import liquibase.database.AbstractJdbcDatabase; +import liquibase.database.Database; +import liquibase.database.DatabaseConnection; +import liquibase.database.LiquibaseTableNamesFactory; +import liquibase.database.core.*; +import liquibase.database.jvm.JdbcConnection; +import liquibase.exception.DatabaseException; +import liquibase.executor.jvm.ColumnMapRowMapper; +import liquibase.executor.jvm.RowMapperNotNullConstraintsResultSetExtractor; +import liquibase.structure.DatabaseObject; +import liquibase.structure.core.Catalog; +import liquibase.structure.core.Schema; +import liquibase.structure.core.Table; +import liquibase.structure.core.View; +import liquibase.util.JdbcUtil; +import liquibase.util.StringUtil; + +import java.sql.*; +import java.util.*; + +public class JdbcDatabaseSnapshot extends DatabaseSnapshot { + + private boolean warnedAboutDbaRecycleBin; + private static final boolean ignoreWarnAboutDbaRecycleBin = Boolean.getBoolean("liquibase.ignoreRecycleBinWarning"); + + private CachingDatabaseMetaData cachingDatabaseMetaData; + + private Map cachedExpressionMap = null; + + private Set userDefinedTypes; + + public JdbcDatabaseSnapshot(DatabaseObject[] examples, Database database, SnapshotControl snapshotControl) throws DatabaseException, InvalidExampleException { + super(examples, database, snapshotControl); + } + + public 
JdbcDatabaseSnapshot(DatabaseObject[] examples, Database database) throws DatabaseException, InvalidExampleException { + super(examples, database); + } + + public CachingDatabaseMetaData getMetaDataFromCache() throws SQLException { + if (cachingDatabaseMetaData == null) { + DatabaseMetaData databaseMetaData = null; + if (getDatabase().getConnection() != null) { + databaseMetaData = ((JdbcConnection) getDatabase().getConnection()).getUnderlyingConnection().getMetaData(); + } + + cachingDatabaseMetaData = new CachingDatabaseMetaData(this.getDatabase(), databaseMetaData); + } + return cachingDatabaseMetaData; + } + + public class CachingDatabaseMetaData { + private static final String SQL_FILTER_MATCH_ALL = "%"; + private final DatabaseMetaData databaseMetaData; + private final Database database; + + public CachingDatabaseMetaData(Database database, DatabaseMetaData metaData) { + this.databaseMetaData = metaData; + this.database = database; + } + + public java.sql.DatabaseMetaData getDatabaseMetaData() { + return databaseMetaData; + } + + public List getForeignKeys(final String catalogName, final String schemaName, final String tableName, + final String fkName) throws DatabaseException { + ForeignKeysResultSetCache foreignKeysResultSetCache = new ForeignKeysResultSetCache(database, catalogName, schemaName, tableName, fkName); + ResultSetCache importedKeys = getResultSetCache("getImportedKeys"); + importedKeys.setBulkTracking(!(database instanceof MSSQLDatabase)); + + return importedKeys.get(foreignKeysResultSetCache); + } + + public List getIndexInfo(final String catalogName, final String schemaName, final String tableName, final String indexName) throws DatabaseException, SQLException { + + return getResultSetCache("getIndexInfo").get(new ResultSetCache.UnionResultSetExtractor(database) { + + public boolean isBulkFetchMode; + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("TABLE_CAT"), 
row.getString("TABLE_SCHEM"), database, row.getString("TABLE_NAME"), row.getString("INDEX_NAME")); + } + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, tableName, indexName); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return getAllCatalogsStringScratchData() != null && database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + return row.getString("TABLE_SCHEM"); + } + + @Override + public List fastFetch() throws SQLException, DatabaseException { + List returnList = new ArrayList<>(); + + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + if (database instanceof OracleDatabase) { + warnAboutDbaRecycleBin(); + + //oracle getIndexInfo is buggy and slow. See Issue 1824548 and http://forums.oracle.com/forums/thread.jspa?messageID=578383򍍏 + String sql = + "SELECT " + + "c.INDEX_NAME, " + + "3 AS TYPE, " + + "c.TABLE_OWNER AS TABLE_SCHEM, " + + "c.TABLE_NAME, " + + "c.COLUMN_NAME, " + + "c.COLUMN_POSITION AS ORDINAL_POSITION, " + + "NULL AS FILTER_CONDITION, " + + "c.INDEX_OWNER, " + + "CASE I.UNIQUENESS WHEN 'UNIQUE' THEN 0 ELSE 1 END AS NON_UNIQUE, " + + "CASE c.DESCEND WHEN 'Y' THEN 'D' WHEN 'DESC' THEN 'D' WHEN 'N' THEN 'A' WHEN 'ASC' THEN 'A' END AS ASC_OR_DESC, " + + "CASE WHEN tablespace_name = (SELECT default_tablespace FROM user_users) " + + "THEN NULL ELSE tablespace_name END AS tablespace_name " + + "FROM ALL_IND_COLUMNS c " + + "JOIN ALL_INDEXES i ON i.owner=c.index_owner AND i.index_name = c.index_name and i.table_owner = c.table_owner " + + "LEFT OUTER JOIN " + (((OracleDatabase) database).canAccessDbaRecycleBin() ? 
"dba_recyclebin" : "user_recyclebin") + " d ON d.object_name=c.table_name "; + if (!isBulkFetchMode || getAllCatalogsStringScratchData() == null) { + sql += "WHERE c.TABLE_OWNER = '" + database.correctObjectName(catalogAndSchema.getCatalogName(), Schema.class) + "' "; + } else { + sql += "WHERE c.TABLE_OWNER IN ('" + database.correctObjectName(catalogAndSchema.getCatalogName(), Schema.class) + "', " + getAllCatalogsStringScratchData() + ")"; + } + sql += "AND i.OWNER = c.TABLE_OWNER " + + "AND d.object_name IS NULL "; + + + if (!isBulkFetchMode && (tableName != null)) { + sql += " AND c.TABLE_NAME='" + tableName + "'"; + } + + if (!isBulkFetchMode && (indexName != null)) { + sql += " AND c.INDEX_NAME='" + indexName + "'"; + } + + sql += " ORDER BY c.INDEX_NAME, ORDINAL_POSITION"; + + returnList.addAll(setIndexExpressions(executeAndExtract(sql, database))); + } else if (database instanceof MSSQLDatabase) { + String tableCat = "original_db_name()"; + + if (9 <= database.getDatabaseMajorVersion()) { + tableCat = "db_name()"; + } + //fetch additional index info + String sql = "SELECT " + + tableCat + " as TABLE_CAT, " + + "object_schema_name(i.object_id) as TABLE_SCHEM, " + + "object_name(i.object_id) as TABLE_NAME, " + + "CASE is_unique WHEN 1 then 0 else 1 end as NON_UNIQUE, " + + "object_name(i.object_id) as INDEX_QUALIFIER, " + + "i.name as INDEX_NAME, " + + "case i.type when 1 then 1 ELSE 3 end as TYPE, " + + "key_ordinal as ORDINAL_POSITION, " + + "COL_NAME(c.object_id,c.column_id) AS COLUMN_NAME, " + + "case is_descending_key when 0 then 'A' else 'D' end as ASC_OR_DESC, " + + "null as CARDINALITY, " + + "null as PAGES, " + + "i.filter_definition as FILTER_CONDITION, " + + "o.type AS INTERNAL_OBJECT_TYPE, " + + "i.*, " + + "c.*, " + + "s.* " + + "FROM sys.indexes i " + + "join sys.index_columns c on i.object_id=c.object_id and i.index_id=c.index_id " + + "join sys.stats s on i.object_id=s.object_id and i.name=s.name " + + "join sys.objects o on 
i.object_id=o.object_id " + + "WHERE object_schema_name(i.object_id)='" + database.correctObjectName(catalogAndSchema.getSchemaName(), Schema.class) + "'"; + + if (!isBulkFetchMode && (tableName != null)) { + sql += " AND object_name(i.object_id)='" + database.escapeStringForDatabase(tableName) + "'"; + } + + if (!isBulkFetchMode && (indexName != null)) { + sql += " AND i.name='" + database.escapeStringForDatabase(indexName) + "'"; + } + + sql += "ORDER BY i.object_id, i.index_id, c.key_ordinal"; + + returnList.addAll(executeAndExtract(sql, database)); + + } else if (database instanceof Db2zDatabase) { + List parameters = new ArrayList<>(3); + String sql = "SELECT i.CREATOR AS TABLE_SCHEM, " + + "i.TBNAME AS TABLE_NAME, " + + "i.NAME AS INDEX_NAME, " + + "3 AS TYPE, " + + "k.COLNAME AS COLUMN_NAME, " + + "k.COLSEQ AS ORDINAL_POSITION, " + + "CASE UNIQUERULE WHEN 'D' then 1 else 0 end as NON_UNIQUE, " + + "k.ORDERING AS ORDER, " + + "i.CREATOR AS INDEX_QUALIFIER " + + "FROM SYSIBM.SYSKEYS k " + + "JOIN SYSIBM.SYSINDEXES i " + + "ON k.IXNAME = i.NAME " + + "AND k.IXCREATOR = i.CREATOR " + + "WHERE i.CREATOR = ?"; + parameters.add(database.correctObjectName(catalogAndSchema.getSchemaName(), Schema.class)); + if (!isBulkFetchMode && tableName != null) { + sql += " AND i.TBNAME = ?"; + parameters.add(database.escapeStringForDatabase(tableName)); + } + + if (!isBulkFetchMode && indexName != null) { + sql += " AND i.NAME = ?"; + parameters.add(database.escapeStringForDatabase(indexName)); + } + + sql += "ORDER BY i.NAME, k.COLSEQ"; + + returnList.addAll(executeAndExtract(database, sql, parameters.toArray())); + } else if (!(database instanceof MariaDBDatabase) && database instanceof MySQLDatabase) { + + //mysql 8.0.13 introduced support for indexes on `lower(first_name)` which comes back in an "expression" column + String filterConditionValue = "NULL"; + if (database.getDatabaseMajorVersion() > 8 || (database.getDatabaseMajorVersion() == 8 && ((MySQLDatabase) 
database).getDatabasePatchVersion() >= 13)) { + filterConditionValue = "EXPRESSION"; + } + + StringBuilder sql = new StringBuilder("SELECT TABLE_CATALOG AS TABLE_CAT, TABLE_SCHEMA AS TABLE_SCHEM,"); + sql.append(" TABLE_NAME, NON_UNIQUE, NULL AS INDEX_QUALIFIER, INDEX_NAME,"); + sql.append(DatabaseMetaData.tableIndexOther); + sql.append(" AS TYPE, SEQ_IN_INDEX AS ORDINAL_POSITION, COLUMN_NAME,"); + sql.append("COLLATION AS ASC_OR_DESC, CARDINALITY, 0 AS PAGES, " + filterConditionValue + " AS FILTER_CONDITION FROM INFORMATION_SCHEMA.STATISTICS WHERE"); + sql.append(" TABLE_SCHEMA = '").append(database.correctObjectName(catalogAndSchema.getCatalogName(), Catalog.class)).append("'"); + + if (!isBulkFetchMode && tableName != null) { + sql.append(" AND TABLE_NAME = '").append(database.escapeStringForDatabase(tableName)).append("'"); + } + + if (!isBulkFetchMode && indexName != null) { + sql.append(" AND INDEX_NAME='").append(database.escapeStringForDatabase(indexName)).append("'"); + } + + sql.append("ORDER BY NON_UNIQUE, INDEX_NAME, SEQ_IN_INDEX"); + + returnList.addAll(executeAndExtract(sql.toString(), database)); + } else { + /* + * If we do not know in which table to look for the index, things get a little bit ugly. + * First, we get a collection of all tables within the catalogAndSchema, then iterate through + * them until we (hopefully) find the index we are looking for. + */ + List tables = new ArrayList<>(); + if (tableName == null) { + // Build a list of all candidate tables in the catalog/schema that might contain the index + for (CachedRow row : getTables(((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema), ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema), null)) { + tables.add(row.getString("TABLE_NAME")); + } + } else { + tables.add(tableName); + } + + // Iterate through all the candidate tables and try to find the index. 
+ for (String tableName1 : tables) { + ResultSet rs = databaseMetaData.getIndexInfo( + ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema), + ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema), + tableName1, + false, + true); + List rows = extract(rs, (database instanceof InformixDatabase)); + returnList.addAll(rows); + } + } + + return returnList; + } + + private List setIndexExpressions(List c) throws DatabaseException, SQLException { + Map expressionMap = getCachedExpressionMap(); + c.forEach(row -> { + row.set("FILTER_CONDITION", null); + String key = row.getString("INDEX_OWNER") + "::" + row.getString("INDEX_NAME") + "::" + + row.getInt("ORDINAL_POSITION"); + CachedRow fromMap = expressionMap.get(key); + if (fromMap != null) { + row.set("FILTER_CONDITION", fromMap.get("COLUMN_EXPRESSION")); + } + }); + return c; + } + + private Map getCachedExpressionMap() throws DatabaseException, SQLException { + if (cachedExpressionMap != null) { + return cachedExpressionMap; + } + String expSql = "SELECT e.column_expression, e.index_owner, e.index_name, e.column_position FROM all_ind_expressions e"; + List ec = executeAndExtract(expSql, database); + cachedExpressionMap = new HashMap<>(); + ec.forEach(row -> { + String key = row.getString("INDEX_OWNER") + "::" + row.getString("INDEX_NAME") + "::" + + row.getInt("COLUMN_POSITION"); + cachedExpressionMap.put(key, row); + }); + return cachedExpressionMap; + } + + @Override + public List bulkFetch() throws SQLException, DatabaseException { + this.isBulkFetchMode = true; + return fastFetch(); + } + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + if (database instanceof OracleDatabase || database instanceof MSSQLDatabase) { + return JdbcDatabaseSnapshot.this.getAllCatalogsStringScratchData() != null || (tableName == null && indexName == null) || super.shouldBulkSelect(schemaKey, resultSetCache); + } + return false; + } + }); + } + + + 
protected void warnAboutDbaRecycleBin() { + if (!ignoreWarnAboutDbaRecycleBin && !warnedAboutDbaRecycleBin && !(((OracleDatabase) database).canAccessDbaRecycleBin())) { + Scope.getCurrentScope().getLog(getClass()).warning(((OracleDatabase) database).getDbaRecycleBinWarning()); + warnedAboutDbaRecycleBin = true; + } + } + + /** + * Return the columns for the given catalog, schema, table, and column. + */ + public List getColumns(final String catalogName, final String schemaName, final String tableName, final String columnName) throws SQLException, DatabaseException { + + if ((database instanceof MSSQLDatabase) && (userDefinedTypes == null)) { + userDefinedTypes = new HashSet<>(); + DatabaseConnection databaseConnection = database.getConnection(); + if (databaseConnection instanceof JdbcConnection) { + Statement stmt = null; + ResultSet resultSet = null; + try { + stmt = ((JdbcConnection) databaseConnection).getUnderlyingConnection().createStatement(); + resultSet = stmt.executeQuery("select name from " + (catalogName == null ? "" : "[" + catalogName + "].") + "sys.types where is_user_defined=1"); + while (resultSet.next()) { + userDefinedTypes.add(resultSet.getString("name").toLowerCase()); + } + } finally { + JdbcUtil.close(resultSet, stmt); + } + } + } + GetColumnResultSetCache getColumnResultSetCache = new GetColumnResultSetCache(database, catalogName, + schemaName, tableName, columnName); + return getResultSetCache("getColumns").get(getColumnResultSetCache); + } + + /** + * Return the NotNullConstraints for the given catalog, schema, table, and column. 
+ */ + public List getNotNullConst(final String catalogName, final String schemaName, + final String tableName) throws DatabaseException { + if (!(database instanceof OracleDatabase)) { + return Collections.emptyList(); + } + GetNotNullConstraintsResultSetCache getNotNullConstraintsResultSetCache = new GetNotNullConstraintsResultSetCache(database, catalogName, + schemaName, tableName); + return getResultSetCache("getNotNullConst").get(getNotNullConstraintsResultSetCache); + } + + private class GetColumnResultSetCache extends ResultSetCache.SingleResultSetExtractor { + final String catalogName; + final String schemaName; + final String tableName; + final String columnName; + + private GetColumnResultSetCache(Database database, String catalogName, String schemaName, String tableName, String columnName) { + super(database); + this.catalogName = catalogName; + this.schemaName = schemaName; + this.tableName = tableName; + this.columnName = columnName; + } + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("TABLE_CAT"), row.getString("TABLE_SCHEM"), database, row.getString("TABLE_NAME"), row.getString("COLUMN_NAME")); + } + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, tableName, columnName); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + String catalogs = getAllCatalogsStringScratchData(); + return catalogs != null && schemaKey != null + && catalogs.contains("'" + schemaKey.toUpperCase() + "'") + && database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + return row.getString("TABLE_SCHEM"); + } + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + LiquibaseTableNamesFactory liquibaseTableNamesFactory = Scope.getCurrentScope().getSingleton(LiquibaseTableNamesFactory.class); + 
List liquibaseTableNames = liquibaseTableNamesFactory.getLiquibaseTableNames(database); + return liquibaseTableNames.stream().noneMatch(tableName::equalsIgnoreCase); + } + + @Override + public List fastFetchQuery() throws SQLException, DatabaseException { + if (database instanceof OracleDatabase) { + return oracleQuery(false); + } else if (database instanceof MSSQLDatabase) { + return mssqlQuery(false); + } + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + try { + List returnList = + extract( + databaseMetaData.getColumns( + ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema), + escapeForLike(((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema), database), + escapeForLike(tableName, database), + SQL_FILTER_MATCH_ALL) + ); + // + // IF MARIADB OR SQL ANYWHERE + // Query to get actual data types and then map each column to its CachedRow + // + determineActualDataTypes(returnList, tableName); + return returnList; + } catch (SQLException e) { + if (shouldReturnEmptyColumns(e)) { //view with table already dropped. Act like it has no columns. 
+ return new ArrayList<>(); + } else { + throw e; + } + } + } + + @Override + public List bulkFetchQuery() throws SQLException, DatabaseException { + if (database instanceof OracleDatabase) { + return oracleQuery(true); + } else if (database instanceof MSSQLDatabase) { + return mssqlQuery(true); + } + + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + try { + List returnList = + extract(databaseMetaData.getColumns(((AbstractJdbcDatabase) database) + .getJdbcCatalogName(catalogAndSchema), + escapeForLike(((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema), database), + SQL_FILTER_MATCH_ALL, SQL_FILTER_MATCH_ALL)); + // + // IF MARIADB OR SQL ANYWHERE + // Query to get actual data types and then map each column to its CachedRow + // + determineActualDataTypes(returnList, null); + return returnList; + } catch (SQLException e) { + if (shouldReturnEmptyColumns(e)) { + return new ArrayList<>(); + } else { + throw e; + } + } + } + + // + // For MariaDB, query for the data type column so that we can correctly + // set the DATETIME(6) type if specified + // + // For SQL Anywhere, query for the scale column so we can correctly + // set the size unit + // + private void determineActualDataTypes(List returnList, String tableName) throws SQLException { + // + // If not MariaDB / SQL Anywhere then just return + // + if (!(database instanceof MariaDBDatabase || database instanceof SybaseASADatabase)) { + return; + } + + if (database instanceof SybaseASADatabase) { + // + // Query for actual data type for column. The actual SYSTABCOL.scale column value is + // not reported by the DatabaseMetadata.getColumns() query for CHAR-limited (in contrast + // to BYTE-limited) columns, and it is needed to capture the kind if limitation. 
+ // The actual SYSTABCOL.column_type is not reported by the DatabaseMetadata.getColumns() + // query as the IS_GENERATEDCOLUMN columns is missing in the result set, and it is needed to + // capture the kind of column (regular or computed). + // + // See https://help.sap.com/docs/SAP_SQL_Anywhere/93079d4ba8e44920ae63ffb4def91f5b/3beaa3956c5f1014883cb0c3e3559cc9.html. + // + String selectStatement = + "SELECT table_name, column_name, scale, column_type FROM SYSTABCOL KEY JOIN SYSTAB KEY JOIN SYSUSER " + + "WHERE user_name = ? AND ? IS NULL OR table_name = ?"; + Connection underlyingConnection = ((JdbcConnection) database.getConnection()).getUnderlyingConnection(); + try (PreparedStatement stmt = underlyingConnection.prepareStatement(selectStatement)) { + stmt.setString(1, schemaName); + stmt.setString(2, tableName); + stmt.setString(3, tableName); + try (ResultSet columnSelectRS = stmt.executeQuery()) { + while (columnSelectRS.next()) { + String selectedTableName = columnSelectRS.getString("table_name"); + String selectedColumnName = columnSelectRS.getString("column_name"); + int selectedScale = columnSelectRS.getInt("scale"); + String selectedColumnType = columnSelectRS.getString("column_type"); + for (CachedRow row : returnList) { + String rowTableName = row.getString("TABLE_NAME"); + String rowColumnName = row.getString("COLUMN_NAME"); + if (rowTableName.equalsIgnoreCase(selectedTableName) && + rowColumnName.equalsIgnoreCase(selectedColumnName)) { + int rowDataType = row.getInt("DATA_TYPE"); + if (rowDataType == Types.VARCHAR || rowDataType == Types.CHAR) { + row.set("scale", selectedScale); + } + row.set("IS_GENERATEDCOLUMN", "C".equals(selectedColumnType) ? "YES" : "NO"); + break; + } + } + } + } + } catch (SQLException sqle) { + throw new RuntimeException(sqle); + // + // Do not stop + // + } + return; + } + + // + // Query for actual data type for column. 
The actual DATA_TYPE column string is + // not returned by the DatabaseMetadata.getColumns() query, and it is needed + // to capture DATETIME() data types. + // + StringBuilder selectStatement = new StringBuilder( + "SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?"); + if(tableName != null) { + selectStatement.append(" AND TABLE_NAME = ?"); + } + Connection underlyingConnection = ((JdbcConnection) database.getConnection()).getUnderlyingConnection(); + PreparedStatement statement = underlyingConnection.prepareStatement(selectStatement.toString()); + statement.setString(1, schemaName); + if (tableName != null) { + statement.setString(2, tableName); + } + try { + ResultSet columnSelectRS = statement.executeQuery(selectStatement.toString()); + // + // Iterate the result set from the query and match the rows + // to the rows that were returned by getColumns() in order + // to assign the actual DATA_TYPE string to the appropriate row. 
+ // + while (columnSelectRS.next()) { + String selectedTableName = columnSelectRS.getString("TABLE_NAME"); + String selectedColumnName = columnSelectRS.getString("COLUMN_NAME"); + String actualDataType = columnSelectRS.getString("DATA_TYPE"); + for (CachedRow row : returnList) { + String rowTableName = row.getString("TABLE_NAME"); + String rowColumnName = row.getString("COLUMN_NAME"); + String rowTypeName = row.getString("TYPE_NAME"); + int rowDataType = row.getInt("DATA_TYPE"); + if (rowTableName.equalsIgnoreCase(selectedTableName) && + rowColumnName.equalsIgnoreCase(selectedColumnName) && + rowTypeName.equalsIgnoreCase("datetime") && + rowDataType == Types.OTHER && + !rowTypeName.equalsIgnoreCase(actualDataType)) { + row.set("TYPE_NAME", actualDataType); + row.set("DATA_TYPE", Types.TIMESTAMP); + break; + } + } + } + } catch (SQLException sqle) { + // + // Do not stop + // + } + finally { + JdbcUtil.closeStatement(statement); + } + } + + protected boolean shouldReturnEmptyColumns(SQLException e) { + return e.getMessage().contains("references invalid table"); //view with table already dropped. Act like it has no columns. 
+ } + + protected List oracleQuery(boolean bulk) throws DatabaseException, SQLException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + String jdbcSchemaName = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + boolean collectIdentityData = database.getDatabaseMajorVersion() >= OracleDatabase.ORACLE_12C_MAJOR_VERSION; + + String sql = "select NULL AS TABLE_CAT, OWNER AS TABLE_SCHEM, 'NO' as IS_AUTOINCREMENT, cc.COMMENTS AS REMARKS," + + "OWNER, TABLE_NAME, COLUMN_NAME, DATA_TYPE AS DATA_TYPE_NAME, DATA_TYPE_MOD, DATA_TYPE_OWNER, " + + // note: oracle reports DATA_LENGTH=4*CHAR_LENGTH when using VARCHAR( CHAR ), thus BYTEs + "DECODE (c.data_type, 'CHAR', 1, 'VARCHAR2', 12, 'NUMBER', 3, 'LONG', -1, 'DATE', " + "93" + ", 'RAW', -3, 'LONG RAW', -4, 'BLOB', 2004, 'CLOB', 2005, 'BFILE', -13, 'FLOAT', 6, 'TIMESTAMP(6)', 93, 'TIMESTAMP(6) WITH TIME ZONE', -101, 'TIMESTAMP(6) WITH LOCAL TIME ZONE', -102, 'INTERVAL YEAR(2) TO MONTH', -103, 'INTERVAL DAY(2) TO SECOND(6)', -104, 'BINARY_FLOAT', 100, 'BINARY_DOUBLE', 101, 'XMLTYPE', 2009, 1111) AS data_type, " + + "DECODE( CHAR_USED, 'C',CHAR_LENGTH, DATA_LENGTH ) as DATA_LENGTH, " + + "DATA_PRECISION, DATA_SCALE, NULLABLE, COLUMN_ID as ORDINAL_POSITION, DEFAULT_LENGTH, " + + "DATA_DEFAULT, " + + "NUM_BUCKETS, CHARACTER_SET_NAME, " + + "CHAR_COL_DECL_LENGTH, CHAR_LENGTH, " + + "CHAR_USED, VIRTUAL_COLUMN "; + if (collectIdentityData) { + sql += ", DEFAULT_ON_NULL, IDENTITY_COLUMN, ic.GENERATION_TYPE "; + } + sql += "FROM ALL_TAB_COLS c " + + "JOIN ALL_COL_COMMENTS cc USING ( OWNER, TABLE_NAME, COLUMN_NAME ) "; + if (collectIdentityData) { + sql += "LEFT JOIN ALL_TAB_IDENTITY_COLS ic USING (OWNER, TABLE_NAME, COLUMN_NAME ) "; + } + if (!bulk || getAllCatalogsStringScratchData() == null) { + sql += "WHERE OWNER='" + jdbcSchemaName + "' AND hidden_column='NO'"; + } else { + sql += "WHERE OWNER IN ('" + jdbcSchemaName + "', " + 
getAllCatalogsStringScratchData() + ") AND hidden_column='NO'"; + } + + if (!bulk) { + if (tableName != null) { + sql += " AND TABLE_NAME='" + database.escapeStringForDatabase(tableName) + "'"; + } + if (columnName != null) { + sql += " AND COLUMN_NAME='" + database.escapeStringForDatabase(columnName) + "'"; + } + } + sql += " AND " + ((OracleDatabase) database).getSystemTableWhereClause("TABLE_NAME"); + sql += " ORDER BY OWNER, TABLE_NAME, c.COLUMN_ID"; + + return this.executeAndExtract(sql, database); + } + + + protected List mssqlQuery(boolean bulk) throws DatabaseException, SQLException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + String databaseName = StringUtil.trimToNull(database.correctObjectName(catalogAndSchema.getCatalogName(), Catalog.class)); + String dbIdParam; + String databasePrefix; + if (databaseName == null) { + databasePrefix = ""; + dbIdParam = ""; + } else { + dbIdParam = ", db_id('" + databaseName + "')"; + databasePrefix = "[" + databaseName + "]."; + } + + String sql = "select " + + "db_name(" + (databaseName == null ? 
"" : "db_id('" + databaseName + "')") + ") AS TABLE_CAT, " + + "object_schema_name(c.object_id" + dbIdParam + ") AS TABLE_SCHEM, " + + "object_name(c.object_id" + dbIdParam + ") AS TABLE_NAME, " + + "c.name AS COLUMN_NAME, " + + "is_filestream AS IS_FILESTREAM, " + + "is_rowguidcol AS IS_ROWGUIDCOL, " + + "CASE WHEN c.is_identity = 'true' THEN 'YES' ELSE 'NO' END as IS_AUTOINCREMENT, " + + "{REMARKS_COLUMN_PLACEHOLDER}" + + "t.name AS TYPE_NAME, " + + "dc.name as COLUMN_DEF_NAME, " + + "dc.definition as COLUMN_DEF, " + + // data type mapping from https://msdn.microsoft.com/en-us/library/ms378878(v=sql.110).aspx + "CASE t.name " + + "WHEN 'bigint' THEN " + java.sql.Types.BIGINT + " " + + "WHEN 'binary' THEN " + java.sql.Types.BINARY + " " + + "WHEN 'bit' THEN " + java.sql.Types.BIT + " " + + "WHEN 'char' THEN " + java.sql.Types.CHAR + " " + + "WHEN 'date' THEN " + java.sql.Types.DATE + " " + + "WHEN 'datetime' THEN " + java.sql.Types.TIMESTAMP + " " + + "WHEN 'datetime2' THEN " + java.sql.Types.TIMESTAMP + " " + + "WHEN 'datetimeoffset' THEN -155 " + + "WHEN 'decimal' THEN " + java.sql.Types.DECIMAL + " " + + "WHEN 'float' THEN " + java.sql.Types.DOUBLE + " " + + "WHEN 'image' THEN " + java.sql.Types.LONGVARBINARY + " " + + "WHEN 'int' THEN " + java.sql.Types.INTEGER + " " + + "WHEN 'money' THEN " + java.sql.Types.DECIMAL + " " + + "WHEN 'nchar' THEN " + java.sql.Types.NCHAR + " " + + "WHEN 'ntext' THEN " + java.sql.Types.LONGNVARCHAR + " " + + "WHEN 'numeric' THEN " + java.sql.Types.NUMERIC + " " + + "WHEN 'nvarchar' THEN " + java.sql.Types.NVARCHAR + " " + + "WHEN 'real' THEN " + Types.REAL + " " + + "WHEN 'smalldatetime' THEN " + java.sql.Types.TIMESTAMP + " " + + "WHEN 'smallint' THEN " + java.sql.Types.SMALLINT + " " + + "WHEN 'smallmoney' THEN " + java.sql.Types.DECIMAL + " " + + "WHEN 'text' THEN " + java.sql.Types.LONGVARCHAR + " " + + "WHEN 'time' THEN " + java.sql.Types.TIME + " " + + "WHEN 'timestamp' THEN " + java.sql.Types.BINARY + " " + + "WHEN 
'tinyint' THEN " + java.sql.Types.TINYINT + " " + + "WHEN 'udt' THEN " + java.sql.Types.VARBINARY + " " + + "WHEN 'uniqueidentifier' THEN " + java.sql.Types.CHAR + " " + + "WHEN 'varbinary' THEN " + java.sql.Types.VARBINARY + " " + + "WHEN 'varbinary(max)' THEN " + java.sql.Types.VARBINARY + " " + + "WHEN 'varchar' THEN " + java.sql.Types.VARCHAR + " " + + "WHEN 'varchar(max)' THEN " + java.sql.Types.VARCHAR + " " + + "WHEN 'xml' THEN " + java.sql.Types.LONGVARCHAR + " " + + "WHEN 'LONGNVARCHAR' THEN " + java.sql.Types.SQLXML + " " + + "ELSE " + Types.OTHER + " END AS DATA_TYPE, " + + "CASE WHEN c.is_nullable = 'true' THEN 1 ELSE 0 END AS NULLABLE, " + + "10 as NUM_PREC_RADIX, " + + "c.column_id as ORDINAL_POSITION, " + + "c.scale as DECIMAL_DIGITS, " + + "c.max_length as COLUMN_SIZE, " + + "c.precision as DATA_PRECISION, " + + "c.is_computed as IS_COMPUTED " + + "FROM " + databasePrefix + "sys.columns c " + + "inner join " + databasePrefix + "sys.types t on c.user_type_id=t.user_type_id " + + "{REMARKS_JOIN_PLACEHOLDER}" + + "left outer join " + databasePrefix + "sys.default_constraints dc on dc.parent_column_id = c.column_id AND dc.parent_object_id=c.object_id AND type_desc='DEFAULT_CONSTRAINT' " + + "WHERE object_schema_name(c.object_id" + dbIdParam + ")='" + ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema) + "'"; + + + if (!bulk) { + if (tableName != null) { + sql += " and object_name(c.object_id" + dbIdParam + ")='" + database.escapeStringForDatabase(tableName) + "'"; + } + if (columnName != null) { + sql += " and c.name='" + database.escapeStringForDatabase(columnName) + "'"; + } + } + sql += "order by object_schema_name(c.object_id" + dbIdParam + "), object_name(c.object_id" + dbIdParam + "), c.column_id"; + + + // sys.extended_properties is added to Azure on V12: https://feedback.azure.com/forums/217321-sql-database/suggestions/6549815-add-sys-extended-properties-for-meta-data-support + if ((!((MSSQLDatabase) database).isAzureDb()) // 
Either NOT AzureDB (=SQL Server 2008 or higher) + || (database.getDatabaseMajorVersion() >= 12)) { // or at least AzureDB v12 + // SQL Server 2005 or later + // https://technet.microsoft.com/en-us/library/ms177541.aspx + sql = sql.replace("{REMARKS_COLUMN_PLACEHOLDER}", "CAST([ep].[value] AS [nvarchar](MAX)) AS [REMARKS], "); + sql = sql.replace("{REMARKS_JOIN_PLACEHOLDER}", "left outer join " + databasePrefix + "[sys].[extended_properties] AS [ep] ON [ep].[class] = 1 " + + "AND [ep].[major_id] = c.object_id " + + "AND [ep].[minor_id] = column_id " + + "AND [ep].[name] = 'MS_Description' "); + } else { + sql = sql.replace("{REMARKS_COLUMN_PLACEHOLDER}", ""); + sql = sql.replace("{REMARKS_JOIN_PLACEHOLDER}", ""); + } + + List rows = this.executeAndExtract(sql, database); + + for (CachedRow row : rows) { + String typeName = row.getString("TYPE_NAME"); + if ("nvarchar".equals(typeName) || "nchar".equals(typeName)) { + Integer size = row.getInt("COLUMN_SIZE"); + if (size > 0) { + row.set("COLUMN_SIZE", size / 2); + } + } else if ((row.getInt("DATA_PRECISION") != null) && (row.getInt("DATA_PRECISION") > 0)) { + row.set("COLUMN_SIZE", row.getInt("DATA_PRECISION")); + } + } + + return rows; + } + + @Override + protected List extract(ResultSet resultSet, boolean informixIndexTrimHint) throws SQLException { + List rows = super.extract(resultSet, informixIndexTrimHint); + if ((database instanceof MSSQLDatabase) && !userDefinedTypes.isEmpty()) { //UDT types in MSSQL don't take parameters + for (CachedRow row : rows) { + String dataType = (String) row.get("TYPE_NAME"); + if (userDefinedTypes.contains(dataType.toLowerCase())) { + row.set("COLUMN_SIZE", null); + row.set("DECIMAL_DIGITS ", null); + } + } + } + return rows; + } + } + + private class ForeignKeysResultSetCache extends ResultSetCache.UnionResultSetExtractor { + final String catalogName; + final String schemaName; + final String tableName; + final String fkName; + + private ForeignKeysResultSetCache(Database database, 
String catalogName, String schemaName, String tableName, String fkName) { + super(database); + this.catalogName = catalogName; + this.schemaName = schemaName; + this.tableName = tableName; + this.fkName = fkName; + } + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("FKTABLE_CAT"), row.getString("FKTABLE_SCHEM"), database, row.getString("FKTABLE_NAME"), row.getString("FK_NAME")); + } + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, tableName, fkName); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + return row.getString("FKTABLE_SCHEM"); + } + + @Override + public List fastFetch() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + String jdbcCatalogName = ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema); + String jdbcSchemaName = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + + if (database instanceof DB2Database) { + if (database.getDatabaseProductName().startsWith("DB2 UDB for AS/400")) { + return executeAndExtract(getDB2ForAs400Sql(jdbcSchemaName, tableName), database); + } + return querytDB2Luw(jdbcSchemaName, tableName); + } else if (database instanceof Db2zDatabase) { + return queryDb2Zos(catalogAndSchema, tableName); + } else { + List tables = new ArrayList<>(); + if (tableName == null) { + for (CachedRow row : getTables(jdbcCatalogName, jdbcSchemaName, null)) { + tables.add(row.getString("TABLE_NAME")); + } + } else { + tables.add(tableName); + } + + List returnList = new ArrayList<>(); + for (String foundTable : tables) { + if (database instanceof OracleDatabase) { + throw new RuntimeException("Should have 
bulk selected"); + } else { + returnList.addAll(extract(databaseMetaData.getImportedKeys(jdbcCatalogName, jdbcSchemaName, foundTable))); + } + } + + return returnList; + } + } + + @Override + public List bulkFetch() throws SQLException, DatabaseException { + if (database instanceof OracleDatabase) { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + String jdbcSchemaName = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + String sql = getOracleSql(jdbcSchemaName); + return executeAndExtract(sql, database); + } else if (database instanceof DB2Database) { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + String jdbcSchemaName = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + if (database.getDatabaseProductName().startsWith("DB2 UDB for AS/400")) { + return executeAndExtract(getDB2ForAs400Sql(jdbcSchemaName, null), database); + } + return querytDB2Luw(jdbcSchemaName, null); + } else if (database instanceof Db2zDatabase) { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + return queryDb2Zos(catalogAndSchema, null); + } else if (database instanceof MSSQLDatabase) { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + String jdbcSchemaName = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + String sql = getMSSQLSql(jdbcSchemaName, tableName); + return executeAndExtract(sql, database); + } else { + throw new RuntimeException("Cannot bulk select"); + } + } + + protected String getOracleSql(String jdbcSchemaName) { + String sql = "SELECT /*+rule*/" + + " NULL AS pktable_cat, " + + " p.owner as pktable_schem, " + + " p.table_name as pktable_name, " + + " pc.column_name as pkcolumn_name, " + + " NULL as fktable_cat, " + + " f.owner as fktable_schem, " + + " f.table_name as fktable_name, 
" + + " fc.column_name as fkcolumn_name, " + + " fc.position as key_seq, " + + " NULL as update_rule, " + + " decode (f.delete_rule, 'CASCADE', 0, 'SET NULL', 2, 1) as delete_rule, " + + " f.constraint_name as fk_name, " + + " p.constraint_name as pk_name, " + + " decode(f.deferrable, 'DEFERRABLE', 5, 'NOT DEFERRABLE', 7, 'DEFERRED', 6) deferrability, " + + " f.validated as fk_validate " + + "FROM " + + "all_cons_columns pc " + + "INNER JOIN all_constraints p " + + "ON pc.owner = p.owner " + + "AND pc.constraint_name = p.constraint_name " + + "INNER JOIN all_constraints f " + + "ON pc.owner = f.r_owner " + + "AND pc.constraint_name = f.r_constraint_name " + + "INNER JOIN all_cons_columns fc " + + "ON fc.owner = f.owner " + + "AND fc.constraint_name = f.constraint_name " + + "AND fc.position = pc.position "; + if (getAllCatalogsStringScratchData() == null) { + sql += "WHERE f.owner = '" + jdbcSchemaName + "' "; + } else { + sql += "WHERE f.owner IN ('" + jdbcSchemaName + "', " + getAllCatalogsStringScratchData() + ") "; + } + sql += "AND p.constraint_type in ('P', 'U') " + + "AND f.constraint_type = 'R' " + + "AND p.table_name NOT LIKE 'BIN$%' " + + "ORDER BY fktable_schem, fktable_name, key_seq"; + return sql; + } + + protected String getMSSQLSql(String jdbcSchemaName, String tableName) { + //comes from select object_definition(object_id('sp_fkeys')) + return "select " + + "convert(sysname,db_name()) AS PKTABLE_CAT, " + + "convert(sysname,schema_name(o1.schema_id)) AS PKTABLE_SCHEM, " + + "convert(sysname,o1.name) AS PKTABLE_NAME, " + + "convert(sysname,c1.name) AS PKCOLUMN_NAME, " + + "convert(sysname,db_name()) AS FKTABLE_CAT, " + + "convert(sysname,schema_name(o2.schema_id)) AS FKTABLE_SCHEM, " + + "convert(sysname,o2.name) AS FKTABLE_NAME, " + + "convert(sysname,c2.name) AS FKCOLUMN_NAME, " + + "isnull(convert(smallint,k.constraint_column_id), convert(smallint,0)) AS KEY_SEQ, " + + "convert(smallint, case ObjectProperty(f.object_id, 'CnstIsUpdateCascade') when 
1 then 0 else 1 end) AS UPDATE_RULE, " + + "convert(smallint, case ObjectProperty(f.object_id, 'CnstIsDeleteCascade') when 1 then 0 else 1 end) AS DELETE_RULE, " + + "convert(sysname,object_name(f.object_id)) AS FK_NAME, " + + "convert(sysname,i.name) AS PK_NAME, " + + "convert(smallint, 7) AS DEFERRABILITY " + + "from " + + "sys.objects o1, " + + "sys.objects o2, " + + "sys.columns c1, " + + "sys.columns c2, " + + "sys.foreign_keys f inner join " + + "sys.foreign_key_columns k on (k.constraint_object_id = f.object_id) inner join " + + "sys.indexes i on (f.referenced_object_id = i.object_id and f.key_index_id = i.index_id) " + + "where " + + "o1.object_id = f.referenced_object_id and " + + "o2.object_id = f.parent_object_id and " + + "c1.object_id = f.referenced_object_id and " + + "c2.object_id = f.parent_object_id and " + + "c1.column_id = k.referenced_column_id and " + + "c2.column_id = k.parent_column_id and " + + "((object_schema_name(o1.object_id)='" + jdbcSchemaName + "'" + + " and convert(sysname,schema_name(o2.schema_id))='" + jdbcSchemaName + "' and " + + "convert(sysname,o2.name)='" + tableName + "' ) or ( convert(sysname,schema_name" + + "(o2.schema_id))='" + jdbcSchemaName + "' and convert(sysname,o2.name)='" + tableName + + "' )) order by 5, 6, 7, 9, 8"; + } + + private List querytDB2Luw(String jdbcSchemaName, String tableName) throws DatabaseException, SQLException { + List parameters = new ArrayList<>(2); + StringBuilder sql = new StringBuilder ("SELECT " + + " pk_col.tabschema AS pktable_cat, " + + " pk_col.tabname as pktable_name, " + + " pk_col.colname as pkcolumn_name, " + + " fk_col.tabschema as fktable_cat, " + + " fk_col.tabname as fktable_name, " + + " fk_col.colname as fkcolumn_name, " + + " fk_col.colseq as key_seq, " + + " decode (ref.updaterule, 'A', 3, 'R', 1, 1) as update_rule, " + + " decode (ref.deleterule, 'A', 3, 'C', 0, 'N', 2, 'R', 1, 1) as delete_rule, " + + " ref.constname as fk_name, " + + " ref.refkeyname as pk_name, " + + " 
7 as deferrability " + + "FROM " + + "syscat.references ref " + + "join syscat.keycoluse fk_col on ref.constname=fk_col.constname and ref.tabschema=fk_col.tabschema and ref.tabname=fk_col.tabname " + + "join syscat.keycoluse pk_col on ref.refkeyname=pk_col.constname and ref.reftabschema=pk_col.tabschema and ref.reftabname=pk_col.tabname and pk_col.colseq=fk_col.colseq " + + "WHERE ref.tabschema = ? "); + parameters.add(jdbcSchemaName); + if (tableName != null) { + sql.append("and fk_col.tabname = ? "); + parameters.add(tableName); + } + sql.append("ORDER BY fk_col.colseq"); + return executeAndExtract(database, sql.toString(), parameters.toArray()); + } + + private String getDB2ForAs400Sql(String jdbcSchemaName, String tableName) { + return "SELECT " + + "pktable_cat, " + + "pktable_name, " + + "pkcolumn_name, " + + "fktable_cat, " + + "fktable_name, " + + "fkcolumn_name, " + + "key_seq, " + + "update_rule, " + + "delete_rule, " + + "fk_name, " + + "pk_name, " + + "deferrability " + + "FROM " + + "sysibm.SQLFORKEYS " + + "WHERE " + + "FKTABLE_SCHEM = '" + jdbcSchemaName + "' " + + "AND FKTABLE_NAME = '" + tableName + "'"; + } + + protected List queryDb2Zos(CatalogAndSchema catalogAndSchema, String tableName) throws DatabaseException, SQLException { + + List parameters = new ArrayList<>(2); + StringBuilder sql = new StringBuilder("SELECT " + + " ref.REFTBCREATOR AS pktable_cat, " + + " ref.REFTBNAME as pktable_name, " + + " pk_col.colname as pkcolumn_name, " + + " ref.CREATOR as fktable_cat, " + + " ref.TBNAME as fktable_name, " + + " fk_col.colname as fkcolumn_name, " + + " fk_col.colseq as key_seq, " + + " decode (ref.deleterule, 'A', 3, 'C', 0, 'N', 2, 'R', 1, 1) as delete_rule, " + + " ref.relname as fk_name, " + + " pk_col.colname as pk_name, " + + " 7 as deferrability " + + "FROM " + + "SYSIBM.SYSRELS ref " + + "join SYSIBM.SYSFOREIGNKEYS fk_col " + + "on ref.relname = fk_col.RELNAME " + + "and ref.CREATOR = fk_col.CREATOR " + + "and ref.TBNAME = fk_col.TBNAME 
" + + "join SYSIBM.SYSKEYCOLUSE pk_col " + + "on ref.REFTBCREATOR = pk_col.TBCREATOR " + + "and ref.REFTBNAME = pk_col.TBNAME " + + "and pk_col.colseq=fk_col.colseq " + + "WHERE ref.CREATOR = ? "); + parameters.add(((AbstractJdbcDatabase) CachingDatabaseMetaData.this.database).getJdbcSchemaName(catalogAndSchema)); + if (tableName != null) { + sql.append("AND ref.TBNAME = ? "); + parameters.add(tableName); + } + sql.append("ORDER BY fk_col.colseq"); + + return executeAndExtract(CachingDatabaseMetaData.this.database, sql.toString(), parameters.toArray()); + } + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + if (database instanceof AbstractDb2Database || database instanceof MSSQLDatabase) { + return super.shouldBulkSelect(schemaKey, resultSetCache); //can bulk and fast fetch + } else { + return database instanceof OracleDatabase; //oracle is slow, always bulk select while you are at it. Other databases need to go through all tables. + } + } + } + + private class GetNotNullConstraintsResultSetCache extends ResultSetCache.SingleResultSetExtractor { + final String catalogName; + final String schemaName; + final String tableName; + + private GetNotNullConstraintsResultSetCache(Database database, String catalogName, String schemaName, String tableName) { + super(database); + this.catalogName = catalogName; + this.schemaName = schemaName; + this.tableName = tableName; + } + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("TABLE_CAT"), row.getString("TABLE_SCHEMA"), + database, row.getString("TABLE_NAME")); + } + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, tableName); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + 
return row.getString("TABLE_SCHEMA"); + } + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + LiquibaseTableNamesFactory liquibaseTableNamesFactory = Scope.getCurrentScope().getSingleton(LiquibaseTableNamesFactory.class); + List liquibaseTableNames = liquibaseTableNamesFactory.getLiquibaseTableNames(database); + return liquibaseTableNames.stream().noneMatch(tableName::equalsIgnoreCase); + } + + @Override + public List fastFetchQuery() throws SQLException, DatabaseException { + if (database instanceof OracleDatabase) { + return oracleQuery(false); + } + return Collections.emptyList(); + } + + @Override + public List bulkFetchQuery() throws SQLException, DatabaseException { + if (database instanceof OracleDatabase) { + return oracleQuery(true); + } + return Collections.emptyList(); + } + + private List oracleQuery(boolean bulk) throws DatabaseException, SQLException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + String jdbcSchemaName = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + String jdbcTableName = database.escapeStringForDatabase(tableName); + String sqlToSelectNotNullConstraints = "SELECT NULL AS TABLE_CAT, atc.OWNER AS TABLE_SCHEMA, atc.OWNER, atc.TABLE_NAME, " + + "atc.COLUMN_NAME, NULLABLE, ac.VALIDATED as VALIDATED, ac.SEARCH_CONDITION, ac.CONSTRAINT_NAME " + + "FROM ALL_TAB_COLS atc " + + "JOIN all_cons_columns acc ON atc.OWNER = acc.OWNER AND atc.TABLE_NAME = acc.TABLE_NAME AND atc.COLUMN_NAME = acc.COLUMN_NAME " + + "JOIN all_constraints ac ON atc.OWNER = ac.OWNER AND atc.TABLE_NAME = ac.TABLE_NAME AND acc.CONSTRAINT_NAME = ac.CONSTRAINT_NAME "; + + if (!bulk || getAllCatalogsStringScratchData() == null) { + sqlToSelectNotNullConstraints += " WHERE atc.OWNER='" + jdbcSchemaName + "' AND atc.hidden_column='NO' AND ac.CONSTRAINT_TYPE='C' and ac.search_condition is not null "; + } else { + 
sqlToSelectNotNullConstraints += " WHERE atc.OWNER IN ('" + jdbcSchemaName + "', " + getAllCatalogsStringScratchData() + ") " + + " AND atc.hidden_column='NO' AND ac.CONSTRAINT_TYPE='C' and ac.search_condition is not null "; + } + + sqlToSelectNotNullConstraints += (!bulk && tableName != null && !tableName.isEmpty()) ? " AND atc.TABLE_NAME='" + jdbcTableName + "'" : ""; + + return this.executeAndExtract(sqlToSelectNotNullConstraints, database); + } + + @Override + protected List extract(ResultSet resultSet, boolean informixIndexTrimHint) throws SQLException { + List cachedRowList = new ArrayList<>(); + if (!(database instanceof OracleDatabase)) { + return cachedRowList; + } + + resultSet.setFetchSize(database.getFetchSize()); + + try { + List result = (List) new RowMapperNotNullConstraintsResultSetExtractor(new ColumnMapRowMapper(database.isCaseSensitive()) { + @Override + protected Object getColumnValue(ResultSet rs, int index) throws SQLException { + Object value = super.getColumnValue(rs, index); + if (!(value instanceof String)) { + return value; + } + return value.toString().trim(); + } + }).extractData(resultSet); + + for (Map row : result) { + cachedRowList.add(new CachedRow(row)); + } + } finally { + JdbcUtil.closeResultSet(resultSet); + } + return cachedRowList; + + } + } + + public List getTables(final String catalogName, final String schemaName, final String table) throws DatabaseException { + return getResultSetCache("getTables").get(new ResultSetCache.SingleResultSetExtractor(database) { + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + return table == null || getAllCatalogsStringScratchData() != null || super.shouldBulkSelect(schemaKey, resultSetCache); + } + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("TABLE_CAT"), row.getString("TABLE_SCHEM"), database, row.getString("TABLE_NAME")); + } + + @Override + public 
ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, table); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + return row.getString("TABLE_SCHEM"); + } + + @Override + public List fastFetchQuery() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + if (database instanceof OracleDatabase) { + return queryOracle(catalogAndSchema, table); + } else if (database instanceof MSSQLDatabase) { + return queryMssql(catalogAndSchema, table); + } else if (database instanceof Db2zDatabase) { + return queryDb2Zos(catalogAndSchema, table); + } else if (database instanceof PostgresDatabase) { + return queryPostgres(catalogAndSchema, table); + } + + String catalog = ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema); + String schema = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + return extract(databaseMetaData.getTables(catalog, escapeForLike(schema, database), ((table == null) ? 
+ SQL_FILTER_MATCH_ALL : escapeForLike(table, database)), new String[]{"TABLE"})); + } + + @Override + public List bulkFetchQuery() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + if (database instanceof OracleDatabase) { + return queryOracle(catalogAndSchema, null); + } else if (database instanceof MSSQLDatabase) { + return queryMssql(catalogAndSchema, null); + } else if (database instanceof Db2zDatabase) { + return queryDb2Zos(catalogAndSchema, null); + } else if (database instanceof PostgresDatabase) { + return queryPostgres(catalogAndSchema, table); + } + + String catalog = ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema); + String schema = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + return extract(databaseMetaData.getTables(catalog, escapeForLike(schema, database), SQL_FILTER_MATCH_ALL, new String[]{"TABLE"})); + } + + private List queryMssql(CatalogAndSchema catalogAndSchema, String tableName) throws DatabaseException, SQLException { + String ownerName = database.correctObjectName(catalogAndSchema.getSchemaName(), Schema.class); + + String databaseName = StringUtil.trimToNull(database.correctObjectName(catalogAndSchema.getCatalogName(), Catalog.class)); + String dbIdParam; + String databasePrefix; + if (databaseName == null) { + databasePrefix = ""; + dbIdParam = ""; + } else { + dbIdParam = ", db_id('" + databaseName + "')"; + databasePrefix = "[" + databaseName + "]."; + } + + + //From select object_definition(object_id('sp_tables')) + String sql = "select " + + "db_name(" + (databaseName == null ? 
"" : "db_id('" + databaseName + "')") + ") AS TABLE_CAT, " + + "convert(sysname,object_schema_name(o.object_id" + dbIdParam + ")) AS TABLE_SCHEM, " + + "convert(sysname,o.name) AS TABLE_NAME, " + + "'TABLE' AS TABLE_TYPE, " + + "CAST(ep.value as varchar(max)) as REMARKS " + + "from " + databasePrefix + "sys.all_objects o " + + "left outer join sys.extended_properties ep on ep.name='MS_Description' and major_id=o.object_id and minor_id=0 " + + "where " + + "o.type in ('U') " + + "and has_perms_by_name(" + (databaseName == null ? "" : "quotename('" + databaseName + "') + '.' + ") + "quotename(object_schema_name(o.object_id" + dbIdParam + ")) + '.' + quotename(o.name), 'object', 'select') = 1 " + + "and charindex(substring(o.type,1,1),'U') <> 0 " + + "and object_schema_name(o.object_id" + dbIdParam + ")='" + database.escapeStringForDatabase(ownerName) + "'"; + if (tableName != null) { + sql += " AND o.name='" + database.escapeStringForDatabase(tableName) + "' "; + } + sql += "order by 4, 1, 2, 3"; + + return executeAndExtract(sql, database); + } + + private List queryOracle(CatalogAndSchema catalogAndSchema, String tableName) throws DatabaseException, SQLException { + String ownerName = database.correctObjectName(catalogAndSchema.getCatalogName(), Schema.class); + + String sql = "SELECT null as TABLE_CAT, a.OWNER as TABLE_SCHEM, a.TABLE_NAME as TABLE_NAME, " + + "a.TEMPORARY as TEMPORARY, a.DURATION as DURATION, 'TABLE' as TABLE_TYPE, " + + "c.COMMENTS as REMARKS, A.tablespace_name as tablespace_name, CASE WHEN A.tablespace_name = " + + "(SELECT DEFAULT_TABLESPACE FROM USER_USERS) THEN 'true' ELSE null END as default_tablespace " + + "from ALL_TABLES a " + + "join ALL_TAB_COMMENTS c on a.TABLE_NAME=c.table_name and a.owner=c.owner " + + "left outer join ALL_QUEUE_TABLES q ON a.TABLE_NAME = q.QUEUE_TABLE and a.OWNER = q.OWNER " + + "WHERE q.QUEUE_TABLE is null "; + String allCatalogsString = getAllCatalogsStringScratchData(); + if (tableName != null || 
allCatalogsString == null) { + sql += "AND a.OWNER='" + ownerName + "'"; + } else { + sql += "AND a.OWNER IN ('" + ownerName + "', " + allCatalogsString + ")"; + } + if (tableName != null) { + sql += " AND a.TABLE_NAME='" + tableName + "'"; + } + + return executeAndExtract(sql, database); + } + + private List queryDb2Zos(CatalogAndSchema catalogAndSchema, String tableName) throws DatabaseException, SQLException { + String ownerName = database.correctObjectName(catalogAndSchema.getCatalogName(), Schema.class); + + String sql = "SELECT CREATOR AS TABLE_SCHEM, " + + "NAME AS TABLE_NAME, " + + "'TABLE' AS TABLE_TYPE, " + + "REMARKS " + + "FROM SYSIBM.SYSTABLES " + + "WHERE TYPE = 'T'"; + List parameters = new ArrayList<>(2); + if (ownerName != null) { + sql += " AND CREATOR = ?"; + parameters.add(ownerName); + } + if (tableName != null) { + sql += " AND NAME = ?"; + parameters.add(tableName); + } + + return executeAndExtract(database, sql, parameters.toArray()); + } + + private List queryPostgres(CatalogAndSchema catalogAndSchema, String tableName) throws SQLException { + String catalog = ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema); + String schema = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + return extract(databaseMetaData.getTables(catalog, escapeForLike(schema, database), ((tableName == null) ? 
+ SQL_FILTER_MATCH_ALL : escapeForLike(tableName, database)), new String[]{"TABLE", "PARTITIONED TABLE"})); + + } + }); + } + + public List getViews(final String catalogName, final String schemaName, String viewName) throws DatabaseException { + final String view; + if (database instanceof DB2Database) { + view = database.correctObjectName(viewName, View.class); + } else { + view = viewName; + } + return getResultSetCache("getViews").get(new ResultSetCache.SingleResultSetExtractor(database) { + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + return view == null || getAllCatalogsStringScratchData() != null || super.shouldBulkSelect(schemaKey, resultSetCache); + } + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("TABLE_CAT"), row.getString("TABLE_SCHEM"), database, row.getString("TABLE_NAME")); + } + + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, view); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + return row.getString("TABLE_SCHEM"); + } + + + @Override + public List fastFetchQuery() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + if (database instanceof OracleDatabase) { + return queryOracle(catalogAndSchema, view); + } else if (database instanceof MSSQLDatabase) { + return queryMssql(catalogAndSchema, view); + } + + String catalog = ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema); + String schema = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + return extract(databaseMetaData.getTables(catalog, escapeForLike(schema, database), ((view == null) ? 
SQL_FILTER_MATCH_ALL + : escapeForLike(view, database)), new String[]{"VIEW"})); + } + + @Override + public List bulkFetchQuery() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + if (database instanceof OracleDatabase) { + return queryOracle(catalogAndSchema, null); + } else if (database instanceof MSSQLDatabase) { + return queryMssql(catalogAndSchema, null); + } + + String catalog = ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema); + String schema = ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema); + return extract(databaseMetaData.getTables(catalog, escapeForLike(schema, database), SQL_FILTER_MATCH_ALL, new String[]{"VIEW"})); + } + + private List queryMssql(CatalogAndSchema catalogAndSchema, String viewName) throws DatabaseException, SQLException { + String ownerName = database.correctObjectName(catalogAndSchema.getSchemaName(), Schema.class); + String databaseName = StringUtil.trimToNull(database.correctObjectName(catalogAndSchema.getCatalogName(), Catalog.class)); + String dbIdParam = ""; + String databasePrefix = ""; + boolean haveDatabaseName = databaseName != null; + + if (haveDatabaseName) { + dbIdParam = ", db_id('" + databaseName + "')"; + databasePrefix = "[" + databaseName + "]."; + } + String tableCatParam = haveDatabaseName ? "db_id('" + databaseName + "')" : ""; + String permsParam = haveDatabaseName ? "quotename('" + databaseName + "') + '.' 
+ " : ""; + + String sql = "select " + + "db_name(" + tableCatParam + ") AS TABLE_CAT, " + + "convert(sysname,object_schema_name(o.object_id" + dbIdParam + ")) AS TABLE_SCHEM, " + + "convert(sysname,o.name) AS TABLE_NAME, " + + "'VIEW' AS TABLE_TYPE, " + + "CAST(ep.value as varchar(max)) as REMARKS " + + "from " + databasePrefix + "sys.all_objects o " + + "left join sys.extended_properties ep on ep.name='MS_Description' and major_id=o.object_id and minor_id=0 " + + "where " + + "o.type in ('V') " + + "and has_perms_by_name(" + permsParam + "quotename(object_schema_name(o.object_id" + dbIdParam + ")) + '.' + quotename(o.name), 'object', 'select') = 1 " + + "and charindex(substring(o.type,1,1),'V') <> 0 " + + "and object_schema_name(o.object_id" + dbIdParam + ")='" + database.escapeStringForDatabase(ownerName) + "'"; + if (viewName != null) { + sql += " AND o.name='" + database.escapeStringForDatabase(viewName) + "' "; + } + sql += "order by 4, 1, 2, 3"; + + return executeAndExtract(sql, database); + } + + + private List queryOracle(CatalogAndSchema catalogAndSchema, String viewName) throws DatabaseException, SQLException { + String ownerName = database.correctObjectName(catalogAndSchema.getCatalogName(), Schema.class); + + String sql = "SELECT null as TABLE_CAT, a.OWNER as TABLE_SCHEM, a.VIEW_NAME as TABLE_NAME, 'TABLE' as TABLE_TYPE, c.COMMENTS as REMARKS, TEXT as OBJECT_BODY"; + if (database.getDatabaseMajorVersion() > 10) { + sql += ", EDITIONING_VIEW"; + } + sql += " from ALL_VIEWS a " + + "join ALL_TAB_COMMENTS c on a.VIEW_NAME=c.table_name and a.owner=c.owner "; + if (viewName != null || getAllCatalogsStringScratchData() == null) { + sql += "WHERE a.OWNER='" + ownerName + "'"; + } else { + sql += "WHERE a.OWNER IN ('" + ownerName + "', " + getAllCatalogsStringScratchData() + ")"; + } + if (viewName != null) { + sql += " AND a.VIEW_NAME='" + database.correctObjectName(viewName, View.class) + "'"; + } + sql += " AND a.VIEW_NAME not in (select mv.name from 
all_registered_mviews mv where mv.owner=a.owner)"; + + return executeAndExtract(sql, database); + } + }); + } + + public List getPrimaryKeys(final String catalogName, final String schemaName, final String table) throws DatabaseException { + return getResultSetCache("getPrimaryKeys").get(new ResultSetCache.SingleResultSetExtractor(database) { + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(row.getString("TABLE_CAT"), row.getString("TABLE_SCHEM"), database, row.getString("TABLE_NAME")); + } + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, table); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return database instanceof OracleDatabase; + } + + + @Override + public String getSchemaKey(CachedRow row) { + return row.getString("TABLE_SCHEM"); + } + + @Override + public List fastFetchQuery() throws SQLException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + try { + List foundPks = new ArrayList<>(); + if (table == null) { + List tables = CachingDatabaseMetaData.this.getTables(catalogName, schemaName, null); + for (CachedRow table : tables) { + List pkInfo = getPkInfo(catalogAndSchema, table.getString("TABLE_NAME")); + if (pkInfo != null) { + foundPks.addAll(pkInfo); + } + } + return foundPks; + } else { + List pkInfo = getPkInfo(catalogAndSchema, table); + if (pkInfo != null) { + foundPks.addAll(pkInfo); + } + } + return foundPks; + } catch (DatabaseException e) { + throw new SQLException(e); + } + } + + private List getPkInfo(CatalogAndSchema catalogAndSchema, String tableName) throws DatabaseException, SQLException { + List pkInfo; + if (database instanceof MSSQLDatabase) { + String sql = mssqlSql(catalogAndSchema, tableName); + pkInfo = executeAndExtract(sql, database); + } else { + if (database instanceof Db2zDatabase) { + 
String sql = "SELECT 'NULL' AS TABLE_CAT," + + " SYSTAB.TBCREATOR AS TABLE_SCHEM, " + + "SYSTAB.TBNAME AS TABLE_NAME, " + + "COLUSE.COLNAME AS COLUMN_NAME, " + + "COLUSE.COLSEQ AS KEY_SEQ, " + + "SYSTAB.CONSTNAME AS PK_NAME " + + "FROM SYSIBM.SYSTABCONST SYSTAB " + + "JOIN SYSIBM.SYSKEYCOLUSE COLUSE " + + "ON SYSTAB.TBCREATOR = COLUSE.TBCREATOR " + + "WHERE SYSTAB.TYPE = 'P' " + + "AND SYSTAB.TBNAME = ? " + + "AND SYSTAB.TBCREATOR = ? " + + "AND SYSTAB.TBNAME=COLUSE.TBNAME " + + "AND SYSTAB.CONSTNAME=COLUSE.CONSTNAME " + + "ORDER BY COLUSE.COLNAME"; + try { + return executeAndExtract(database, sql, table, ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema)); + } catch (DatabaseException e) { + throw new SQLException(e); + } + } else if (database instanceof OracleDatabase) { + warnAboutDbaRecycleBin(); + + String sql = "SELECT NULL AS table_cat, c.owner AS table_schem, c.table_name, c.column_name as COLUMN_NAME, c.position AS key_seq, c.constraint_name AS pk_name, k.VALIDATED as VALIDATED " + + "FROM all_cons_columns c, all_constraints k " + + "LEFT JOIN " + (((OracleDatabase) database).canAccessDbaRecycleBin() ? "dba_recyclebin" : "user_recyclebin") + " d ON d.object_name=k.table_name " + + "WHERE k.constraint_type = 'P' " + + "AND d.object_name IS NULL " + + "AND k.table_name = '" + table + "' " + + "AND k.owner = '" + ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema) + "' " + + "AND k.constraint_name = c.constraint_name " + + "AND k.table_name = c.table_name " + + "AND k.owner = c.owner " + + "ORDER BY column_name"; + try { + return executeAndExtract(sql, database); + } catch (DatabaseException e) { + throw new SQLException(e); + } + } else if (database instanceof CockroachDatabase) { + // This is the same as the query generated by PGJDBC's getPrimaryKeys method, except it + // also adds an `asc_or_desc` column to the result. 
+ String sql = "SELECT " + + " result.table_cat, " + + " result.table_schem, " + + " result.table_name, " + + " result.column_name, " + + " result.key_seq, " + + " result.pk_name, " + + " CASE result.indoption[result.key_seq - 1] & 1 " + + " WHEN 1 THEN 'D' " + + " ELSE 'A' " + + " END AS asc_or_desc " + + "FROM " + + " (" + + " SELECT " + + " NULL AS table_cat, " + + " n.nspname AS table_schem, " + + " ct.relname AS table_name, " + + " a.attname AS column_name, " + + " (information_schema._pg_expandarray(i.indkey)).n " + + " AS key_seq, " + + " ci.relname AS pk_name, " + + " information_schema._pg_expandarray(i.indkey) AS keys, " + + " i.indoption, " + + " a.attnum AS a_attnum " + + " FROM " + + " pg_catalog.pg_class AS ct " + + " JOIN pg_catalog.pg_attribute AS a ON (ct.oid = a.attrelid) " + + " JOIN pg_catalog.pg_namespace AS n ON " + + " (ct.relnamespace = n.oid) " + + " JOIN pg_catalog.pg_index AS i ON (a.attrelid = i.indrelid) " + + " JOIN pg_catalog.pg_class AS ci ON (ci.oid = i.indexrelid) " + + " WHERE " + + " true " + + " AND n.nspname = '" + ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema) + "' " + + " AND ct.relname = '" + table + "' " + + " AND i.indisprimary" + + " ) " + + " AS result " + + "WHERE " + + " result.a_attnum = (result.keys).x " + + "ORDER BY " + + " result.table_name, result.pk_name, result.key_seq"; + + try { + return executeAndExtract(sql, database); + } catch (DatabaseException e) { + throw new SQLException(e); + } + } else { + return extract( + databaseMetaData.getPrimaryKeys( + ((AbstractJdbcDatabase) database).getJdbcCatalogName(catalogAndSchema), + ((AbstractJdbcDatabase) database).getJdbcSchemaName(catalogAndSchema), + table + ) + ); + } + } + return pkInfo; + } + + private String mssqlSql(CatalogAndSchema catalogAndSchema, String tableName) throws DatabaseException { + String sql; + sql = + "SELECT " + + "DB_NAME() AS [TABLE_CAT], " + + "[s].[name] AS [TABLE_SCHEM], " + + "[t].[name] AS [TABLE_NAME], " + + 
"[c].[name] AS [COLUMN_NAME], " + + "CASE [ic].[is_descending_key] WHEN 0 THEN N'A' WHEN 1 THEN N'D' END AS [ASC_OR_DESC], " + + "[ic].[key_ordinal] AS [KEY_SEQ], " + + "[kc].[name] AS [PK_NAME] " + + "FROM [sys].[schemas] AS [s] " + + "INNER JOIN [sys].[tables] AS [t] " + + "ON [t].[schema_id] = [s].[schema_id] " + + "INNER JOIN [sys].[key_constraints] AS [kc] " + + "ON [kc].[parent_object_id] = [t].[object_id] " + + "INNER JOIN [sys].[indexes] AS [i] " + + "ON [i].[object_id] = [kc].[parent_object_id] " + + "AND [i].[index_id] = [kc].[unique_index_id] " + + "INNER JOIN [sys].[index_columns] AS [ic] " + + "ON [ic].[object_id] = [i].[object_id] " + + "AND [ic].[index_id] = [i].[index_id] " + + "INNER JOIN [sys].[columns] AS [c] " + + "ON [c].[object_id] = [ic].[object_id] " + + "AND [c].[column_id] = [ic].[column_id] " + + "WHERE [s].[name] = N'" + database.escapeStringForDatabase(catalogAndSchema.getSchemaName()) + "' " + // The schema name was corrected in the customized CatalogAndSchema + (tableName == null ? "" : "AND [t].[name] = N'" + database.escapeStringForDatabase(database.correctObjectName(tableName, Table.class)) + "' ") + + "AND [kc].[type] = 'PK' " + + "AND [ic].[key_ordinal] > 0 " + + "ORDER BY " + + "[ic].[key_ordinal]"; + return sql; + } + + @Override + public List bulkFetchQuery() throws SQLException { + if (database instanceof OracleDatabase) { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + warnAboutDbaRecycleBin(); + try { + String sql = "SELECT NULL AS table_cat, c.owner AS table_schem, c.table_name, c.column_name, c.position AS key_seq,c.constraint_name AS pk_name, k.VALIDATED as VALIDATED FROM " + + "all_cons_columns c, " + + "all_constraints k " + + "LEFT JOIN " + (((OracleDatabase) database).canAccessDbaRecycleBin() ? 
"dba_recyclebin" : "user_recyclebin") + " d ON d.object_name=k.table_name " + + "WHERE k.constraint_type = 'P' " + + "AND d.object_name IS NULL "; + if (getAllCatalogsStringScratchData() == null) { + sql += "AND k.owner='" + catalogAndSchema.getCatalogName() + "' "; + } else { + sql += "AND k.owner IN ('" + catalogAndSchema.getCatalogName() + "', " + getAllCatalogsStringScratchData() + ")"; + } + sql += "AND k.constraint_name = c.constraint_name " + + "AND k.table_name = c.table_name " + + "AND k.owner = c.owner " + + "ORDER BY column_name"; + return executeAndExtract(sql, database); + } catch (DatabaseException e) { + throw new SQLException(e); + } + } else if (database instanceof MSSQLDatabase) { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + try { + return executeAndExtract(mssqlSql(catalogAndSchema, null), database); + } catch (DatabaseException e) { + throw new SQLException(e); + } + } + return null; + } + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + if ((database instanceof OracleDatabase) || (database instanceof MSSQLDatabase)) { + return table == null || getAllCatalogsStringScratchData() != null || super.shouldBulkSelect(schemaKey, resultSetCache); + } else { + return false; + } + } + }); + } + + public List getUniqueConstraints(final String catalogName, final String schemaName, final String tableName) throws DatabaseException { + return getResultSetCache("getUniqueConstraints").get(new ResultSetCache.SingleResultSetExtractor(database) { + + @Override + protected boolean shouldBulkSelect(String schemaKey, ResultSetCache resultSetCache) { + return tableName == null || getAllCatalogsStringScratchData() != null || super.shouldBulkSelect(schemaKey, resultSetCache); + } + + @Override + public boolean bulkContainsSchema(String schemaKey) { + return database instanceof OracleDatabase; + } + + @Override + public String getSchemaKey(CachedRow row) { + 
return row.getString("CONSTRAINT_SCHEM"); + } + + @Override + public ResultSetCache.RowData rowKeyParameters(CachedRow row) { + return new ResultSetCache.RowData(catalogName, schemaName, database, row.getString("TABLE_NAME")); + } + + @Override + public ResultSetCache.RowData wantedKeyParameters() { + return new ResultSetCache.RowData(catalogName, schemaName, database, tableName); + } + + @Override + public List fastFetchQuery() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + return queryDb(catalogAndSchema, tableName); + } + + @Override + public List bulkFetchQuery() throws SQLException, DatabaseException { + CatalogAndSchema catalogAndSchema = new CatalogAndSchema(catalogName, schemaName).customize(database); + + return queryDb(catalogAndSchema, null); + } + + private List queryDb(CatalogAndSchema catalogAndSchema, String tableName) throws SQLException, DatabaseException { + + String jdbcCatalogName = catalogAndSchema.getCatalogName(); + String jdbcSchemaName = catalogAndSchema.getSchemaName(); + + Database database = getDatabase(); + List parameters = new ArrayList<>(3); + String sql = null; + if (database instanceof Ingres9Database) { + sql = "select CONSTRAINT_NAME, TABLE_NAME from iiconstraints where schema_name ='" + + schemaName + "' and constraint_type='U'"; + if (tableName != null) { + sql += " and table_name='" + tableName + "'"; + } + } else if ((database instanceof MySQLDatabase) || (database instanceof HsqlDatabase) || (database + instanceof MariaDBDatabase)) { + sql = "select CONSTRAINT_NAME, TABLE_NAME " + + "from " + database.getSystemSchema() + ".table_constraints " + + "where constraint_schema='" + jdbcCatalogName + "' " + + "and constraint_type='UNIQUE'"; + if (tableName != null) { + sql += " and table_name='" + tableName + "'"; + } + } else if (database instanceof PostgresDatabase) { + sql = "select CONSTRAINT_NAME, TABLE_NAME " + + "from " + 
database.getSystemSchema() + ".table_constraints " + + "where constraint_catalog='" + jdbcCatalogName + "' " + + "and constraint_schema='" + jdbcSchemaName + "' " + + "and constraint_type='UNIQUE'"; + if (tableName != null) { + sql += " and table_name='" + tableName + "'"; + } + } else if (database.getClass().getName().contains("MaxDB")) { //have to check classname as this is currently an extension + sql = "select distinct tablename AS TABLE_NAME, constraintname AS CONSTRAINT_NAME from CONSTRAINTCOLUMNS WHERE CONSTRAINTTYPE = 'UNIQUE_CONST'"; + if (tableName != null) { + sql += " and tablename='" + tableName + "'"; + } + } else if (database instanceof MSSQLDatabase) { + sql = + "SELECT " + + "[TC].[CONSTRAINT_NAME], " + + "[TC].[TABLE_NAME], " + + "[TC].[CONSTRAINT_CATALOG] AS INDEX_CATALOG, " + + "[TC].[CONSTRAINT_SCHEMA] AS INDEX_SCHEMA, " + + "[IDX].[TYPE_DESC], " + + "[IDX].[name] AS INDEX_NAME " + + "FROM [INFORMATION_SCHEMA].[TABLE_CONSTRAINTS] AS [TC] " + + "JOIN sys.indexes AS IDX ON IDX.name=[TC].[CONSTRAINT_NAME] AND object_schema_name(object_id)=[TC].[CONSTRAINT_SCHEMA] " + + "WHERE [TC].[CONSTRAINT_TYPE] = 'UNIQUE' " + + "AND [TC].[CONSTRAINT_CATALOG] = N'" + database.escapeStringForDatabase(jdbcCatalogName) + "' " + + "AND [TC].[CONSTRAINT_SCHEMA] = N'" + database.escapeStringForDatabase(jdbcSchemaName) + "'"; + if (tableName != null) { + sql += " AND [TC].[TABLE_NAME] = N'" + database.escapeStringForDatabase(database.correctObjectName(tableName, Table.class)) + "'"; + } + } else if (database instanceof OracleDatabase) { + warnAboutDbaRecycleBin(); + + sql = "select uc.owner AS CONSTRAINT_SCHEM, uc.constraint_name, uc.table_name,uc.status,uc.deferrable,uc.deferred,ui.tablespace_name, ui.index_name, ui.owner as INDEX_CATALOG, uc.VALIDATED as VALIDATED, ac.COLUMN_NAME as COLUMN_NAME " + + "from all_constraints uc " + + "join all_indexes ui on uc.index_name = ui.index_name and uc.owner=ui.table_owner and uc.table_name=ui.table_name " + + "LEFT OUTER JOIN 
" + (((OracleDatabase) database).canAccessDbaRecycleBin() ? "dba_recyclebin" : "user_recyclebin") + " d ON d.object_name=ui.table_name " + + "LEFT JOIN all_cons_columns ac ON ac.OWNER = uc.OWNER AND ac.TABLE_NAME = uc.TABLE_NAME AND ac.CONSTRAINT_NAME = uc.CONSTRAINT_NAME " + + "where uc.constraint_type='U' "; + if (tableName != null || getAllCatalogsStringScratchData() == null) { + sql += "and uc.owner = '" + jdbcSchemaName + "'"; + } else { + sql += "and uc.owner IN ('" + jdbcSchemaName + "', " + getAllCatalogsStringScratchData() + ")"; + } + sql += "AND d.object_name IS NULL "; + + if (tableName != null) { + sql += " and uc.table_name = '" + tableName + "'"; + } + } else if (database instanceof DB2Database) { + // if we are on DB2 AS400 iSeries + if (database.getDatabaseProductName().startsWith("DB2 UDB for AS/400")) { + sql = "select constraint_name as constraint_name, table_name as table_name from QSYS2.TABLE_CONSTRAINTS where table_schema='" + jdbcSchemaName + "' and constraint_type='UNIQUE'"; + if (tableName != null) { + sql += " and table_name = '" + tableName + "'"; + } + // DB2 z/OS + } + // here we are on DB2 UDB + else { + sql = "select distinct k.constname as constraint_name, t.tabname as TABLE_NAME " + + "from syscat.keycoluse k " + + "inner join syscat.tabconst t " + + "on k.constname = t.constname " + + "where t.tabschema = ? " + + "and t.type = 'U'"; + parameters.add(jdbcSchemaName); + if (tableName != null) { + sql += " and t.tabname = ?"; + parameters.add(tableName); + } + } + } else if (database instanceof Db2zDatabase) { + sql = "select k.constname as constraint_name, t.tbname as TABLE_NAME" + + " from SYSIBM.SYSKEYCOLUSE k" + + " inner join SYSIBM.SYSTABCONST t" + + " on k.constname = t.constname" + + " and k.TBCREATOR = t.TBCREATOR" + + " and k.TBNAME = t.TBNAME" + + " where t.TBCREATOR = ?" 
+ + " and t.TYPE = 'U'"; + parameters.add(jdbcSchemaName); + if (tableName != null) { + sql += " and t.TBNAME = ?"; + parameters.add(tableName); + } + } else if (database instanceof FirebirdDatabase) { + sql = "SELECT TRIM(RDB$INDICES.RDB$INDEX_NAME) AS CONSTRAINT_NAME, " + + "TRIM(RDB$INDICES.RDB$RELATION_NAME) AS TABLE_NAME " + + "FROM RDB$INDICES " + + "LEFT JOIN RDB$RELATION_CONSTRAINTS " + + "ON RDB$RELATION_CONSTRAINTS.RDB$INDEX_NAME = RDB$INDICES.RDB$INDEX_NAME " + + "WHERE RDB$INDICES.RDB$UNIQUE_FLAG IS NOT NULL " + + "AND (" + + "RDB$RELATION_CONSTRAINTS.RDB$CONSTRAINT_TYPE IS NULL " + + "OR TRIM(RDB$RELATION_CONSTRAINTS.RDB$CONSTRAINT_TYPE)='UNIQUE') " + + "AND NOT(RDB$INDICES.RDB$INDEX_NAME LIKE 'RDB$%')"; + if (tableName != null) { + sql += " AND TRIM(RDB$INDICES.RDB$RELATION_NAME)='" + tableName + "'"; + } + } else if (database instanceof DerbyDatabase) { + sql = "select c.constraintname as CONSTRAINT_NAME, tablename AS TABLE_NAME " + + "from sys.systables t, sys.sysconstraints c, sys.sysschemas s " + + "where s.schemaname='" + jdbcCatalogName + "' " + + "and t.tableid = c.tableid " + + "and t.schemaid=s.schemaid " + + "and c.type = 'U'"; + if (tableName != null) { + sql += " AND t.tablename = '" + tableName + "'"; + } + } else if (database instanceof InformixDatabase) { + sql = "select unique sysindexes.idxname as CONSTRAINT_NAME, sysindexes.idxtype, systables.tabname as TABLE_NAME " + + "from sysindexes, systables " + + "left outer join sysconstraints on sysconstraints.tabid = systables.tabid and sysconstraints.constrtype = 'P' " + + "where sysindexes.tabid = systables.tabid and sysindexes.idxtype = 'U' " + + "and sysconstraints.idxname != sysindexes.idxname " + + "and sysconstraints.tabid = sysindexes.tabid"; + if (tableName != null) { + sql += " and systables.tabname = '" + database.correctObjectName(tableName, Table.class) + "'"; + } + } else if (database instanceof SybaseDatabase) { + sql = "select idx.name as CONSTRAINT_NAME, tbl.name as 
TABLE_NAME " + + "from sysindexes idx " + + "inner join sysobjects tbl on tbl.id = idx.id " + + "where idx.indid between 1 and 254 " + + "and (idx.status & 2) = 2 " + + "and tbl.type = 'U'"; + if (tableName != null) { + sql += " and tbl.name = '" + database.correctObjectName(tableName, Table.class) + "'"; + } + } else if (database instanceof SybaseASADatabase) { + sql = "select sysconstraint.constraint_name, sysconstraint.constraint_type, systable.table_name " + + "from sysconstraint, systable " + + "where sysconstraint.table_object_id = systable.object_id " + + "and sysconstraint.constraint_type = 'U'"; + if (tableName != null) { + sql += " and systable.table_name = '" + tableName + "'"; + } + } else { + if (database instanceof H2Database) { + try { + if (database.getDatabaseMajorVersion() >= 2) { + sql = "select CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME " + + "from " + database.getSystemSchema() + ".table_constraints " + + "where constraint_schema='" + jdbcSchemaName + "' " + + "and constraint_catalog='" + jdbcCatalogName + "' " + + "and constraint_type='UNIQUE'"; + if (tableName != null) { + sql += " and table_name='" + tableName + "'"; + } + } + } catch (DatabaseException e) { + Scope.getCurrentScope().getLog(getClass()).fine("Cannot determine h2 version, using default unique constraint query"); + } + } + if (sql == null) { + + sql = "select CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME " + + "from " + database.getSystemSchema() + ".constraints " + + "where constraint_schema='" + jdbcSchemaName + "' " + + "and constraint_catalog='" + jdbcCatalogName + "' " + + "and constraint_type='UNIQUE'"; + if (tableName != null) { + sql += " and table_name='" + tableName + "'"; + } + } + } + + return executeAndExtract(database, database instanceof InformixDatabase, sql, parameters.toArray()); + } + }); + } + } + + private String getAllCatalogsStringScratchData() { + return (String) getScratchData(ALL_CATALOGS_STRING_SCRATCH_KEY); + } + + private String 
escapeForLike(String string, Database database) { + if (string == null) { + return null; + } + + if (database instanceof SQLiteDatabase || database instanceof DmDatabase) { + //sqlite jdbc's queries does not support escaped patterns. + // DM 也不支持转义的匹配方式,需要兼容 + return string; + } + + return string + .replace("%", "\\%") + .replace("_", "\\_"); + } +} diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml b/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml index 37f2d24f..adfbf58f 100644 --- a/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml +++ b/zt-module-bpm/zt-module-bpm-server/src/main/resources/application.yaml @@ -82,9 +82,6 @@ flowable: db-history-used: true # flowable6 默认 true 生成信息表,无需手动设置 check-process-definitions: false # 设置为 false,禁用 /resources/processes 自动部署 BPMN XML 流程 history-level: audit # full:保存历史数据的最高级别,可保存全部流程相关细节,包括流程流转各节点参数 - eventregistry: - enabled: true # 默认开启事件引擎,这里显式声明,便于阅读 - database-schema-update: false # 禁止事件引擎重复自动建表,防止 FLW_EV_* 表冲突 # MyBatis Plus 的配置项 mybatis-plus: From 446b5ca7a41b66f3301912dc822d63980b2eec67 Mon Sep 17 00:00:00 2001 From: chenbowen Date: Thu, 27 Nov 2025 13:48:55 +0800 Subject: [PATCH 5/6] =?UTF-8?q?=E5=89=94=E9=99=A4=E6=8E=89=20swagger=20?= =?UTF-8?q?=E4=B8=8D=E8=83=BD=E8=AF=B7=E6=B1=82=E7=9A=84=20rpc-api?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../config/ZtSwaggerAutoConfiguration.java | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/zt-framework/zt-spring-boot-starter-web/src/main/java/com/zt/plat/framework/swagger/config/ZtSwaggerAutoConfiguration.java b/zt-framework/zt-spring-boot-starter-web/src/main/java/com/zt/plat/framework/swagger/config/ZtSwaggerAutoConfiguration.java index 3a66835d..a319454f 100644 --- a/zt-framework/zt-spring-boot-starter-web/src/main/java/com/zt/plat/framework/swagger/config/ZtSwaggerAutoConfiguration.java +++ 
b/zt-framework/zt-spring-boot-starter-web/src/main/java/com/zt/plat/framework/swagger/config/ZtSwaggerAutoConfiguration.java @@ -1,5 +1,6 @@ package com.zt.plat.framework.swagger.config; +import com.zt.plat.framework.common.enums.RpcConstants; import io.swagger.v3.oas.models.Components; import io.swagger.v3.oas.models.OpenAPI; import io.swagger.v3.oas.models.info.Contact; @@ -11,6 +12,7 @@ import io.swagger.v3.oas.models.parameters.Parameter; import io.swagger.v3.oas.models.security.SecurityRequirement; import io.swagger.v3.oas.models.security.SecurityScheme; import org.springdoc.core.customizers.OpenApiBuilderCustomizer; +import org.springdoc.core.customizers.OpenApiCustomizer; import org.springdoc.core.customizers.ServerBaseUrlCustomizer; import org.springdoc.core.models.GroupedOpenApi; import org.springdoc.core.properties.SpringDocConfigProperties; @@ -123,12 +125,26 @@ public class ZtSwaggerAutoConfiguration { return GroupedOpenApi.builder() .group(group) .pathsToMatch("/admin-api/" + path + "/**", "/app-api/" + path + "/**") + .pathsToExclude(RpcConstants.RPC_API_PREFIX + "/**") .addOperationCustomizer((operation, handlerMethod) -> operation .addParametersItem(buildTenantHeaderParameter()) .addParametersItem(buildSecurityHeaderParameter())) .build(); } + @Bean + public OpenApiCustomizer rpcApiPathExclusionCustomiser() { + return openApi -> { + if (openApi == null || openApi.getPaths() == null) { + return; + } + openApi.getPaths().entrySet().removeIf(entry -> { + String path = entry.getKey(); + return path != null && path.startsWith(RpcConstants.RPC_API_PREFIX); + }); + }; + } + /** * 构建 Tenant 租户编号请求头参数 * From 00b2f6312d50793c5d7d74096b0f5f034f2ac187 Mon Sep 17 00:00:00 2001 From: chenbowen Date: Thu, 27 Nov 2025 16:01:05 +0800 Subject: [PATCH 6/6] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20flowable=20=E6=97=A0?= =?UTF-8?q?=E6=B3=95=E9=80=9A=E8=BF=87=20dm=20=E6=95=B0=E6=8D=AE=E5=BA=93?= =?UTF-8?q?=E9=A9=B1=E5=8A=A8=E6=AD=A3=E5=B8=B8=E8=8E=B7=E5=8F=96=20schema?= 
=?UTF-8?q?=20=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../engine/impl/db/DbSqlSessionFactory.java | 354 ++++++++++++++++++ 1 file changed, 354 insertions(+) create mode 100644 zt-module-bpm/zt-module-bpm-server/src/main/java/org/flowable/common/engine/impl/db/DbSqlSessionFactory.java diff --git a/zt-module-bpm/zt-module-bpm-server/src/main/java/org/flowable/common/engine/impl/db/DbSqlSessionFactory.java b/zt-module-bpm/zt-module-bpm-server/src/main/java/org/flowable/common/engine/impl/db/DbSqlSessionFactory.java new file mode 100644 index 00000000..ed2f0c94 --- /dev/null +++ b/zt-module-bpm/zt-module-bpm-server/src/main/java/org/flowable/common/engine/impl/db/DbSqlSessionFactory.java @@ -0,0 +1,354 @@ +/* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.flowable.common.engine.impl.db; + +import org.apache.ibatis.session.SqlSessionFactory; +import org.flowable.common.engine.api.FlowableException; +import org.flowable.common.engine.impl.context.Context; +import org.flowable.common.engine.impl.interceptor.CommandContext; +import org.flowable.common.engine.impl.interceptor.Session; +import org.flowable.common.engine.impl.interceptor.SessionFactory; +import org.flowable.common.engine.impl.persistence.cache.EntityCache; +import org.flowable.common.engine.impl.persistence.entity.Entity; + +import java.sql.SQLException; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +/** + * @author Tom Baeyens + * @author Joram Barrez + */ +public class DbSqlSessionFactory implements SessionFactory { + + protected Map> databaseSpecificStatements = new HashMap<>(); + + protected String databaseType; + protected String databaseTablePrefix = ""; + protected boolean tablePrefixIsSchema; + + protected String databaseCatalog; + protected String databaseSchema; + protected SqlSessionFactory sqlSessionFactory; + protected Map statementMappings; + + protected Map, String> insertStatements = new ConcurrentHashMap<>(); + protected Map, String> updateStatements = new ConcurrentHashMap<>(); + protected Map, String> deleteStatements = new ConcurrentHashMap<>(); + protected Map, String> selectStatements = new ConcurrentHashMap<>(); + + protected List> insertionOrder = new ArrayList<>(); + protected List> deletionOrder = new ArrayList<>(); + + protected boolean isDbHistoryUsed = true; + + protected Set> bulkInserteableEntityClasses = new HashSet<>(); + protected Map, String> bulkInsertStatements = new ConcurrentHashMap<>(); + + protected int maxNrOfStatementsInBulkInsert = 100; + + protected Map> logicalNameToClassMapping = new ConcurrentHashMap<>(); + + protected boolean usePrefixId; + + public DbSqlSessionFactory(boolean usePrefixId) { + this.usePrefixId = usePrefixId; + } + + @Override + public Class 
getSessionType() { + return DbSqlSession.class; + } + + @Override + public Session openSession(CommandContext commandContext) { + DbSqlSession dbSqlSession = createDbSqlSession(); + // 当前系统适配 dm,如果存在 schema 为空的情况,从 connection 获取 + try { + if (getDatabaseSchema() == null || getDatabaseSchema().length() == 0){ + setDatabaseSchema(dbSqlSession.getSqlSession().getConnection().getSchema()); + } + dbSqlSession.getSqlSession().getConnection().getSchema(); + } catch (SQLException e) { + throw new RuntimeException(e); + } + + if (getDatabaseSchema() != null && getDatabaseSchema().length() > 0) { + try { + dbSqlSession.getSqlSession().getConnection().setSchema(getDatabaseSchema()); + } catch (SQLException e) { + throw new FlowableException("Could not set database schema on connection", e); + } + } + if (getDatabaseCatalog() != null && getDatabaseCatalog().length() > 0) { + try { + dbSqlSession.getSqlSession().getConnection().setCatalog(getDatabaseCatalog()); + } catch (SQLException e) { + throw new FlowableException("Could not set database catalog on connection", e); + } + } + if (dbSqlSession.getSqlSession().getConnection() == null) { + throw new FlowableException("Invalid dbSqlSession: no active connection found"); + } + return dbSqlSession; + } + + protected DbSqlSession createDbSqlSession() { + return new DbSqlSession(this, Context.getCommandContext().getSession(EntityCache.class)); + } + + // insert, update and delete statements + // ///////////////////////////////////// + + public String getInsertStatement(Entity object) { + return getStatement(object.getClass(), insertStatements, "insert"); + } + + public String getInsertStatement(Class clazz) { + return getStatement(clazz, insertStatements, "insert"); + } + + public String getUpdateStatement(Entity object) { + return getStatement(object.getClass(), updateStatements, "update"); + } + + public String getDeleteStatement(Class entityClass) { + return getStatement(entityClass, deleteStatements, "delete"); + } + + public 
String getSelectStatement(Class entityClass) { + return getStatement(entityClass, selectStatements, "select"); + } + + protected String getStatement(Class entityClass, Map, String> cachedStatements, String prefix) { + String statement = cachedStatements.get(entityClass); + if (statement != null) { + return statement; + } + statement = prefix + entityClass.getSimpleName(); + if (statement.endsWith("Impl")) { + statement = statement.substring(0, statement.length() - 10); // removing 'entityImpl' + } else { + statement = statement.substring(0, statement.length() - 6); // removing 'entity' + } + cachedStatements.put(entityClass, statement); + return statement; + } + + // db specific mappings + // ///////////////////////////////////////////////////// + + protected void addDatabaseSpecificStatement(String databaseType, String activitiStatement, String ibatisStatement) { + Map specificStatements = databaseSpecificStatements.get(databaseType); + if (specificStatements == null) { + specificStatements = new HashMap<>(); + databaseSpecificStatements.put(databaseType, specificStatements); + } + specificStatements.put(activitiStatement, ibatisStatement); + } + + public String mapStatement(String statement) { + if (statementMappings == null) { + return statement; + } + String mappedStatement = statementMappings.get(statement); + return (mappedStatement != null ? 
mappedStatement : statement); + } + + // customized getters and setters + // /////////////////////////////////////////// + + public void setDatabaseType(String databaseType) { + this.databaseType = databaseType; + this.statementMappings = databaseSpecificStatements.get(databaseType); + } + + public boolean isMysql() { + return "mysql".equals(getDatabaseType()); + } + + public boolean isOracle() { + return "oracle".equals(getDatabaseType()); + } + + public Boolean isBulkInsertable(Class entityClass) { + return bulkInserteableEntityClasses != null && bulkInserteableEntityClasses.contains(entityClass); + } + + @SuppressWarnings("rawtypes") + public String getBulkInsertStatement(Class clazz) { + return getStatement(clazz, bulkInsertStatements, "bulkInsert"); + } + + public Set> getBulkInserteableEntityClasses() { + return bulkInserteableEntityClasses; + } + + public void setBulkInserteableEntityClasses(Set> bulkInserteableEntityClasses) { + this.bulkInserteableEntityClasses = bulkInserteableEntityClasses; + } + + public int getMaxNrOfStatementsInBulkInsert() { + return maxNrOfStatementsInBulkInsert; + } + + public void setMaxNrOfStatementsInBulkInsert(int maxNrOfStatementsInBulkInsert) { + this.maxNrOfStatementsInBulkInsert = maxNrOfStatementsInBulkInsert; + } + + public Map, String> getBulkInsertStatements() { + return bulkInsertStatements; + } + + public void setBulkInsertStatements(Map, String> bulkInsertStatements) { + this.bulkInsertStatements = bulkInsertStatements; + } + + // getters and setters ////////////////////////////////////////////////////// + + public SqlSessionFactory getSqlSessionFactory() { + return sqlSessionFactory; + } + + public void setSqlSessionFactory(SqlSessionFactory sqlSessionFactory) { + this.sqlSessionFactory = sqlSessionFactory; + } + + public String getDatabaseType() { + return databaseType; + } + + public Map> getDatabaseSpecificStatements() { + return databaseSpecificStatements; + } + + public void setDatabaseSpecificStatements(Map> 
databaseSpecificStatements) { + this.databaseSpecificStatements = databaseSpecificStatements; + } + + public Map getStatementMappings() { + return statementMappings; + } + + public void setStatementMappings(Map statementMappings) { + this.statementMappings = statementMappings; + } + + public Map, String> getInsertStatements() { + return insertStatements; + } + + public void setInsertStatements(Map, String> insertStatements) { + this.insertStatements = insertStatements; + } + + public Map, String> getUpdateStatements() { + return updateStatements; + } + + public void setUpdateStatements(Map, String> updateStatements) { + this.updateStatements = updateStatements; + } + + public Map, String> getDeleteStatements() { + return deleteStatements; + } + + public void setDeleteStatements(Map, String> deleteStatements) { + this.deleteStatements = deleteStatements; + } + + public Map, String> getSelectStatements() { + return selectStatements; + } + + public void setSelectStatements(Map, String> selectStatements) { + this.selectStatements = selectStatements; + } + + public boolean isDbHistoryUsed() { + return isDbHistoryUsed; + } + + public void setDbHistoryUsed(boolean isDbHistoryUsed) { + this.isDbHistoryUsed = isDbHistoryUsed; + } + + public void setDatabaseTablePrefix(String databaseTablePrefix) { + this.databaseTablePrefix = databaseTablePrefix; + } + + public String getDatabaseTablePrefix() { + return databaseTablePrefix; + } + + public String getDatabaseCatalog() { + return databaseCatalog; + } + + public void setDatabaseCatalog(String databaseCatalog) { + this.databaseCatalog = databaseCatalog; + } + + public String getDatabaseSchema() { + return databaseSchema; + } + + public void setDatabaseSchema(String databaseSchema) { + this.databaseSchema = databaseSchema; + } + + public void setTablePrefixIsSchema(boolean tablePrefixIsSchema) { + this.tablePrefixIsSchema = tablePrefixIsSchema; + } + + public boolean isTablePrefixIsSchema() { + return tablePrefixIsSchema; + } + + 
public List> getInsertionOrder() { + return insertionOrder; + } + + public void setInsertionOrder(List> insertionOrder) { + this.insertionOrder = insertionOrder; + } + + public List> getDeletionOrder() { + return deletionOrder; + } + + public void setDeletionOrder(List> deletionOrder) { + this.deletionOrder = deletionOrder; + } + public void addLogicalEntityClassMapping(String logicalName, Class entityClass) { + logicalNameToClassMapping.put(logicalName, entityClass); + } + + public Map> getLogicalNameToClassMapping() { + return logicalNameToClassMapping; + } + + public void setLogicalNameToClassMapping(Map> logicalNameToClassMapping) { + this.logicalNameToClassMapping = logicalNameToClassMapping; + } + + public boolean isUsePrefixId() { + return usePrefixId; + } + + public void setUsePrefixId(boolean usePrefixId) { + this.usePrefixId = usePrefixId; + } +}