Dataset columns (name, dtype, value/length statistics):
  status           stringclasses   1 value
  repo_name        stringclasses   31 values
  repo_url         stringclasses   31 values
  issue_id         int64           1 – 104k
  title            stringlengths   4 – 369
  body             stringlengths   0 – 254k
  issue_url        stringlengths   37 – 56
  pull_url         stringlengths   37 – 54
  before_fix_sha   stringlengths   40 – 40
  after_fix_sha    stringlengths   40 – 40
  report_datetime  unknown
  language         stringclasses   5 values
  commit_datetime  unknown
  updated_file     stringlengths   4 – 188
  file_content     stringlengths   0 – 5.12M
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,412
[Bug] [UI] Workflow definition timing manage edit
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The environment name is not assigned when I edit the timing management of a workflow definition, as follows: ![image](https://user-images.githubusercontent.com/8847400/154420171-16bc26d8-c2a5-4d1b-bf14-4d0ecaa524b7.png) ### What you expected to happen Environment Name should show the correct value. ### How to reproduce Open `Process definition` -> `Cron Manage` and click the edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8412
https://github.com/apache/dolphinscheduler/pull/8413
6334a8221a0dfa35d7fb69de227b5aedbe775a0f
d2f8d96fa05f55e884d2c0d40f9fd3f64016d9dd
"2022-02-17T06:42:38Z"
java
"2022-02-17T11:44:47Z"
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/timing.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="timing-process-model"> <div class="clearfix list"> <div class="text"> {{$t('Start and stop time')}} </div> <div class="cont"> <el-date-picker style="width: 360px" v-model="scheduleTime" size="small" @change="_datepicker" type="datetimerange" range-separator="-" :start-placeholder="$t('startDate')" :end-placeholder="$t('endDate')" value-format="yyyy-MM-dd HH:mm:ss"> </el-date-picker> </div> </div> <div class="clearfix list"> <el-button type="primary" style="margin-left:20px" size="small" round :loading="spinnerLoading" @click="preview()">{{$t('Execute time')}}</el-button> <div class="text"> {{$t('Timing')}} </div> <div class="cont"> <template> <el-popover placement="bottom-start" trigger="click"> <template slot="reference"> <el-input style="width: 360px;" type="text" size="small" readonly :value="crontab"> </el-input> </template> <div class="crontab-box"> <v-crontab v-model="crontab" :locale="i18n"></v-crontab> </div> </el-popover> </template> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Timezone')}} </div> <div class="cont"> <el-select v-model=timezoneId filterable placeholder="Timezone"> <el-option v-for="item in availableTimezoneIDList" :key="item" :label="item" :value="item"> </el-option> </el-select> </div> </div> <div class="clearfix list"> <div style = "padding-left: 150px;">{{$t('Next five execution times')}}</div> <ul style = "padding-left: 150px;"> <li v-for="(time,i) in previewTimes" :key='i'>{{time}}</li> </ul> </div> <div class="clearfix list"> <div class="text"> {{$t('Failure Strategy')}} </div> <div class="cont"> <el-radio-group v-model="failureStrategy" style="margin-top: 7px;" size="small"> <el-radio :label="'CONTINUE'">{{$t('Continue')}}</el-radio> <el-radio :label="'END'">{{$t('End')}}</el-radio> </el-radio-group> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Notification strategy')}} </div> <div class="cont"> <el-select style="width: 200px;" size="small" v-model="warningType"> <el-option v-for="city in warningTypeList" :key="city.id" :value="city.id" :label="city.code"> </el-option> </el-select> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Process priority')}} </div> <div class="cont"> <m-priority v-model="processInstancePriority"></m-priority> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Worker group')}} </div> <div class="cont"> <m-worker-groups v-model="workerGroup"></m-worker-groups> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Environment Name')}} </div> <div class="cont"> <m-related-environment v-model="environmentCode" :workerGroup="workerGroup" v-on:environmentCodeEvent="_onUpdateEnvironmentCode"></m-related-environment> </div> </div> <div class="clearfix list"> <div 
class="text"> {{$t('Alarm group')}} </div> <div class="cont"> <el-select style="width: 200px;" clearable size="small" :disabled="!notifyGroupList.length" v-model="warningGroupId"> <el-input slot="trigger" readonly slot-scope="{ selectedModel }" :value="selectedModel ? selectedModel.label : ''" style="width: 200px;" @on-click-icon.stop="warningGroupId = {}"> <em slot="suffix" class="el-icon-error" style="font-size: 15px;cursor: pointer;" v-show="warningGroupId.id"></em> <em slot="suffix" class="el-icon-bottom" style="font-size: 12px;" v-show="!warningGroupId.id"></em> </el-input> <el-option v-for="city in notifyGroupList" :key="city.id" :value="city.id" :label="city.code"> </el-option> </el-select> </div> </div> <div class="submit"> <el-button type="text" size="small" @click="close()"> {{$t('Cancel')}} </el-button> <el-button type="primary" size="small" round :loading="spinnerLoading" @click="ok()">{{spinnerLoading ? $t('Loading...') : (timingData.item.crontab ? $t('Edit') : $t('Create'))}} </el-button> </div> </div> </template> <script> import moment from 'moment-timezone' import i18n from '@/module/i18n' import store from '@/conf/home/store' import { warningTypeList } from './util' import { vCrontab } from '@/module/components/crontab/index' import { formatDate } from '@/module/filter/filter' import mPriority from '@/module/components/priority/priority' import mWorkerGroups from '@/conf/home/pages/dag/_source/formModel/_source/workerGroups' import mRelatedEnvironment from '@/conf/home/pages/dag/_source/formModel/_source/relatedEnvironment' export default { name: 'timing-process', data () { return { store, processDefinitionId: 0, failureStrategy: 'CONTINUE', warningTypeList: warningTypeList, availableTimezoneIDList: moment.tz.names(), warningType: 'NONE', notifyGroupList: [], warningGroupId: '', spinnerLoading: false, scheduleTime: '', crontab: '0 0 * * * ? *', timezoneId: moment.tz.guess(), cronPopover: false, i18n: i18n.globalScope.LOCALE, processInstancePriority: 'MEDIUM', workerGroup: '', environmentCode: '', previewTimes: [] } }, props: { timingData: Object }, methods: { _datepicker (val) { this.scheduleTime = val }, _verification () { if (!this.scheduleTime) { this.$message.warning(`${i18n.$t('Please select time')}`) return false } if (this.scheduleTime[0] === this.scheduleTime[1]) { this.$message.warning(`${i18n.$t('The start time must not be the same as the end')}`) return false } if (!this.crontab) { this.$message.warning(`${i18n.$t('Please enter crontab')}`) return false } return true }, _onUpdateEnvironmentCode (o) { this.environmentCode = o }, _timing () { if (this._verification()) { let api = '' let searchParams = { schedule: JSON.stringify({ startTime: this.scheduleTime[0], endTime: this.scheduleTime[1], crontab: this.crontab, timezoneId: this.timezoneId }), failureStrategy: this.failureStrategy, warningType: this.warningType, processInstancePriority: this.processInstancePriority, warningGroupId: this.warningGroupId === '' ? 
0 : this.warningGroupId, workerGroup: this.workerGroup, environmentCode: this.environmentCode } let msg = '' // edit if (this.timingData.item.crontab) { api = 'dag/updateSchedule' searchParams.id = this.timingData.item.id msg = `${i18n.$t('Edit')}${i18n.$t('Success')},${i18n.$t('Please go online')}` } else { api = 'dag/createSchedule' searchParams.processDefinitionCode = this.timingData.item.code msg = `${i18n.$t('Create')}${i18n.$t('Success')}` } this.store.dispatch(api, searchParams).then(res => { this.$message.success(msg) this.$emit('onUpdateTiming') }).catch(e => { this.$message.error(e.msg || '') }) } }, _preview () { if (this._verification()) { let api = 'dag/previewSchedule' let searchParams = { schedule: JSON.stringify({ startTime: this.scheduleTime[0], endTime: this.scheduleTime[1], crontab: this.crontab }) } this.store.dispatch(api, searchParams).then(res => { if (res.length) { this.previewTimes = res } else { this.$message.warning(`${i18n.$t('There is no data for this period of time')}`) } }) } }, _getNotifyGroupList () { return new Promise((resolve, reject) => { this.store.dispatch('dag/getNotifyGroupList').then(res => { this.notifyGroupList = res if (this.notifyGroupList.length) { resolve() } else { reject(new Error(0)) } }) }) }, ok () { this._timing() }, close () { this.$emit('closeTiming') }, preview () { this._preview() } }, watch: { }, created () { if (this.timingData.item.workerGroup === undefined) { let stateWorkerGroupsList = this.store.state.security.workerGroupsListAll || [] if (stateWorkerGroupsList.length) { this.workerGroup = stateWorkerGroupsList[0].id } else { this.store.dispatch('security/getWorkerGroupsAll').then(res => { this.$nextTick(() => { this.workerGroup = res[0].id }) }) } } else { this.workerGroup = this.timingData.item.workerGroup } if (this.timingData.item.crontab !== null) { this.crontab = this.timingData.item.crontab } if (this.timingData.type === 'timing') { let date = new Date() let year = date.getFullYear() let month = date.getMonth() + 1 let day = date.getDate() if (month < 10) { month = '0' + month } if (day < 10) { day = '0' + day } let startDate = year + '-' + month + '-' + day + ' ' + '00:00:00' let endDate = (year + 100) + '-' + month + '-' + day + ' ' + '00:00:00' let times = [] times[0] = startDate times[1] = endDate this.crontab = '0 0 * * * ? *' this.scheduleTime = times } }, mounted () { let item = this.timingData.item // Determine whether to echo if (this.timingData.item.crontab) { this.crontab = item.crontab this.scheduleTime = [formatDate(item.startTime), formatDate(item.endTime)] this.timezoneId = item.timezoneId === null ? moment.tz.guess() : item.timezoneId this.failureStrategy = item.failureStrategy this.warningType = item.warningType this.processInstancePriority = item.processInstancePriority this._getNotifyGroupList().then(() => { this.$nextTick(() => { // let list = _.filter(this.notifyGroupList, v => v.id === item.warningGroupId) this.warningGroupId = item.warningGroupId === 0 ? 
'' : item.warningGroupId }) }).catch(() => { this.warningGroupId = '' }) } else { this._getNotifyGroupList().then(() => { this.$nextTick(() => { this.warningGroupId = '' }) }).catch(() => { this.warningGroupId = '' }) } }, components: { vCrontab, mPriority, mWorkerGroups, mRelatedEnvironment } } </script> <style lang="scss" rel="stylesheet/scss"> .timing-process-model { width: 860px; min-height: 300px; background: #fff; border-radius: 3px; margin-top: 0; .crontab-box { margin: -6px; .v-crontab { } } .form-model { padding-top: 0; } .title-box { margin-bottom: 18px; span { padding-left: 30px; font-size: 16px; padding-top: 29px; display: block; } } .list { margin-bottom: 14px; >.text { width: 140px; float: left; text-align: right; line-height: 32px; padding-right: 8px; } .cont { width: 350px; float: left; } } .submit { text-align: right; padding-right: 30px; padding-top: 10px; padding-bottom: 30px; } } .v-crontab-form-model { .list-box { padding: 0; } } .x-date-packer-panel .x-date-packer-day .lattice label.bg-hover { background: #00BFFF!important; margin-top: -4px; } .x-date-packer-panel .x-date-packer-day .lattice em:hover { background: #0098e1!important; } </style>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,296
[Bug] [API] Update process definition error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2022-02-07 17:44:03.394 org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl:[332] - error: java.lang.IllegalStateException: Duplicate key TaskDefinition{id=0, code=3909232388448, name='依赖检查', version=1, description='', projectCode=0, userId=0, taskType=DEPENDENT, taskParams='{"dependence":{"relation":"AND","dependTaskList":[{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232229350,"depTaskCode":0,"cycle":"day","dateValue":"today"}]},{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232232293,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232229346,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230758,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230759,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230765,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230761,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230760,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230887,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230506,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230517,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230259,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232231796,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230263,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230628,"depTaskCode":0,"cycle":"day","dateValue":"today"}]}]},"conditionResult":{"successNode":[],"failedNode":[]},"waitStartTimeout":{"strategy":"FAILED","interval":null,"checkInterval":null,"enable":false},"switchResult":{}}', taskParamList=null, taskParamMap=null, flag=YES, taskPriority=MEDIUM, userName='null', projectName='null', workerGroup='offline-cluster', failRetryTimes=720, environmentCode='-1', failRetryInterval=1, timeoutFlag=CLOSE, timeoutNotifyStrategy=null, timeout=0, delayTime=0, resourceIds='null', createTime=null, updateTime=null} at java.util.stream.Collectors.lambda$throwingMerger$0(Collectors.java:133) at java.util.HashMap.merge(HashMap.java:1255) at java.util.stream.Collectors.lambda$toMap$58(Collectors.java:1320) at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1384) at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:482) at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) at 
org.apache.dolphinscheduler.service.process.ProcessService.transformTask(ProcessService.java:2517) at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:689) at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$a43f595c.transformTask(<generated>) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.checkTaskRelationList(ProcessDefinitionServiceImpl.java:305) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:528) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:783) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:698) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$1978743c.updateProcessDefinition(<generated>) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:249) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>) ``` ### What you expected to happen update successfully. ### How to reproduce update successfully. ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8296
https://github.com/apache/dolphinscheduler/pull/8408
af39ae3ce9e055da5a9485ccc5f5678e8120ad17
f6e2a2cf2387cf4f3ff1d794cea8158bac989fb7
"2022-02-07T09:57:26Z"
java
"2022-02-18T13:05:43Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.4_schema/mysql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,296
[Bug] [API] Update process definition error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2022-02-07 17:44:03.394 org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl:[332] - error: java.lang.IllegalStateException: Duplicate key TaskDefinition{id=0, code=3909232388448, name='依赖检查', version=1, description='', projectCode=0, userId=0, taskType=DEPENDENT, taskParams='{"dependence":{"relation":"AND","dependTaskList":[{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232229350,"depTaskCode":0,"cycle":"day","dateValue":"today"}]},{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232232293,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232229346,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230758,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230759,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230765,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230761,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230760,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230887,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230506,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230517,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230259,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232231796,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230263,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230628,"depTaskCode":0,"cycle":"day","dateValue":"today"}]}]},"conditionResult":{"successNode":[],"failedNode":[]},"waitStartTimeout":{"strategy":"FAILED","interval":null,"checkInterval":null,"enable":false},"switchResult":{}}', taskParamList=null, taskParamMap=null, flag=YES, taskPriority=MEDIUM, userName='null', projectName='null', workerGroup='offline-cluster', failRetryTimes=720, environmentCode='-1', failRetryInterval=1, timeoutFlag=CLOSE, timeoutNotifyStrategy=null, timeout=0, delayTime=0, resourceIds='null', createTime=null, updateTime=null} at java.util.stream.Collectors.lambda$throwingMerger$0(Collectors.java:133) at java.util.HashMap.merge(HashMap.java:1255) at java.util.stream.Collectors.lambda$toMap$58(Collectors.java:1320) at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1384) at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:482) at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) at 
org.apache.dolphinscheduler.service.process.ProcessService.transformTask(ProcessService.java:2517) at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:689) at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$a43f595c.transformTask(<generated>) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.checkTaskRelationList(ProcessDefinitionServiceImpl.java:305) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:528) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:783) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:698) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$1978743c.updateProcessDefinition(<generated>) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:249) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>) ``` ### What you expected to happen update successfully. ### How to reproduce update successfully. ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8296
https://github.com/apache/dolphinscheduler/pull/8408
af39ae3ce9e055da5a9485ccc5f5678e8120ad17
f6e2a2cf2387cf4f3ff1d794cea8158bac989fb7
"2022-02-07T09:57:26Z"
java
"2022-02-18T13:05:43Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.4_schema/mysql/dolphinscheduler_dml.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,296
[Bug] [API] Update process definition error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2022-02-07 17:44:03.394 org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl:[332] - error: java.lang.IllegalStateException: Duplicate key TaskDefinition{id=0, code=3909232388448, name='依赖检查', version=1, description='', projectCode=0, userId=0, taskType=DEPENDENT, taskParams='{"dependence":{"relation":"AND","dependTaskList":[{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232229350,"depTaskCode":0,"cycle":"day","dateValue":"today"}]},{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232232293,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232229346,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230758,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230759,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230765,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230761,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230760,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230887,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230506,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230517,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230259,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232231796,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230263,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230628,"depTaskCode":0,"cycle":"day","dateValue":"today"}]}]},"conditionResult":{"successNode":[],"failedNode":[]},"waitStartTimeout":{"strategy":"FAILED","interval":null,"checkInterval":null,"enable":false},"switchResult":{}}', taskParamList=null, taskParamMap=null, flag=YES, taskPriority=MEDIUM, userName='null', projectName='null', workerGroup='offline-cluster', failRetryTimes=720, environmentCode='-1', failRetryInterval=1, timeoutFlag=CLOSE, timeoutNotifyStrategy=null, timeout=0, delayTime=0, resourceIds='null', createTime=null, updateTime=null} at java.util.stream.Collectors.lambda$throwingMerger$0(Collectors.java:133) at java.util.HashMap.merge(HashMap.java:1255) at java.util.stream.Collectors.lambda$toMap$58(Collectors.java:1320) at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1384) at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:482) at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) at 
org.apache.dolphinscheduler.service.process.ProcessService.transformTask(ProcessService.java:2517) at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:689) at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$a43f595c.transformTask(<generated>) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.checkTaskRelationList(ProcessDefinitionServiceImpl.java:305) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:528) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:783) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:698) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$1978743c.updateProcessDefinition(<generated>) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:249) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>) ``` ### What you expected to happen update successfully. ### How to reproduce update successfully. ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8296
https://github.com/apache/dolphinscheduler/pull/8408
af39ae3ce9e055da5a9485ccc5f5678e8120ad17
f6e2a2cf2387cf4f3ff1d794cea8158bac989fb7
"2022-02-07T09:57:26Z"
java
"2022-02-18T13:05:43Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.4_schema/postgresql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,296
[Bug] [API] Update process definition error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2022-02-07 17:44:03.394 org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl:[332] - error: java.lang.IllegalStateException: Duplicate key TaskDefinition{id=0, code=3909232388448, name='依赖检查', version=1, description='', projectCode=0, userId=0, taskType=DEPENDENT, taskParams='{"dependence":{"relation":"AND","dependTaskList":[{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232229350,"depTaskCode":0,"cycle":"day","dateValue":"today"}]},{"relation":"AND","dependItemList":[{"projectCode":3909232228064,"definitionCode":3909232232293,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232229346,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230758,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230759,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230765,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230761,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230760,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230887,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230506,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230517,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230259,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232231796,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230263,"depTaskCode":0,"cycle":"day","dateValue":"today"},{"projectCode":3909232228064,"definitionCode":3909232230628,"depTaskCode":0,"cycle":"day","dateValue":"today"}]}]},"conditionResult":{"successNode":[],"failedNode":[]},"waitStartTimeout":{"strategy":"FAILED","interval":null,"checkInterval":null,"enable":false},"switchResult":{}}', taskParamList=null, taskParamMap=null, flag=YES, taskPriority=MEDIUM, userName='null', projectName='null', workerGroup='offline-cluster', failRetryTimes=720, environmentCode='-1', failRetryInterval=1, timeoutFlag=CLOSE, timeoutNotifyStrategy=null, timeout=0, delayTime=0, resourceIds='null', createTime=null, updateTime=null} at java.util.stream.Collectors.lambda$throwingMerger$0(Collectors.java:133) at java.util.HashMap.merge(HashMap.java:1255) at java.util.stream.Collectors.lambda$toMap$58(Collectors.java:1320) at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1384) at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:482) at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) at 
org.apache.dolphinscheduler.service.process.ProcessService.transformTask(ProcessService.java:2517) at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:689) at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$a43f595c.transformTask(<generated>) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.checkTaskRelationList(ProcessDefinitionServiceImpl.java:305) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:528) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:783) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:698) at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$1978743c.updateProcessDefinition(<generated>) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:249) at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>) ``` ### What you expected to happen update successfully. ### How to reproduce update successfully. ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8296
https://github.com/apache/dolphinscheduler/pull/8408
af39ae3ce9e055da5a9485ccc5f5678e8120ad17
f6e2a2cf2387cf4f3ff1d794cea8158bac989fb7
"2022-02-07T09:57:26Z"
java
"2022-02-18T13:05:43Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.4_schema/postgresql/dolphinscheduler_dml.sql
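The IllegalStateException in the stack trace above comes from Collectors.toMap, which rejects duplicate keys when no merge function is supplied. The project's actual fix for this issue is the set of 2.0.4 SQL upgrade scripts listed in these rows; the snippet below is only a minimal, hypothetical Java sketch of the failure mode and of how a merge function would avoid it. The simplified TaskDefinition stand-in and the "keep the higher version" merge policy are assumptions for illustration, not DolphinScheduler code.

```java
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class DuplicateKeyDemo {

    // Stand-in for DolphinScheduler's TaskDefinition; only the fields needed
    // to show the failure mode from the issue's stack trace.
    static class TaskDefinition {
        final long code;
        final int version;

        TaskDefinition(long code, int version) {
            this.code = code;
            this.version = version;
        }

        long getCode() {
            return code;
        }

        int getVersion() {
            return version;
        }
    }

    public static void main(String[] args) {
        // Two definitions with the same code, as happens when duplicate rows exist.
        List<TaskDefinition> taskDefinitions = List.of(
                new TaskDefinition(3909232388448L, 1),
                new TaskDefinition(3909232388448L, 2));

        try {
            // Collectors.toMap without a merge function throws
            // IllegalStateException("Duplicate key ...") on the second entry.
            Map<Long, TaskDefinition> byCode = taskDefinitions.stream()
                    .collect(Collectors.toMap(TaskDefinition::getCode, Function.identity()));
            System.out.println(byCode.size());
        } catch (IllegalStateException e) {
            System.out.println("toMap without merge function: " + e.getMessage());
        }

        // Supplying a merge function (here: keep the higher version) avoids the exception.
        Map<Long, TaskDefinition> byCode = taskDefinitions.stream()
                .collect(Collectors.toMap(
                        TaskDefinition::getCode,
                        Function.identity(),
                        (a, b) -> a.getVersion() >= b.getVersion() ? a : b));
        System.out.println("with merge function, entries: " + byCode.size());
    }
}
```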
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,432
[Bug] [Standalone] table t_ds_worker_group not found in standalone mode
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The table t_ds_worker_group is not found in standalone mode. ### What you expected to happen It runs successfully. ### How to reproduce Run the standalone server. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8432
https://github.com/apache/dolphinscheduler/pull/8433
668b36c73187279349ee91e128791926b1bbc86b
8200a3f15ab5c7de0ea8e4d356ce968c6380e42a
"2022-02-18T11:10:48Z"
java
"2022-02-18T13:46:52Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/registry/ServerNodeManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.registry; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.model.Server; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.entity.WorkerGroup; import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper; import org.apache.dolphinscheduler.registry.api.Event; import org.apache.dolphinscheduler.registry.api.Event.Type; import org.apache.dolphinscheduler.registry.api.SubscribeListener; import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory; import org.apache.dolphinscheduler.service.queue.MasterPriorityQueue; import org.apache.dolphinscheduler.service.registry.RegistryClient; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import javax.annotation.PreDestroy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * server node manager */ @Service public class ServerNodeManager implements InitializingBean { private final Logger logger = LoggerFactory.getLogger(ServerNodeManager.class); /** * master lock */ private final Lock masterLock = new ReentrantLock(); /** * worker group lock */ private final Lock workerGroupLock = new ReentrantLock(); /** * worker node info lock */ private final Lock workerNodeInfoLock = new ReentrantLock(); /** * worker group nodes */ private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>(); /** * master nodes */ private final Set<String> masterNodes = new HashSet<>(); /** * worker node info */ private final Map<String, String> workerNodeInfo = new HashMap<>(); /** * executor service */ private ScheduledExecutorService executorService; @Autowired private RegistryClient registryClient; /** * eg : /node/worker/group/127.0.0.1:xxx */ private static final int 
WORKER_LISTENER_CHECK_LENGTH = 5; /** * worker group mapper */ @Autowired private WorkerGroupMapper workerGroupMapper; private final MasterPriorityQueue masterPriorityQueue = new MasterPriorityQueue(); /** * alert dao */ @Autowired private AlertDao alertDao; private static volatile int MASTER_SLOT = 0; private static volatile int MASTER_SIZE = 0; public static int getSlot() { return MASTER_SLOT; } public static int getMasterSize() { return MASTER_SIZE; } /** * init listener * * @throws Exception if error throws Exception */ @Override public void afterPropertiesSet() throws Exception { /** * load nodes from zookeeper */ load(); /** * init executor service */ executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ServerNodeManagerExecutor")); executorService.scheduleWithFixedDelay(new WorkerNodeInfoAndGroupDbSyncTask(), 0, 10, TimeUnit.SECONDS); /* * init MasterNodeListener listener */ registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_MASTERS, new MasterDataListener()); /* * init WorkerNodeListener listener */ registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_WORKERS, new WorkerDataListener()); } /** * load nodes from zookeeper */ public void load() { /* * master nodes from zookeeper */ updateMasterNodes(); /* * worker group nodes from zookeeper */ Collection<String> workerGroups = registryClient.getWorkerGroupDirectly(); for (String workerGroup : workerGroups) { syncWorkerGroupNodes(workerGroup, registryClient.getWorkerGroupNodesDirectly(workerGroup)); } } /** * worker node info and worker group db sync task */ class WorkerNodeInfoAndGroupDbSyncTask implements Runnable { @Override public void run() { // sync worker node info Map<String, String> newWorkerNodeInfo = registryClient.getServerMaps(NodeType.WORKER, true); syncAllWorkerNodeInfo(newWorkerNodeInfo); // sync worker group nodes from database List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup(); if (CollectionUtils.isNotEmpty(workerGroupList)) { for (WorkerGroup wg : workerGroupList) { String workerGroup = wg.getName(); Set<String> nodes = new HashSet<>(); String[] addrs = wg.getAddrList().split(Constants.COMMA); for (String addr : addrs) { if (newWorkerNodeInfo.containsKey(addr)) { nodes.add(addr); } } if (!nodes.isEmpty()) { syncWorkerGroupNodes(workerGroup, nodes); } } } } } /** * worker group node listener */ class WorkerDataListener implements SubscribeListener { @Override public void notify(Event event) { final String path = event.path(); final Type type = event.type(); final String data = event.data(); if (registryClient.isWorkerPath(path)) { try { if (type == Type.ADD) { logger.info("worker group node : {} added.", path); String group = parseGroup(path); Collection<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); logger.info("currentNodes : {}", currentNodes); syncWorkerGroupNodes(group, currentNodes); } else if (type == Type.REMOVE) { logger.info("worker group node : {} down.", path); String group = parseGroup(path); Collection<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); syncWorkerGroupNodes(group, currentNodes); alertDao.sendServerStopedAlert(1, path, "WORKER"); } else if (type == Type.UPDATE) { logger.debug("worker group node : {} update, data: {}", path, data); String group = parseGroup(path); Collection<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); syncWorkerGroupNodes(group, currentNodes); String node = parseNode(path); syncSingleWorkerNodeInfo(node, data); } } catch 
(IllegalArgumentException ex) { logger.warn(ex.getMessage()); } catch (Exception ex) { logger.error("WorkerGroupListener capture data change and get data failed", ex); } } } private String parseGroup(String path) { String[] parts = path.split("/"); if (parts.length < WORKER_LISTENER_CHECK_LENGTH) { throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path)); } return parts[parts.length - 2]; } private String parseNode(String path) { String[] parts = path.split("/"); if (parts.length < WORKER_LISTENER_CHECK_LENGTH) { throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path)); } return parts[parts.length - 1]; } } class MasterDataListener implements SubscribeListener { @Override public void notify(Event event) { final String path = event.path(); final Type type = event.type(); if (registryClient.isMasterPath(path)) { try { if (type.equals(Type.ADD)) { logger.info("master node : {} added.", path); updateMasterNodes(); } if (type.equals(Type.REMOVE)) { logger.info("master node : {} down.", path); updateMasterNodes(); alertDao.sendServerStopedAlert(1, path, "MASTER"); } } catch (Exception ex) { logger.error("MasterNodeListener capture data change and get data failed.", ex); } } } } private void updateMasterNodes() { MASTER_SLOT = 0; this.masterNodes.clear(); String nodeLock = Constants.REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS; try { registryClient.getLock(nodeLock); Collection<String> currentNodes = registryClient.getMasterNodesDirectly(); List<Server> masterNodes = registryClient.getServerList(NodeType.MASTER); syncMasterNodes(currentNodes, masterNodes); } catch (Exception e) { logger.error("update master nodes error", e); } finally { registryClient.releaseLock(nodeLock); } } /** * get master nodes * * @return master nodes */ public Set<String> getMasterNodes() { masterLock.lock(); try { return Collections.unmodifiableSet(masterNodes); } finally { masterLock.unlock(); } } /** * sync master nodes * * @param nodes master nodes */ private void syncMasterNodes(Collection<String> nodes, List<Server> masterNodes) { masterLock.lock(); try { String host = NetUtils.getHost(); this.masterNodes.addAll(nodes); this.masterPriorityQueue.clear(); this.masterPriorityQueue.putList(masterNodes); int index = masterPriorityQueue.getIndex(host); if (index >= 0) { MASTER_SIZE = nodes.size(); MASTER_SLOT = index; } else { logger.warn("current host:{} is not in active master list", host); } logger.info("update master nodes, master size: {}, slot: {}", MASTER_SIZE, MASTER_SLOT); } finally { masterLock.unlock(); } } /** * sync worker group nodes * * @param workerGroup worker group * @param nodes worker nodes */ private void syncWorkerGroupNodes(String workerGroup, Collection<String> nodes) { workerGroupLock.lock(); try { workerGroup = workerGroup.toLowerCase(); Set<String> workerNodes = workerGroupNodes.getOrDefault(workerGroup, new HashSet<>()); workerNodes.clear(); workerNodes.addAll(nodes); workerGroupNodes.put(workerGroup, workerNodes); } finally { workerGroupLock.unlock(); } } public Map<String, Set<String>> getWorkerGroupNodes() { return Collections.unmodifiableMap(workerGroupNodes); } /** * get worker group nodes * * @param workerGroup workerGroup * @return worker nodes */ public Set<String> getWorkerGroupNodes(String workerGroup) { workerGroupLock.lock(); try { if (StringUtils.isEmpty(workerGroup)) { workerGroup = Constants.DEFAULT_WORKER_GROUP; } workerGroup = workerGroup.toLowerCase(); Set<String> nodes = 
workerGroupNodes.get(workerGroup); if (CollectionUtils.isNotEmpty(nodes)) { return Collections.unmodifiableSet(nodes); } return nodes; } finally { workerGroupLock.unlock(); } } /** * get worker node info * * @return worker node info */ public Map<String, String> getWorkerNodeInfo() { return Collections.unmodifiableMap(workerNodeInfo); } /** * get worker node info * * @param workerNode worker node * @return worker node info */ public String getWorkerNodeInfo(String workerNode) { workerNodeInfoLock.lock(); try { return workerNodeInfo.getOrDefault(workerNode, null); } finally { workerNodeInfoLock.unlock(); } } /** * sync worker node info * * @param newWorkerNodeInfo new worker node info */ private void syncAllWorkerNodeInfo(Map<String, String> newWorkerNodeInfo) { workerNodeInfoLock.lock(); try { workerNodeInfo.clear(); workerNodeInfo.putAll(newWorkerNodeInfo); } finally { workerNodeInfoLock.unlock(); } } /** * sync single worker node info */ private void syncSingleWorkerNodeInfo(String node, String info) { workerNodeInfoLock.lock(); try { workerNodeInfo.put(node, info); } finally { workerNodeInfoLock.unlock(); } } /** * destroy */ @PreDestroy public void destroy() { executorService.shutdownNow(); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,135
[Bug] [DataSource] jdbc connect parameters can not input '@'
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In version 2.0.2, when I create a Hive datasource with Kerberos I need to pass the principal in the JDBC connect parameters; when I click Test Connect, the error below is reported. ![image](https://user-images.githubusercontent.com/13765310/150292211-af7d1fb0-51f5-4eab-990e-5bf223248577.png) ### What you expected to happen When the correct principal is entered, the connection to Hive succeeds. ![image](https://user-images.githubusercontent.com/13765310/150292515-7f4dbd33-d91d-416e-b300-3d9db9e19500.png) ### How to reproduce In AbstractDataSourceProcessor.java, modify the PARAMS_PATTER field; an appropriate value is > private static final Pattern PARAMS_PATTER = Pattern.compile("^[a-zA-Z0-9\\-\\_\\/\\@\\.]+$"); ### Anything else _No response_ ### Version 2.0.2 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8135
https://github.com/apache/dolphinscheduler/pull/8293
8200a3f15ab5c7de0ea8e4d356ce968c6380e42a
2a1406073a73b349f63396567919f81a802785d8
"2022-01-20T07:29:54Z"
java
"2022-02-18T13:58:43Z"
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/main/java/org/apache/dolphinscheduler/plugin/datasource/api/datasource/AbstractDataSourceProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.datasource.api.datasource; import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.spi.datasource.ConnectionParam; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.commons.collections4.MapUtils; import java.text.MessageFormat; import java.util.Map; import java.util.regex.Pattern; public abstract class AbstractDataSourceProcessor implements DataSourceProcessor { private static final Pattern IPV4_PATTERN = Pattern.compile("^[a-zA-Z0-9\\_\\-\\.\\,]+$"); private static final Pattern IPV6_PATTERN = Pattern.compile("^[a-zA-Z0-9\\_\\-\\.\\:\\[\\]\\,]+$"); private static final Pattern DATABASE_PATTER = Pattern.compile("^[a-zA-Z0-9\\_\\-\\.]+$"); private static final Pattern PARAMS_PATTER = Pattern.compile("^[a-zA-Z0-9\\-\\_\\/]+$"); @Override public void checkDatasourceParam(BaseDataSourceParamDTO baseDataSourceParamDTO) { checkHost(baseDataSourceParamDTO.getHost()); checkDatasourcePatter(baseDataSourceParamDTO.getDatabase()); checkOther(baseDataSourceParamDTO.getOther()); } /** * Check the host is valid * * @param host datasource host */ protected void checkHost(String host) { if (!IPV4_PATTERN.matcher(host).matches() || !IPV6_PATTERN.matcher(host).matches()) { throw new IllegalArgumentException("datasource host illegal"); } } /** * check database name is valid * * @param database database name */ protected void checkDatasourcePatter(String database) { if (!DATABASE_PATTER.matcher(database).matches()) { throw new IllegalArgumentException("datasource name illegal"); } } /** * check other is valid * * @param other other */ protected void checkOther(Map<String, String> other) { if (MapUtils.isEmpty(other)) { return; } boolean paramsCheck = other.entrySet().stream().allMatch(p -> PARAMS_PATTER.matcher(p.getValue()).matches()); if (!paramsCheck) { throw new IllegalArgumentException("datasource other params illegal"); } } @Override public String getDatasourceUniqueId(ConnectionParam connectionParam, DbType dbType) { BaseConnectionParam baseConnectionParam = (BaseConnectionParam) connectionParam; return MessageFormat.format("{0}@{1}@{2}", dbType.getDescp(), baseConnectionParam.getUser(), baseConnectionParam.getJdbcUrl()); } }
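The issue body above proposes relaxing PARAMS_PATTER so that '@' and '.' are also accepted, allowing a Kerberos principal to be passed in the JDBC connect parameters. The sketch below applies the reporter's suggested regex to the same kind of all-values-match check performed in checkOther; it is a self-contained illustration, and the pattern actually merged in the linked PR may differ.

```java
import java.util.Map;
import java.util.regex.Pattern;

public class ParamsPatternCheck {

    // Original pattern from AbstractDataSourceProcessor: letters, digits, '-', '_', '/'.
    private static final Pattern PARAMS_PATTER_OLD =
            Pattern.compile("^[a-zA-Z0-9\\-\\_\\/]+$");

    // Relaxed pattern proposed in the issue body: additionally allow '@' and '.',
    // so a Kerberos principal such as hive/host@EXAMPLE.COM passes the check.
    private static final Pattern PARAMS_PATTER_RELAXED =
            Pattern.compile("^[a-zA-Z0-9\\-\\_\\/\\@\\.]+$");

    // Same shape as checkOther(...): every connect-parameter value must match the pattern.
    static boolean allValuesMatch(Map<String, String> other, Pattern pattern) {
        return other.values().stream().allMatch(v -> pattern.matcher(v).matches());
    }

    public static void main(String[] args) {
        Map<String, String> other = Map.of("principal", "hive/host@EXAMPLE.COM");
        System.out.println("old pattern accepts principal: "
                + allValuesMatch(other, PARAMS_PATTER_OLD));      // false
        System.out.println("relaxed pattern accepts principal: "
                + allValuesMatch(other, PARAMS_PATTER_RELAXED));  // true
    }
}
```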
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,135
[Bug] [DataSource] jdbc connect parameters can not input '@'
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In version 2.0.2, when I create a Hive datasource with Kerberos, I need to put the principal into the JDBC connect parameters; when I click Test Connect, the error below is reported ![image](https://user-images.githubusercontent.com/13765310/150292211-af7d1fb0-51f5-4eab-990e-5bf223248577.png) ### What you expected to happen When the correct principal is entered, the connection to Hive succeeds ![image](https://user-images.githubusercontent.com/13765310/150292515-7f4dbd33-d91d-416e-b300-3d9db9e19500.png) ### How to reproduce In AbstractDatasourceProcessor.java, modify the PARAMS_PATTER field; an appropriate value is > private static final Pattern PARAMS_PATTER = Pattern.compile("^[a-zA-Z0-9\\-\\_\\/\\@\\.]+$"); ### Anything else _No response_ ### Version 2.0.2 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8135
https://github.com/apache/dolphinscheduler/pull/8293
8200a3f15ab5c7de0ea8e4d356ce968c6380e42a
2a1406073a73b349f63396567919f81a802785d8
"2022-01-20T07:29:54Z"
java
"2022-02-18T13:58:43Z"
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/test/java/org/apache/dolphinscheduler/plugin/datasource/api/datasource/AbstractDataSourceProcessorTest.java
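As an illustration of how the widened whitelist could be exercised in a test — assuming JUnit 4, and using a hypothetical class name rather than the project's actual AbstractDataSourceProcessorTest — a minimal sketch might look like this:

```java
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.regex.Pattern;

import org.junit.Test;

// Hypothetical JUnit 4 test; the class name and cases are invented for illustration.
public class ParamsPatternTest {

    private static final Pattern WIDENED_PARAMS_PATTERN =
            Pattern.compile("^[a-zA-Z0-9\\-\\_\\/\\@\\.]+$");

    @Test
    public void principalWithAtSignAndDotsShouldMatch() {
        // Kerberos principals contain '@' and '.', so they must be accepted.
        assertTrue(WIDENED_PARAMS_PATTERN.matcher("hive/host.example.com@EXAMPLE.COM").matches());
    }

    @Test
    public void valueWithSemicolonShouldStillBeRejected() {
        // Characters outside the whitelist, such as ';', remain rejected.
        assertFalse(WIDENED_PARAMS_PATTERN.matcher("principal;drop").matches());
    }
}
```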
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,213
[Bug] [dolphinscheduler-server] task run error when worker group name contains uppercase letters
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When the worker group name contains uppercase letters, running a task fails; the error is as follows _[INFO] 2022-01-27 08:00:00.389 TaskLogLogger:[134] - set work flow 299 task 463 running [INFO] 2022-01-27 08:00:00.390 TaskLogLogger:[104] - work flow 299 task 463, sub work flow: 301 state: running [ERROR] 2022-01-27 08:00:01.081 org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer:[144] - ExecuteException dispatch error: fail to execute : Command [type=TASK_EXECUTE_REQUEST, opaque=3511226, bodyLen=3375] due to no suitable worker, current task needs worker group test to execute org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException: fail to execute : Command [type=TASK_EXECUTE_REQUEST, opaque=3511226, bodyLen=3375] due to no suitable worker, current task needs worker group test to execute at org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher.dispatch(ExecutorDispatcher.java:89) at org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer.dispatch(TaskPriorityQueueConsumer.java:138) at org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer.run(TaskPriorityQueueConsumer.java:101) [ERROR] 2022-01-27 08:00:01.082 org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer:[144] - ExecuteException dispatch error: fail to execute : Command [type=TASK_EXECUTE_REQUEST, opaque=3511227, bodyLen=3106] due to no suitable worker, current task needs worker group Tenant_2_DS资源组3 to execute org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException: fail to execute : Command [type=TASK_EXECUTE_REQUEST, opaque=3511227, bodyLen=3106] due to no suitable worker, current task needs worker group Tenant_2_DS资源组3 to execute at org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher.dispatch(ExecutorDispatcher.java:89) at org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer.dispatch(TaskPriorityQueueConsumer.java:138) at org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer.run(TaskPriorityQueueConsumer.java:101) [WARN] 2022-01-27 08:00:01.360 org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager:[153] - worker 192.168.92.31:1234 in work group test have not received the heartbeat [WARN] 2022-01-27 08:00:02.361 org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager:[153] - worker 192.168.92.31:1234 in work group test have not received the heartbeat _ Cause: the master's ServerNodeManager.syncWorkerGroupNodes() method converts the worker group name it reads to lower case, but the worker group name read from the task is still uppercase, so the task cannot find the worker group it needs. Fix: comment out the line workerGroup = workerGroup.toLowerCase(); ` private void syncWorkerGroupNodes(String workerGroup, Collection<String> nodes) { workerGroupLock.lock(); try { //workerGroup = workerGroup.toLowerCase(); Set<String> workerNodes = workerGroupNodes.getOrDefault(workerGroup, new HashSet<>()); workerNodes.clear(); workerNodes.addAll(nodes); workerGroupNodes.put(workerGroup, workerNodes); } finally { workerGroupLock.unlock(); } } ` ### What you expected to happen Tasks run normally when the worker group name contains uppercase characters ### How to reproduce Create a worker group whose name contains uppercase characters, create any task, associate it with that worker group, and run it ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8213
https://github.com/apache/dolphinscheduler/pull/8448
02e47da30b76334c6847c8142a735cdd94f0cc43
234f399488e7fb0937cbf322c928f7abb9eb8365
"2022-01-27T04:05:26Z"
java
"2022-02-19T13:38:58Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/registry/ServerNodeManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.registry; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.model.Server; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.entity.WorkerGroup; import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper; import org.apache.dolphinscheduler.registry.api.Event; import org.apache.dolphinscheduler.registry.api.Event.Type; import org.apache.dolphinscheduler.registry.api.SubscribeListener; import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory; import org.apache.dolphinscheduler.service.queue.MasterPriorityQueue; import org.apache.dolphinscheduler.service.registry.RegistryClient; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import javax.annotation.PreDestroy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * server node manager */ @Service public class ServerNodeManager implements InitializingBean { private final Logger logger = LoggerFactory.getLogger(ServerNodeManager.class); /** * master lock */ private final Lock masterLock = new ReentrantLock(); /** * worker group lock */ private final Lock workerGroupLock = new ReentrantLock(); /** * worker node info lock */ private final Lock workerNodeInfoLock = new ReentrantLock(); /** * worker group nodes */ private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>(); /** * master nodes */ private final Set<String> masterNodes = new HashSet<>(); /** * worker node info */ private final Map<String, String> workerNodeInfo = new HashMap<>(); /** * executor service */ private ScheduledExecutorService executorService; @Autowired private RegistryClient registryClient; /** * eg : /node/worker/group/127.0.0.1:xxx */ private static final int 
WORKER_LISTENER_CHECK_LENGTH = 5; /** * worker group mapper */ @Autowired private WorkerGroupMapper workerGroupMapper; private final MasterPriorityQueue masterPriorityQueue = new MasterPriorityQueue(); /** * alert dao */ @Autowired private AlertDao alertDao; private static volatile int MASTER_SLOT = 0; private static volatile int MASTER_SIZE = 0; public static int getSlot() { return MASTER_SLOT; } public static int getMasterSize() { return MASTER_SIZE; } /** * init listener * * @throws Exception if error throws Exception */ @Override public void afterPropertiesSet() throws Exception { /** * load nodes from zookeeper */ load(); /** * init executor service */ executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ServerNodeManagerExecutor")); executorService.scheduleWithFixedDelay(new WorkerNodeInfoAndGroupDbSyncTask(), 0, 10, TimeUnit.SECONDS); /* * init MasterNodeListener listener */ registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_MASTERS, new MasterDataListener()); /* * init WorkerNodeListener listener */ registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_WORKERS, new WorkerDataListener()); } /** * load nodes from zookeeper */ public void load() { /* * master nodes from zookeeper */ updateMasterNodes(); /* * worker group nodes from zookeeper */ Collection<String> workerGroups = registryClient.getWorkerGroupDirectly(); for (String workerGroup : workerGroups) { syncWorkerGroupNodes(workerGroup, registryClient.getWorkerGroupNodesDirectly(workerGroup)); } } /** * worker node info and worker group db sync task */ class WorkerNodeInfoAndGroupDbSyncTask implements Runnable { @Override public void run() { try { // sync worker node info Map<String, String> newWorkerNodeInfo = registryClient.getServerMaps(NodeType.WORKER, true); syncAllWorkerNodeInfo(newWorkerNodeInfo); // sync worker group nodes from database List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup(); if (CollectionUtils.isNotEmpty(workerGroupList)) { for (WorkerGroup wg : workerGroupList) { String workerGroup = wg.getName(); Set<String> nodes = new HashSet<>(); String[] addrs = wg.getAddrList().split(Constants.COMMA); for (String addr : addrs) { if (newWorkerNodeInfo.containsKey(addr)) { nodes.add(addr); } } if (!nodes.isEmpty()) { syncWorkerGroupNodes(workerGroup, nodes); } } } } catch (Exception e) { logger.error("WorkerNodeInfoAndGroupDbSyncTask error:", e); } } } /** * worker group node listener */ class WorkerDataListener implements SubscribeListener { @Override public void notify(Event event) { final String path = event.path(); final Type type = event.type(); final String data = event.data(); if (registryClient.isWorkerPath(path)) { try { if (type == Type.ADD) { logger.info("worker group node : {} added.", path); String group = parseGroup(path); Collection<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); logger.info("currentNodes : {}", currentNodes); syncWorkerGroupNodes(group, currentNodes); } else if (type == Type.REMOVE) { logger.info("worker group node : {} down.", path); String group = parseGroup(path); Collection<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); syncWorkerGroupNodes(group, currentNodes); alertDao.sendServerStopedAlert(1, path, "WORKER"); } else if (type == Type.UPDATE) { logger.debug("worker group node : {} update, data: {}", path, data); String group = parseGroup(path); Collection<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); syncWorkerGroupNodes(group, currentNodes); String 
node = parseNode(path); syncSingleWorkerNodeInfo(node, data); } } catch (IllegalArgumentException ex) { logger.warn(ex.getMessage()); } catch (Exception ex) { logger.error("WorkerGroupListener capture data change and get data failed", ex); } } } private String parseGroup(String path) { String[] parts = path.split("/"); if (parts.length < WORKER_LISTENER_CHECK_LENGTH) { throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path)); } return parts[parts.length - 2]; } private String parseNode(String path) { String[] parts = path.split("/"); if (parts.length < WORKER_LISTENER_CHECK_LENGTH) { throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path)); } return parts[parts.length - 1]; } } class MasterDataListener implements SubscribeListener { @Override public void notify(Event event) { final String path = event.path(); final Type type = event.type(); if (registryClient.isMasterPath(path)) { try { if (type.equals(Type.ADD)) { logger.info("master node : {} added.", path); updateMasterNodes(); } if (type.equals(Type.REMOVE)) { logger.info("master node : {} down.", path); updateMasterNodes(); alertDao.sendServerStopedAlert(1, path, "MASTER"); } } catch (Exception ex) { logger.error("MasterNodeListener capture data change and get data failed.", ex); } } } } private void updateMasterNodes() { MASTER_SLOT = 0; this.masterNodes.clear(); String nodeLock = Constants.REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS; try { registryClient.getLock(nodeLock); Collection<String> currentNodes = registryClient.getMasterNodesDirectly(); List<Server> masterNodes = registryClient.getServerList(NodeType.MASTER); syncMasterNodes(currentNodes, masterNodes); } catch (Exception e) { logger.error("update master nodes error", e); } finally { registryClient.releaseLock(nodeLock); } } /** * get master nodes * * @return master nodes */ public Set<String> getMasterNodes() { masterLock.lock(); try { return Collections.unmodifiableSet(masterNodes); } finally { masterLock.unlock(); } } /** * sync master nodes * * @param nodes master nodes */ private void syncMasterNodes(Collection<String> nodes, List<Server> masterNodes) { masterLock.lock(); try { String host = NetUtils.getHost(); this.masterNodes.addAll(nodes); this.masterPriorityQueue.clear(); this.masterPriorityQueue.putList(masterNodes); int index = masterPriorityQueue.getIndex(host); if (index >= 0) { MASTER_SIZE = nodes.size(); MASTER_SLOT = index; } else { logger.warn("current host:{} is not in active master list", host); } logger.info("update master nodes, master size: {}, slot: {}", MASTER_SIZE, MASTER_SLOT); } finally { masterLock.unlock(); } } /** * sync worker group nodes * * @param workerGroup worker group * @param nodes worker nodes */ private void syncWorkerGroupNodes(String workerGroup, Collection<String> nodes) { workerGroupLock.lock(); try { workerGroup = workerGroup.toLowerCase(); Set<String> workerNodes = workerGroupNodes.getOrDefault(workerGroup, new HashSet<>()); workerNodes.clear(); workerNodes.addAll(nodes); workerGroupNodes.put(workerGroup, workerNodes); } finally { workerGroupLock.unlock(); } } public Map<String, Set<String>> getWorkerGroupNodes() { return Collections.unmodifiableMap(workerGroupNodes); } /** * get worker group nodes * * @param workerGroup workerGroup * @return worker nodes */ public Set<String> getWorkerGroupNodes(String workerGroup) { workerGroupLock.lock(); try { if (StringUtils.isEmpty(workerGroup)) { workerGroup = Constants.DEFAULT_WORKER_GROUP; } 
workerGroup = workerGroup.toLowerCase(); Set<String> nodes = workerGroupNodes.get(workerGroup); if (CollectionUtils.isNotEmpty(nodes)) { return Collections.unmodifiableSet(nodes); } return nodes; } finally { workerGroupLock.unlock(); } } /** * get worker node info * * @return worker node info */ public Map<String, String> getWorkerNodeInfo() { return Collections.unmodifiableMap(workerNodeInfo); } /** * get worker node info * * @param workerNode worker node * @return worker node info */ public String getWorkerNodeInfo(String workerNode) { workerNodeInfoLock.lock(); try { return workerNodeInfo.getOrDefault(workerNode, null); } finally { workerNodeInfoLock.unlock(); } } /** * sync worker node info * * @param newWorkerNodeInfo new worker node info */ private void syncAllWorkerNodeInfo(Map<String, String> newWorkerNodeInfo) { workerNodeInfoLock.lock(); try { workerNodeInfo.clear(); workerNodeInfo.putAll(newWorkerNodeInfo); } finally { workerNodeInfoLock.unlock(); } } /** * sync single worker node info */ private void syncSingleWorkerNodeInfo(String node, String info) { workerNodeInfoLock.lock(); try { workerNodeInfo.put(node, info); } finally { workerNodeInfoLock.unlock(); } } /** * destroy */ @PreDestroy public void destroy() { executorService.shutdownNow(); } }
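The issue's analysis boils down to a case mismatch: the group name is lowercased when the registry is synced, while the task carries the name exactly as configured, so the lookup misses. Below is a standalone sketch of that mismatch as the reporter describes it, with invented class and method names rather than the real ServerNodeManager:

```java
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Standalone illustration of the mismatch described in the issue, not DolphinScheduler code.
public class WorkerGroupCaseSketch {

    private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>();

    void syncLowercased(String workerGroup, Collection<String> nodes) {
        // Pre-fix behaviour: the key is stored in lower case.
        workerGroupNodes.put(workerGroup.toLowerCase(), new HashSet<>(nodes));
    }

    Set<String> lookupAsTaskDoes(String workerGroup) {
        // The task uses the group name exactly as the user configured it.
        return workerGroupNodes.get(workerGroup);
    }

    public static void main(String[] args) {
        WorkerGroupCaseSketch sketch = new WorkerGroupCaseSketch();
        Set<String> nodes = new HashSet<>();
        nodes.add("192.168.92.31:1234");

        sketch.syncLowercased("Test", nodes);                 // stored under "test"
        System.out.println(sketch.lookupAsTaskDoes("Test"));  // null -> "no suitable worker"

        // Storing and reading the name unchanged, as the issue suggests, keeps both sides consistent.
        sketch.workerGroupNodes.put("Test", nodes);
        System.out.println(sketch.lookupAsTaskDoes("Test"));  // [192.168.92.31:1234]
    }
}
```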
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.utils; import org.apache.dolphinscheduler.common.shell.ShellExecutor; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.SystemUtils; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.lang.management.ManagementFactory; import java.lang.management.OperatingSystemMXBean; import java.lang.management.RuntimeMXBean; import java.math.RoundingMode; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.StringTokenizer; import java.util.regex.Pattern; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import oshi.SystemInfo; import oshi.hardware.CentralProcessor; import oshi.hardware.GlobalMemory; import oshi.hardware.HardwareAbstractionLayer; /** * os utils */ public class OSUtils { private static final Logger logger = LoggerFactory.getLogger(OSUtils.class); private static final SystemInfo SI = new SystemInfo(); public static final String TWO_DECIMAL = "0.00"; /** * return -1 when the function can not get hardware env info * e.g {@link OSUtils#loadAverage()} {@link OSUtils#cpuUsage()} */ public static final double NEGATIVE_ONE = -1; private static final HardwareAbstractionLayer hal = SI.getHardware(); private OSUtils() { throw new UnsupportedOperationException("Construct OSUtils"); } /** * Initialization regularization, solve the problem of pre-compilation performance, * avoid the thread safety problem of multi-thread operation */ private static final Pattern PATTERN = Pattern.compile("\\s+"); /** * get memory usage * Keep 2 decimal * * @return percent % */ public static double memoryUsage() { GlobalMemory memory = hal.getMemory(); double memoryUsage = (memory.getTotal() - memory.getAvailable()) * 1.0 / memory.getTotal(); DecimalFormat df = new DecimalFormat(TWO_DECIMAL); df.setRoundingMode(RoundingMode.HALF_UP); return Double.parseDouble(df.format(memoryUsage)); } /** * get available physical memory size * <p> * Keep 2 decimal * * @return available Physical Memory Size, unit: G */ public static double availablePhysicalMemorySize() { GlobalMemory memory = hal.getMemory(); double availablePhysicalMemorySize = memory.getAvailable() / 1024.0 / 1024 / 1024; DecimalFormat df = new DecimalFormat(TWO_DECIMAL); df.setRoundingMode(RoundingMode.HALF_UP); return Double.parseDouble(df.format(availablePhysicalMemorySize)); } /** * load average * * @return load average */ public static double loadAverage() { double loadAverage; try { OperatingSystemMXBean osBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class); loadAverage = osBean.getSystemLoadAverage(); } catch 
(Exception e) { logger.error("get operation system load average exception, try another method ", e); loadAverage = hal.getProcessor().getSystemLoadAverage(); if (Double.isNaN(loadAverage)) { return NEGATIVE_ONE; } } DecimalFormat df = new DecimalFormat(TWO_DECIMAL); df.setRoundingMode(RoundingMode.HALF_UP); return Double.parseDouble(df.format(loadAverage)); } /** * get cpu usage * * @return cpu usage */ public static double cpuUsage() { CentralProcessor processor = hal.getProcessor(); double cpuUsage = processor.getSystemCpuLoad(); if (Double.isNaN(cpuUsage)) { return NEGATIVE_ONE; } DecimalFormat df = new DecimalFormat(TWO_DECIMAL); df.setRoundingMode(RoundingMode.HALF_UP); return Double.parseDouble(df.format(cpuUsage)); } public static List<String> getUserList() { try { if (SystemUtils.IS_OS_MAC) { return getUserListFromMac(); } else if (SystemUtils.IS_OS_WINDOWS) { return getUserListFromWindows(); } else { return getUserListFromLinux(); } } catch (Exception e) { logger.error(e.getMessage(), e); } return Collections.emptyList(); } /** * get user list from linux * * @return user list */ private static List<String> getUserListFromLinux() throws IOException { List<String> userList = new ArrayList<>(); try (BufferedReader bufferedReader = new BufferedReader( new InputStreamReader(new FileInputStream("/etc/passwd")))) { String line; while ((line = bufferedReader.readLine()) != null) { if (line.contains(":")) { String[] userInfo = line.split(":"); userList.add(userInfo[0]); } } } return userList; } /** * get user list from mac * * @return user list */ private static List<String> getUserListFromMac() throws IOException { String result = exeCmd("dscl . list /users"); if (!StringUtils.isEmpty(result)) { return Arrays.asList(result.split("\n")); } return Collections.emptyList(); } /** * get user list from windows * * @return user list */ private static List<String> getUserListFromWindows() throws IOException { String result = exeCmd("net user"); String[] lines = result.split("\n"); int startPos = 0; int endPos = lines.length - 2; for (int i = 0; i < lines.length; i++) { if (lines[i].isEmpty()) { continue; } int count = 0; if (lines[i].charAt(0) == '-') { for (int j = 0; j < lines[i].length(); j++) { if (lines[i].charAt(i) == '-') { count++; } } } if (count == lines[i].length()) { startPos = i + 1; break; } } List<String> users = new ArrayList<>(); while (startPos <= endPos) { users.addAll(Arrays.asList(PATTERN.split(lines[startPos]))); startPos++; } return users; } /** * create user * * @param userName user name */ public static void createUserIfAbsent(String userName) { // if not exists this user, then create if (!getUserList().contains(userName)) { boolean isSuccess = createUser(userName); logger.info("create user {} {}", userName, isSuccess ? 
"success" : "fail"); } } /** * create user * * @param userName user name * @return true if creation was successful, otherwise false */ public static boolean createUser(String userName) { try { String userGroup = getGroup(); if (StringUtils.isEmpty(userGroup)) { String errorLog = String.format("%s group does not exist for this operating system.", userGroup); logger.error(errorLog); return false; } if (SystemUtils.IS_OS_MAC) { createMacUser(userName, userGroup); } else if (SystemUtils.IS_OS_WINDOWS) { createWindowsUser(userName, userGroup); } else { createLinuxUser(userName, userGroup); } return true; } catch (Exception e) { logger.error(e.getMessage(), e); } return false; } /** * create linux user * * @param userName user name * @param userGroup user group * @throws IOException in case of an I/O error */ private static void createLinuxUser(String userName, String userGroup) throws IOException { logger.info("create linux os user: {}", userName); String cmd = String.format("sudo useradd -g %s %s", userGroup, userName); logger.info("execute cmd: {}", cmd); exeCmd(cmd); } /** * create mac user (Supports Mac OSX 10.10+) * * @param userName user name * @param userGroup user group * @throws IOException in case of an I/O error */ private static void createMacUser(String userName, String userGroup) throws IOException { logger.info("create mac os user: {}", userName); String createUserCmd = String.format("sudo sysadminctl -addUser %s -password %s", userName, userName); logger.info("create user command: {}", createUserCmd); exeCmd(createUserCmd); String appendGroupCmd = String.format("sudo dseditgroup -o edit -a %s -t user %s", userName, userGroup); logger.info("append user to group: {}", appendGroupCmd); exeCmd(appendGroupCmd); } /** * create windows user * * @param userName user name * @param userGroup user group * @throws IOException in case of an I/O error */ private static void createWindowsUser(String userName, String userGroup) throws IOException { logger.info("create windows os user: {}", userName); String userCreateCmd = String.format("net user \"%s\" /add", userName); logger.info("execute create user command: {}", userCreateCmd); exeCmd(userCreateCmd); String appendGroupCmd = String.format("net localgroup \"%s\" \"%s\" /add", userGroup, userName); logger.info("execute append user to group: {}", appendGroupCmd); exeCmd(appendGroupCmd); } /** * get system group information * * @return system group info * @throws IOException errors */ public static String getGroup() throws IOException { if (SystemUtils.IS_OS_WINDOWS) { String currentProcUserName = System.getProperty("user.name"); String result = exeCmd(String.format("net user \"%s\"", currentProcUserName)); String line = result.split("\n")[22]; String group = PATTERN.split(line)[1]; if (group.charAt(0) == '*') { return group.substring(1); } else { return group; } } else { String result = exeCmd("groups"); if (!StringUtils.isEmpty(result)) { String[] groupInfo = result.split(" "); return groupInfo[0]; } } return null; } /** * get sudo command * * @param tenantCode tenantCode * @param command command * @return result of sudo execute command */ public static String getSudoCmd(String tenantCode, String command) { if (!CommonUtils.isSudoEnable() || StringUtils.isEmpty(tenantCode)) { return command; } return String.format("sudo -u %s %s", tenantCode, command); } /** * Execute the corresponding command of Linux or Windows * * @param command command * @return result of execute command * @throws IOException errors */ public static String exeCmd(String 
command) throws IOException { StringTokenizer st = new StringTokenizer(command); String[] cmdArray = new String[st.countTokens()]; for (int i = 0; st.hasMoreTokens(); i++) { cmdArray[i] = st.nextToken(); } return exeShell(cmdArray); } /** * Execute the shell * * @param command command * @return result of execute the shell * @throws IOException errors */ public static String exeShell(String[] command) throws IOException { return ShellExecutor.execCommand(command); } /** * get process id * * @return process id */ public static int getProcessID() { RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); return Integer.parseInt(runtimeMXBean.getName().split("@")[0]); } /** * check memory and cpu usage * * @param maxCpuloadAvg maxCpuloadAvg * @param reservedMemory reservedMemory * @return check memory and cpu usage */ public static Boolean checkResource(double maxCpuloadAvg, double reservedMemory) { // system load average double loadAverage = loadAverage(); // system available physical memory double availablePhysicalMemorySize = availablePhysicalMemorySize(); if (loadAverage > maxCpuloadAvg || availablePhysicalMemorySize < reservedMemory) { logger.warn("current cpu load average {} is too high or available memory {}G is too low, under max.cpuload.avg={} and reserved.memory={}G", loadAverage, availablePhysicalMemorySize, maxCpuloadAvg, reservedMemory); return false; } else { return true; } } }
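For the oshi upgrade the issue calls for, newer oshi-core releases drop the no-argument getSystemCpuLoad() used in the OSUtils file above and instead derive CPU usage from two tick snapshots. A hedged sketch of that style of call, assuming an oshi 5.x/6.x-era API and not claiming to be the exact code adopted by the fix, could look like this:

```java
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;

// Illustrative adaptation only: CPU usage computed from two tick snapshots.
public class CpuUsageSketch {

    public static double cpuUsage() throws InterruptedException {
        CentralProcessor processor = new SystemInfo().getHardware().getProcessor();

        long[] prevTicks = processor.getSystemCpuLoadTicks();
        Thread.sleep(1000); // sampling interval between the two tick snapshots
        double cpuUsage = processor.getSystemCpuLoadBetweenTicks(prevTicks);

        // Mirror the project's convention of returning -1 when the value is unavailable.
        return Double.isNaN(cpuUsage) ? -1 : cpuUsage;
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println("cpu usage: " + cpuUsage());
    }
}
```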
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/os/OshiTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.os; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import oshi.SystemInfo; import oshi.hardware.CentralProcessor; import oshi.hardware.CentralProcessor.TickType; import oshi.hardware.GlobalMemory; import oshi.hardware.HardwareAbstractionLayer; import oshi.util.FormatUtil; import oshi.util.Util; import java.util.Arrays; /** * os information test */ public class OshiTest { private static Logger logger = LoggerFactory.getLogger(OshiTest.class); @Test public void test() { SystemInfo si = new SystemInfo(); HardwareAbstractionLayer hal = si.getHardware(); logger.info("Checking Memory..."); printMemory(hal.getMemory()); logger.info("Checking CPU..."); printCpu(hal.getProcessor()); } private static void printMemory(GlobalMemory memory) { logger.info("memory avail:{} MB" , memory.getAvailable() / 1024 / 1024 );//memory avail:6863 MB logger.info("memory total:{} MB" , memory.getTotal() / 1024 / 1024 );//memory total:16384 MB } private static void printCpu(CentralProcessor processor) { logger.info(String.format("CPU load: %.1f%% (OS MXBean)%n", processor.getSystemCpuLoad() * 100));//CPU load: 24.9% (OS MXBean) logger.info("CPU load averages : {}", processor.getSystemLoadAverage());//CPU load averages : 1.5234375 logger.info("Uptime: " + FormatUtil.formatElapsedSecs(processor.getSystemUptime())); logger.info("Context Switches/Interrupts: " + processor.getContextSwitches() + " / " + processor.getInterrupts()); long[] prevTicks = processor.getSystemCpuLoadTicks(); logger.info("CPU, IOWait, and IRQ ticks @ 0 sec:" + Arrays.toString(prevTicks)); //Wait a second... 
Util.sleep(1000); long[] ticks = processor.getSystemCpuLoadTicks(); logger.info("CPU, IOWait, and IRQ ticks @ 1 sec:" + Arrays.toString(ticks)); long user = ticks[TickType.USER.getIndex()] - prevTicks[TickType.USER.getIndex()]; long nice = ticks[TickType.NICE.getIndex()] - prevTicks[TickType.NICE.getIndex()]; long sys = ticks[TickType.SYSTEM.getIndex()] - prevTicks[TickType.SYSTEM.getIndex()]; long idle = ticks[TickType.IDLE.getIndex()] - prevTicks[TickType.IDLE.getIndex()]; long iowait = ticks[TickType.IOWAIT.getIndex()] - prevTicks[TickType.IOWAIT.getIndex()]; long irq = ticks[TickType.IRQ.getIndex()] - prevTicks[TickType.IRQ.getIndex()]; long softirq = ticks[TickType.SOFTIRQ.getIndex()] - prevTicks[TickType.SOFTIRQ.getIndex()]; long steal = ticks[TickType.STEAL.getIndex()] - prevTicks[TickType.STEAL.getIndex()]; long totalCpu = user + nice + sys + idle + iowait + irq + softirq + steal; logger.info(String.format( "User: %.1f%% Nice: %.1f%% System: %.1f%% Idle: %.1f%% IOwait: %.1f%% IRQ: %.1f%% SoftIRQ: %.1f%% Steal: %.1f%%%n", 100d * user / totalCpu, 100d * nice / totalCpu, 100d * sys / totalCpu, 100d * idle / totalCpu, 100d * iowait / totalCpu, 100d * irq / totalCpu, 100d * softirq / totalCpu, 100d * steal / totalCpu)); logger.info(String.format("CPU load: %.1f%% (counting ticks)%n", processor.getSystemCpuLoadBetweenTicks() * 100)); double[] loadAverage = processor.getSystemLoadAverage(3); logger.info("CPU load averages:" + (loadAverage[0] < 0 ? " N/A" : String.format(" %.2f", loadAverage[0])) + (loadAverage[1] < 0 ? " N/A" : String.format(" %.2f", loadAverage[1])) + (loadAverage[2] < 0 ? " N/A" : String.format(" %.2f", loadAverage[2]))); // per core CPU StringBuilder procCpu = new StringBuilder("CPU load per processor:"); double[] load = processor.getProcessorCpuLoadBetweenTicks(); for (double avg : load) { procCpu.append(String.format(" %.1f%%", avg * 100)); } logger.info(procCpu.toString()); } }
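The load-average call changes in a similar way: newer oshi versions expose it as getSystemLoadAverage(int nelem), returning an array of 1-, 5- and 15-minute averages, where negative values mean "not available" (for example on Windows). A minimal sketch, again assuming a newer oshi API rather than the exact fix, is:

```java
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;

// Illustrative adaptation only: load average read through the array-returning API.
public class LoadAverageSketch {

    public static double oneMinuteLoadAverage() {
        CentralProcessor processor = new SystemInfo().getHardware().getProcessor();
        double[] loadAverage = processor.getSystemLoadAverage(1); // only the 1-minute value
        return loadAverage[0] < 0 ? -1 : loadAverage[0];
    }

    public static void main(String[] args) {
        System.out.println("1-minute load average: " + oneMinuteLoadAverage());
    }
}
```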
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
dolphinscheduler-dist/release-docs/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================================================= Apache DolphinScheduler Subcomponents: The Apache DolphinScheduler project contains subcomponents with separate copyright notices and license terms. 
Your use of the source code for the these subcomponents is subject to the terms and conditions of the following licenses. ======================================================================== Apache 2.0 licenses ======================================================================== The following components are provided under the Apache License. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. apacheds-i18n 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-i18n/2.0.0-M15, Apache 2.0 apacheds-kerberos-codec 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-kerberos-codec/2.0.0-M15, Apache 2.0 tomcat-embed-el 9.0.54: https://mvnrepository.com/artifact/org.apache.tomcat.embed/tomcat-embed-el/9.0.54, Apache 2.0 api-asn1-api 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-asn1-api/1.0.0-M20, Apache 2.0 api-util 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-util/1.0.0-M20, Apache 2.0 audience-annotations 0.5.0: https://mvnrepository.com/artifact/org.apache.yetus/audience-annotations/0.5.0, Apache 2.0 avro 1.7.4: https://github.com/apache/avro, Apache 2.0 aws-sdk-java 1.7.4: https://mvnrepository.com/artifact/com.amazonaws/aws-java-sdk/1.7.4, Apache 2.0 bonecp 0.8.0.RELEASE: https://github.com/wwadge/bonecp, Apache 2.0 byte-buddy 1.9.16: https://mvnrepository.com/artifact/net.bytebuddy/byte-buddy/1.9.16, Apache 2.0 caffeine 2.9.2: https://mvnrepository.com/artifact/com.github.ben-manes.caffeine/caffeine/2.9.2, Apache 2.0 classmate 1.5.1: https://mvnrepository.com/artifact/com.fasterxml/classmate/1.5.1, Apache 2.0 clickhouse-jdbc 0.1.52: https://mvnrepository.com/artifact/ru.yandex.clickhouse/clickhouse-jdbc/0.1.52, Apache 2.0 commons-beanutils 1.9.4 https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils/1.9.4, Apache 2.0 commons-cli 1.2: https://mvnrepository.com/artifact/commons-cli/commons-cli/1.2, Apache 2.0 commons-codec 1.11: https://mvnrepository.com/artifact/commons-codec/commons-codec/1.11, Apache 2.0 commons-collections 3.2.2: https://mvnrepository.com/artifact/commons-collections/commons-collections/3.2.2, Apache 2.0 commons-collections4 4.1: https://mvnrepository.com/artifact/org.apache.commons/commons-collections4/4.1, Apache 2.0 commons-compress 1.19: https://mvnrepository.com/artifact/org.apache.commons/commons-compress/1.19, Apache 2.0 commons-configuration 1.10: https://mvnrepository.com/artifact/commons-configuration/commons-configuration/1.10, Apache 2.0 commons-daemon 1.0.13 https://mvnrepository.com/artifact/commons-daemon/commons-daemon/1.0.13, Apache 2.0 commons-dbcp 1.4: https://github.com/apache/commons-dbcp, Apache 2.0 commons-email 1.5: https://github.com/apache/commons-email, Apache 2.0 commons-httpclient 3.0.1: https://mvnrepository.com/artifact/commons-httpclient/commons-httpclient/3.0.1, Apache 2.0 commons-io 2.4: https://github.com/apache/commons-io, Apache 2.0 commons-lang 2.6: https://github.com/apache/commons-lang, Apache 2.0 commons-logging 1.1.1: https://github.com/apache/commons-logging, Apache 2.0 commons-math3 3.1.1: https://mvnrepository.com/artifact/org.apache.commons/commons-math3/3.1.1, Apache 2.0 commons-net 3.1: https://github.com/apache/commons-net, Apache 2.0 commons-pool 1.6: https://github.com/apache/commons-pool, Apache 2.0 cron-utils 9.1.3: https://mvnrepository.com/artifact/com.cronutils/cron-utils/9.1.3, Apache 2.0 commons-lang3 3.12.0: 
https://mvnrepository.com/artifact/org.apache.commons/commons-lang3/3.12.0, Apache 2.0 curator-client 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-client/4.3.0, Apache 2.0 curator-framework 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-framework/4.3.0, Apache 2.0 curator-recipes 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-recipes/4.3.0, Apache 2.0 curator-test 2.12.0: https://mvnrepository.com/artifact/org.apache.curator/curator-test/2.12.0, Apache 2.0 datanucleus-api-jdo 4.2.1: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-api-jdo/4.2.1, Apache 2.0 datanucleus-core 4.1.6: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-core/4.1.6, Apache 2.0 datanucleus-rdbms 4.1.7: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-rdbms/4.1.7, Apache 2.0 derby 10.14.2.0: https://github.com/apache/derby, Apache 2.0 druid 1.1.14: https://mvnrepository.com/artifact/com.alibaba/druid/1.1.14, Apache 2.0 error_prone_annotations 2.1.3 https://mvnrepository.com/artifact/com.google.errorprone/error_prone_annotations/2.1.3, Apache 2.0 gson 2.8.8: https://github.com/google/gson, Apache 2.0 guava 24.1-jre: https://mvnrepository.com/artifact/com.google.guava/guava/24.1-jre, Apache 2.0 guava-retrying 2.0.0: https://mvnrepository.com/artifact/com.github.rholder/guava-retrying/2.0.0, Apache 2.0 guice 3.0: https://mvnrepository.com/artifact/com.google.inject/guice/3.0, Apache 2.0 guice-servlet 3.0: https://mvnrepository.com/artifact/com.google.inject.extensions/guice-servlet/3.0, Apache 2.0 hadoop-annotations 2.7.3:https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-annotations/2.7.3, Apache 2.0 hadoop-auth 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-auth/2.7.3, Apache 2.0 hadoop-aws 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws/2.7.3, Apache 2.0 hadoop-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client/2.7.3, Apache 2.0 hadoop-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common/2.7.3, Apache 2.0 hadoop-hdfs 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs/2.7.3, Apache 2.0 hadoop-mapreduce-client-app 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-app/2.7.3, Apache 2.0 hadoop-mapreduce-client-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-common/2.7.3, Apache 2.0 hadoop-mapreduce-client-core 2.7.3: https://mvnrepository.com/artifact/io.hops/hadoop-mapreduce-client-core/2.7.3, Apache 2.0 hadoop-mapreduce-client-jobclient 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-jobclient/2.7.3, Apache 2.0 hadoop-mapreduce-client-shuffle 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-shuffle/2.7.3, Apache 2.0 hadoop-yarn-api 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-api/2.7.3, Apache 2.0 hadoop-yarn-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-client/2.7.3, Apache 2.0 hadoop-yarn-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-common/2.7.3, Apache 2.0 hadoop-yarn-server-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-server-common/2.7.3, Apache 2.0 HikariCP 4.0.3: https://mvnrepository.com/artifact/com.zaxxer/HikariCP/4.0.3, Apache 2.0 hive-common 2.1.0: 
https://mvnrepository.com/artifact/org.apache.hive/hive-common/2.1.0, Apache 2.0 hive-jdbc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-jdbc/2.1.0, Apache 2.0 hive-metastore 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-metastore/2.1.0, Apache 2.0 hive-orc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-orc/2.1.0, Apache 2.0 hive-serde 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-serde/2.1.0, Apache 2.0 hive-service 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service/2.1.0, Apache 2.0 hive-service-rpc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service-rpc/2.1.0, Apache 2.0 hive-storage-api 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-storage-api/2.1.0, Apache 2.0 htrace-core 3.1.0-incubating: https://mvnrepository.com/artifact/org.apache.htrace/htrace-core/3.1.0-incubating, Apache 2.0 httpclient 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient/4.4.1, Apache 2.0 httpcore 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpcore/4.4.1, Apache 2.0 httpmime 4.5.13: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpmime/4.5.13, Apache 2.0 jackson-annotations 2.10.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-annotations/2.10.5, Apache 2.0 jackson-core 2.10.5: https://github.com/FasterXML/jackson-core, Apache 2.0 jackson-core-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-core-asl/1.9.13, Apache 2.0 jackson-databind 2.10.5: https://github.com/FasterXML/jackson-databind, Apache 2.0 jackson-datatype-jdk8 2.12.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jdk8/2.12.5, Apache 2.0 jackson-datatype-jsr310 2.12.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jsr310/2.12.5, Apache 2.0 jackson-jaxrs 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-jaxrs/1.9.13, Apache 2.0 and LGPL 2.1 jackson-mapper-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-mapper-asl/1.9.13, Apache 2.0 jackson-module-parameter-names 2.12.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.module/jackson-module-parameter-names/2.12.5, Apache 2.0 jackson-xc 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-xc/1.9.13, Apache 2.0 and LGPL 2.1 javax.inject 1: https://mvnrepository.com/artifact/javax.inject/javax.inject/1, Apache 2.0 javax.jdo-3.2.0-m3: https://mvnrepository.com/artifact/org.datanucleus/javax.jdo/3.2.0-m3, Apache 2.0 java-xmlbuilder 0.4 : https://mvnrepository.com/artifact/com.jamesmurty.utils/java-xmlbuilder/0.4, Apache 2.0 jdo-api 3.0.1: https://mvnrepository.com/artifact/javax.jdo/jdo-api/3.0.1, Apache 2.0 jets3t 0.9.0: https://mvnrepository.com/artifact/net.java.dev.jets3t/jets3t/0.9.0, Apache 2.0 jettison 1.1: https://github.com/jettison-json/jettison, Apache 2.0 jetty 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty/6.1.26, Apache 2.0 and EPL 1.0 jetty-continuation 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-continuation/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-http 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-http/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-io 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-io/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-security 9.4.44.v20210927: 
https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-security/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-server 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-server/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-servlet 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlet/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-servlets 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlets/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-util 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty-util/6.1.26, Apache 2.0 and EPL 1.0 jetty-util 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-util/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-util-ajax 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-util-ajax/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-webapp 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-webapp/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-xml 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-xml/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jna 4.5.2: https://mvnrepository.com/artifact/net.java.dev.jna/jna/4.5.2, Apache 2.0 and LGPL 2.1 jna-platform 4.5.2: https://mvnrepository.com/artifact/net.java.dev.jna/jna-platform/4.5.2, Apache 2.0 and LGPL 2.1 joda-time 2.10.13: https://github.com/JodaOrg/joda-time, Apache 2.0 jpam 1.1: https://mvnrepository.com/artifact/net.sf.jpam/jpam/1.1, Apache 2.0 jsqlparser 2.1: https://github.com/JSQLParser/JSqlParser, Apache 2.0 or LGPL 2.1 jsr305 3.0.0: https://mvnrepository.com/artifact/com.google.code.findbugs/jsr305, Apache 2.0 j2objc-annotations 1.1 https://mvnrepository.com/artifact/com.google.j2objc/j2objc-annotations/1.1, Apache 2.0 libfb303 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libfb303/0.9.3, Apache 2.0 libthrift 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libthrift/0.9.3, Apache 2.0 log4j-api 2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api/2.11.2, Apache 2.0 log4j-core-2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/2.11.2, Apache 2.0 log4j 1.2.17: https://mvnrepository.com/artifact/log4j/log4j/1.2.17, Apache 2.0 log4j-1.2-api 2.14.1: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-1.2-api/2.14.1, Apache 2.0 lz4 1.3.0: https://mvnrepository.com/artifact/net.jpountz.lz4/lz4/1.3.0, Apache 2.0 mapstruct 1.2.0.Final: https://github.com/mapstruct/mapstruct, Apache 2.0 mybatis 3.5.2 https://mvnrepository.com/artifact/org.mybatis/mybatis/3.5.2, Apache 2.0 mybatis-plus 3.2.0: https://github.com/baomidou/mybatis-plus, Apache 2.0 mybatis-plus-annotation 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-annotation/3.2.0, Apache 2.0 mybatis-plus-boot-starter 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-boot-starter/3.2.0, Apache 2.0 mybatis-plus-core 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-core/3.2.0, Apache 2.0 mybatis-plus-extension 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-extension/3.2.0, Apache 2.0 mybatis-spring 2.0.2: https://mvnrepository.com/artifact/org.mybatis/mybatis-spring/2.0.2, Apache 2.0 netty 3.6.2.Final: https://github.com/netty/netty, Apache 2.0 netty 4.1.53.Final: https://github.com/netty/netty/blob/netty-4.1.53.Final/LICENSE.txt, Apache 2.0 opencsv 2.3: 
https://mvnrepository.com/artifact/net.sf.opencsv/opencsv/2.3, Apache 2.0 parquet-hadoop-bundle 1.8.1: https://mvnrepository.com/artifact/org.apache.parquet/parquet-hadoop-bundle/1.8.1, Apache 2.0 poi 4.1.2: https://mvnrepository.com/artifact/org.apache.poi/poi/4.1.2, Apache 2.0 poi-ooxml 4.1.2: https://mvnrepository.com/artifact/org.apache.poi/poi-ooxml/4.1.2, Apache 2.0 poi-ooxml-schemas-4.1.2: https://mvnrepository.com/artifact/org.apache.poi/poi-ooxml-schemas/4.1.2, Apache 2.0 quartz 2.3.2: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz/2.3.2, Apache 2.0 quartz-jobs 2.3.2: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz-jobs/2.3.2, Apache 2.0 snakeyaml 1.28: https://mvnrepository.com/artifact/org.yaml/snakeyaml/1.28, Apache 2.0 snappy 0.2: https://mvnrepository.com/artifact/org.iq80.snappy/snappy/0.2, Apache 2.0 snappy-java 1.0.4.1: https://github.com/xerial/snappy-java, Apache 2.0 SparseBitSet 1.2: https://mvnrepository.com/artifact/com.zaxxer/SparseBitSet, Apache 2.0 spring-aop 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-aop/5.3.12, Apache 2.0 spring-beans 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-beans/5.3.12, Apache 2.0 spring-boot 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot/2.5.6, Apache 2.0 spring-boot-actuator 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-actuator/2.5.6, Apache 2.0 spring-boot-actuator-autoconfigure 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-actuator-autoconfigure/2.5.6, Apache 2.0 spring-boot-configuration-processor 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-configuration-processor/2.5.6, Apache 2.0 spring-boot-autoconfigure 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-autoconfigure/2.5.6, Apache 2.0 spring-boot-starter 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter/2.5.6, Apache 2.0 spring-boot-starter-actuator 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-actuator/2.5.6, Apache 2.0 spring-boot-starter-aop 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-aop/2.5.6, Apache 2.0 spring-boot-starter-jdbc 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jdbc/2.5.6, Apache 2.0 spring-boot-starter-jetty 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jetty/2.5.6, Apache 2.0 spring-boot-starter-json 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-json/2.5.6, Apache 2.0 spring-boot-starter-logging 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-logging/2.5.6, Apache 2.0 spring-boot-starter-quartz 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-quartz/2.5.6, Apache 2.0 spring-boot-starter-web 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-web/2.5.6, Apache 2.0 spring-boot-starter-cache 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-cache/2.5.6, Apache 2.0 spring-context 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-context/5.3.12, Apache 2.0 spring-context-support 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-context-support/5.3.12, Apache 2.0 spring-core 5.3.12: 
https://mvnrepository.com/artifact/org.springframework/spring-core, Apache 2.0 spring-expression 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-expression, Apache 2.0 springfox-core 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-core, Apache 2.0 springfox-schema 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-schema, Apache 2.0 springfox-spi 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spi, Apache 2.0 springfox-spring-web 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spring-web/2.9.2, Apache 2.0 springfox-swagger2 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger2/2.9.2, Apache 2.0 springfox-swagger-common 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-common/2.9.2, Apache 2.0 springfox-swagger-ui 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-ui/2.9.2, Apache 2.0 spring-jcl 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-jcl/5.3.12, Apache 2.0 spring-jdbc 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-jdbc/5.3.12, Apache 2.0 spring-plugin-core 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-core/1.2.0.RELEASE, Apache 2.0 spring-plugin-metadata 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-metadata/1.2.0.RELEASE, Apache 2.0 spring-tx 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-tx/5.3.12, Apache 2.0 spring-web 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-web/5.3.12, Apache 2.0 spring-webmvc 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-webmvc/5.3.12, Apache 2.0 swagger-annotations 1.5.20: https://mvnrepository.com/artifact/io.swagger/swagger-annotations/1.5.20, Apache 2.0 swagger-bootstrap-ui 1.9.3: https://mvnrepository.com/artifact/com.github.xiaoymin/swagger-bootstrap-ui/1.9.3, Apache 2.0 swagger-models 1.5.24: https://mvnrepository.com/artifact/io.swagger/swagger-models/1.5.24, Apache 2.0 tephra-api 0.6.0: https://mvnrepository.com/artifact/co.cask.tephra/tephra-api/0.6.0, Apache 2.0 tomcat-embed-el 9.0.54: https://mvnrepository.com/artifact/org.apache.tomcat.embed/tomcat-embed-el/9.0.54, Apache 2.0 xercesImpl 2.9.1: https://mvnrepository.com/artifact/xerces/xercesImpl/2.9.1, Apache 2.0 xmlbeans 3.1.0: https://mvnrepository.com/artifact/org.apache.xmlbeans/xmlbeans/3.1.0, Apache 2.0 xml-apis 1.3.04: https://mvnrepository.com/artifact/xml-apis/xml-apis/1.3.04, Apache 2.0 and W3C zookeeper 3.4.14: https://mvnrepository.com/artifact/org.apache.zookeeper/zookeeper/3.4.14, Apache 2.0 presto-jdbc 0.238.1 https://mvnrepository.com/artifact/com.facebook.presto/presto-jdbc/0.238.1 protostuff-core 1.7.2: https://github.com/protostuff/protostuff/protostuff-core Apache-2.0 protostuff-runtime 1.7.2: https://github.com/protostuff/protostuff/protostuff-core Apache-2.0 protostuff-api 1.7.2: https://github.com/protostuff/protostuff/protostuff-api Apache-2.0 protostuff-collectionschema 1.7.2: https://github.com/protostuff/protostuff/protostuff-collectionschema Apache-2.0 prometheus client_java(simpleclient) 0.12.0: https://github.com/prometheus/client_java, Apache 2.0 snowflake snowflake-2010: https://github.com/twitter-archive/snowflake/tree/snowflake-2010, Apache 2.0 kubernetes-client 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-client/5.8.0, Apache 2.0 kubernetes-model-admissionregistration 5.8.0: 
https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-admissionregistration/5.8.0, Apache 2.0 kubernetes-model-apiextensions 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-apiextensions/5.8.0, Apache 2.0 kubernetes-model-apps 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-apps/5.8.0, Apache 2.0 kubernetes-model-autoscaling 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-autoscaling/5.8.0, Apache 2.0 kubernetes-model-batch 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-autoscaling/5.8.0, Apache 2.0 kubernetes-model-certificates 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-certificates/5.8.0, Apache 2.0 kubernetes-model-common 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-common/5.8.0, Apache 2.0 kubernetes-model-coordination 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-coordination/5.8.0, Apache 2.0 kubernetes-model-core 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-core/5.8.0, Apache 2.0 kubernetes-model-discovery 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-discovery/5.8.0, Apache 2.0 kubernetes-model-events 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-events/5.8.0, Apache 2.0 kubernetes-model-extensions 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-extensions/5.8.0, Apache 2.0 kubernetes-model-flowcontrol 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-flowcontrol/5.8.0, Apache 2.0 kubernetes-model-metrics 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-metrics/5.8.0, Apache 2.0 kubernetes-model-networking 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-networking/5.8.0, Apache 2.0 kubernetes-model-node 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-node/5.8.0, Apache 2.0 kubernetes-model-policy 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-policy/5.8.0, Apache 2.0 kubernetes-model-rbac 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-rbac/5.8.0, Apache 2.0 kubernetes-model-scheduling 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-scheduling/5.8.0, Apache 2.0 kubernetes-model-storageclass 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-storageclass/5.8.0, Apache 2.0 zjsonpatch 0.3.0 https://mvnrepository.com/artifact/io.fabric8/zjsonpatch/0.3.0, Apache 2.0 generex 1.0.2 https://mvnrepository.com/artifact/com.github.mifmif/generex/1.0.2, Apache 2.0 jackson-dataformat-yaml 2.12.5 https://mvnrepository.com/artifact/com.fasterxml.jackson.dataformat/jackson-dataformat-yaml/2.12.5, Apache 2.0 logging-interceptor 3.14.9 https://mvnrepository.com/artifact/com.squareup.okhttp3/logging-interceptor/3.14.9, Apache 2.0 okhttp 3.14.3 https://mvnrepository.com/artifact/com.squareup.okhttp3/okhttp/3.14.3, Apache 2.0 okio 1.17.2 https://mvnrepository.com/artifact/com.squareup.okio/okio/1.17.2, Apache 2.0 ======================================================================== BSD licenses ======================================================================== The following components are provided under a BSD license. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. 
asm 3.1: https://github.com/jdf/javalin/tree/master/lib/asm-3.1, BSD curvesapi 1.06: https://mvnrepository.com/artifact/com.github.virtuald/curvesapi/1.06, BSD 3-clause javolution 5.5.1: https://mvnrepository.com/artifact/javolution/javolution/5.5.1, BSD jline 0.9.94: https://github.com/jline/jline3, BSD jsch 0.1.42: https://mvnrepository.com/artifact/com.jcraft/jsch/0.1.42, BSD leveldbjni-all 1.8: https://github.com/fusesource/leveldbjni, BSD-3-Clause postgresql 42.2.5: https://mvnrepository.com/artifact/org.postgresql/postgresql/42.2.5, BSD 2-clause protobuf-java 2.5.0: https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java/2.5.0, BSD 2-clause paranamer 2.3: https://mvnrepository.com/artifact/com.thoughtworks.paranamer/paranamer/2.3, BSD threetenbp 1.3.6: https://mvnrepository.com/artifact/org.threeten/threetenbp/1.3.6, BSD 3-clause xmlenc 0.52: https://mvnrepository.com/artifact/xmlenc/xmlenc/0.52, BSD py4j 0.10.9: https://mvnrepository.com/artifact/net.sf.py4j/py4j/0.10.9, BSD 2-clause LatencyUtils 2.0.3: https://github.com/LatencyUtils/LatencyUtils, BSD-2-Clause janino 3.1.6: https://mvnrepository.com/artifact/org.codehaus.janino/janino/3.1.6, BSD 3-clause commons-compiler 3.1.6: https://mvnrepository.com/artifact/org.codehaus.janino/janino/3.1.6, BSD 3-clause automaton 1.11-8 https://mvnrepository.com/artifact/dk.brics.automaton/automaton/1.11-8, BSD 2-clause ======================================================================== CDDL licenses ======================================================================== The following components are provided under the CDDL License. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. activation 1.1: https://mvnrepository.com/artifact/javax.activation/activation/1.1 CDDL 1.0 javax.activation-api 1.2.0: https://mvnrepository.com/artifact/javax.activation/javax.activation-api/1.2.0, CDDL and LGPL 2.0 javax.annotation-api 1.3.2: https://mvnrepository.com/artifact/javax.annotation/javax.annotation-api/1.3.2, CDDL + GPLv2 javax.mail 1.6.2: https://mvnrepository.com/artifact/com.sun.mail/javax.mail/1.6.2, CDDL/GPLv2 javax.servlet-api 3.1.0: https://mvnrepository.com/artifact/javax.servlet/javax.servlet-api/3.1.0, CDDL + GPLv2 jaxb-api 2.3.1: https://mvnrepository.com/artifact/javax.xml.bind/jaxb-api/2.3.1, CDDL 1.1 jaxb-impl 2.2.3-1: https://mvnrepository.com/artifact/com.sun.xml.bind/jaxb-impl/2.2.3-1, CDDL and GPL 1.1 jersey-client 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-client/1.9, CDDL 1.1 and GPL 1.1 jersey-core 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-core/1.9, CDDL 1.1 and GPL 1.1 jersey-guice 1.9: https://mvnrepository.com/artifact/com.sun.jersey.contribs/jersey-guice/1.9, CDDL 1.1 and GPL 1.1 jersey-json 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-json/1.9, CDDL 1.1 and GPL 1.1 jersey-server 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-server/1.9, CDDL 1.1 and GPL 1.1 jta 1.1: https://mvnrepository.com/artifact/javax.transaction/jta/1.1, CDDL 1.0 transaction-api 1.1: https://mvnrepository.com/artifact/javax.transaction/transaction-api/1.1, CDDL 1.0 javax.el 3.0.0: https://mvnrepository.com/artifact/org.glassfish/javax.el/3.0.0, CDDL and GPL and GPL 2.0 ======================================================================== EPL licenses ======================================================================== The following components are provided under the EPL License. 
See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. aspectjweaver 1.9.7:https://mvnrepository.com/artifact/org.aspectj/aspectjweaver/1.9.7, EPL 1.0 logback-classic 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-classic/1.2.3, EPL 1.0 and LGPL 2.1 logback-core 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-core/1.2.3, EPL 1.0 and LGPL 2.1 oshi-core 3.9.1: https://mvnrepository.com/artifact/com.github.oshi/oshi-core/3.9.1, EPL 1.0 h2-1.4.200 https://github.com/h2database/h2database/blob/master/LICENSE.txt, MPL 2.0 or EPL 1.0 ======================================================================== MIT licenses ======================================================================== The following components are provided under a MIT 2.0 license. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. jul-to-slf4j 1.7.32: https://mvnrepository.com/artifact/org.slf4j/jul-to-slf4j/1.7.32, MIT mssql-jdbc 6.1.0.jre8: https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc/6.1.0.jre8, MIT slf4j-api 1.7.5: https://mvnrepository.com/artifact/org.slf4j/slf4j-api/1.7.5, MIT animal-sniffer-annotations 1.14 https://mvnrepository.com/artifact/org.codehaus.mojo/animal-sniffer-annotations/1.14, MIT checker-compat-qual 2.0.0 https://mvnrepository.com/artifact/org.checkerframework/checker-compat-qual/2.0.0, MIT + GPLv2 checker-qual 3.10.0 https://mvnrepository.com/artifact/org.checkerframework/checker-qual/3.10.0, MIT + GPLv2 Java-WebSocket 1.5.1: https://github.com/TooTallNate/Java-WebSocket MIT ======================================================================== MPL 1.1 licenses ======================================================================== The following components are provided under a MPL 1.1 license. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. jamon-runtime 2.3.1: https://mvnrepository.com/artifact/org.jamon/jamon-runtime/2.3.1, MPL-1.1 javassist 3.27.0-GA: https://github.com/jboss-javassist/javassist, MPL-1.1 ======================================================================== Public Domain licenses ======================================================================== aopalliance 1.0: https://mvnrepository.com/artifact/aopalliance/aopalliance/1.0, Public Domain ======================================== WTFPL License ======================================== reflections 0.9.12: https://github.com/ronmamo/reflections WTFPL ======================================== CC0-1.0 licenses ======================================== HdrHistogram 2.1.12: https://github.com/HdrHistogram/HdrHistogram , CC0-1.0 and BSD 2-Clause ======================================================================== UI related licenses ======================================================================== The following components are used in UI.See project link for details. The text of each license is also included at licenses/ui-licenses/LICENSE-[project].txt. 
======================================== MIT licenses ======================================== @form-create/element-ui 1.0.18: https://github.com/xaboy/form-create MIT axios 0.16.2: https://github.com/axios/axios MIT bootstrap 3.3.7: https://github.com/twbs/bootstrap MIT canvg 1.5.1: https://github.com/canvg/canvg MIT clipboard 2.0.1: https://github.com/zenorocha/clipboard.js MIT codemirror 5.43.0: https://github.com/codemirror/CodeMirror MIT dayjs 1.7.8: https://github.com/iamkun/dayjs MIT element-ui 2.13.2: https://github.com/ElemeFE/element MIT html2canvas 0.5.0-beta4: https://github.com/niklasvh/html2canvas MIT jquery 3.3.1: https://github.com/jquery/jquery MIT jquery-ui 1.12.1: https://github.com/jquery/jquery-ui MIT js-cookie 2.2.1: https://github.com/js-cookie/js-cookie MIT jsplumb 2.8.6: https://github.com/jsplumb/jsplumb MIT and GPLv2 lodash 4.17.11: https://github.com/lodash/lodash MIT moment-timezone 0.5.33: https://github.com/moment/moment-timezone MIT vue-treeselect 0.4.0: https://github.com/riophae/vue-treeselect MIT vue 2.5.17: https://github.com/vuejs/vue MIT vue-router 2.7.0: https://github.com/vuejs/vue-router MIT vuex 3.0.0: https://github.com/vuejs/vuex MIT vuex-router-sync 4.1.2: https://github.com/vuejs/vuex-router-sync MIT dagre 0.8.5: https://github.com/dagrejs/dagre MIT ======================================== Apache 2.0 licenses ======================================== echarts 4.1.0: https://github.com/apache/incubator-echarts Apache-2.0 remixicon 2.5.0 https://github.com/Remix-Design/remixicon Apache-2.0 ======================================== BSD licenses ======================================== d3 3.5.17: https://github.com/d3/d3 BSD-3-Clause
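As a brief, concrete illustration of the appendix in the license text above ("How to apply the Apache License to your work"), the boilerplate notice is normally placed inside a comment block at the top of each source file. The class name and the copyright owner/year below are hypothetical placeholders used only to show placement; substitute your own identifying information as the appendix describes.

```java
/*
 * Copyright 2022 Example Organization  (hypothetical owner/year, for illustration only)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
public class ExampleService {
    // Application code follows the license notice.
}
```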
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
dolphinscheduler-dist/release-docs/licenses/LICENSE-jna-platform.txt
Java Native Access project (JNA) is dual-licensed under 2 alternative Open Source/Free licenses: LGPL 2.1 or later and Apache License 2.0. (starting with JNA version 4.0.0). You can freely decide which license you want to apply to the project. You may obtain a copy of the LGPL License at: http://www.gnu.org/licenses/licenses.html A copy is also included in the downloadable source code package containing JNA, in file "LGPL2.1", under the same directory as this file. You may obtain a copy of the Apache License at: http://www.apache.org/licenses/ A copy is also included in the downloadable source code package containing JNA, in file "AL2.0", under the same directory as this file.
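The issue record above (and the identical records that follow for the other updated license files) describes the actual fix: upgrade the oshi-core dependency and adapt the OSUtils method calls to the newer oshi API, so that hardware probing no longer loads the x86-only JNA native library on Apple-silicon Macs. The snippet below is only a minimal sketch of CPU/memory sampling against a newer oshi-core API (4.x or later is assumed); the class name is hypothetical and this is not the project's actual OSUtils code.

```java
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;

public class HardwareProbeSketch {
    public static void main(String[] args) throws InterruptedException {
        SystemInfo systemInfo = new SystemInfo();

        // CPU usage: newer oshi versions derive the load from an explicit
        // snapshot of the previous CPU ticks rather than from internal state.
        CentralProcessor processor = systemInfo.getHardware().getProcessor();
        long[] previousTicks = processor.getSystemCpuLoadTicks();
        Thread.sleep(1000L);
        double cpuUsage = processor.getSystemCpuLoadBetweenTicks(previousTicks);

        // Memory figures come from GlobalMemory, reported in bytes.
        GlobalMemory memory = systemInfo.getHardware().getMemory();
        double availableGb = memory.getAvailable() / 1024.0 / 1024.0 / 1024.0;
        double totalGb = memory.getTotal() / 1024.0 / 1024.0 / 1024.0;

        System.out.printf("cpuUsage=%.2f availableMemory=%.2fGB totalMemory=%.2fGB%n",
                cpuUsage, availableGb, totalGb);
    }
}
```

Taking two tick snapshots with a short pause in between is the usual pattern in the newer oshi API; adapting existing call sites to this style is the kind of API change the issue mentions for OSUtils.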
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
dolphinscheduler-dist/release-docs/licenses/LICENSE-jna.txt
Java Native Access project (JNA) is dual-licensed under 2 alternative Open Source/Free licenses: LGPL 2.1 or later and Apache License 2.0. (starting with JNA version 4.0.0). You can freely decide which license you want to apply to the project. You may obtain a copy of the LGPL License at: http://www.gnu.org/licenses/licenses.html A copy is also included in the downloadable source code package containing JNA, in file "LGPL2.1", under the same directory as this file. You may obtain a copy of the Apache License at: http://www.apache.org/licenses/ A copy is also included in the downloadable source code package containing JNA, in file "AL2.0", under the same directory as this file.
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
dolphinscheduler-dist/release-docs/licenses/LICENSE-oshi-core.txt
Oshi (https://github.com/oshi/oshi) Copyright (c) 2010 - 2018 The Oshi Project Team All rights reserved. This program and the accompanying materials are made available under the terms of the Eclipse Public License v1.0 which accompanies this distribution, and is available at http://www.eclipse.org/legal/epl-v10.html Maintainers: dblock[at]dblock[dot]org widdis[at]gmail[dot]com enrico.bianchi[at]gmail[dot]com Contributors: https://github.com/oshi/oshi/graphs/contributors
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it fail to boot on m1 mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler</artifactId> <version>2.0.4-SNAPSHOT</version> <packaging>pom</packaging> <name>${project.artifactId}</name> <url>https://dolphinscheduler.apache.org</url> <description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. </description> <scm> <connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection> <developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection> <url>https://github.com/apache/dolphinscheduler</url> <tag>HEAD</tag> </scm> <mailingLists> <mailingList> <name>DolphinScheduler Developer List</name> <post>dev@dolphinscheduler.apache.org</post> <subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe> <unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe> </mailingList> </mailingLists> <parent> <groupId>org.apache</groupId> <artifactId>apache</artifactId> <version>21</version> </parent> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <curator.version>4.3.0</curator.version> <zookeeper.version>3.4.14</zookeeper.version> <spring.version>5.3.12</spring.version> <spring.boot.version>2.5.6</spring.boot.version> <java.version>1.8</java.version> <logback.version>1.2.3</logback.version> <hadoop.version>2.7.3</hadoop.version> <quartz.version>2.3.2</quartz.version> <jackson.version>2.10.5</jackson.version> <mybatis-plus.version>3.2.0</mybatis-plus.version> <mybatis.spring.version>2.0.1</mybatis.spring.version> <cron.utils.version>9.1.3</cron.utils.version> <druid.version>1.2.4</druid.version> <h2.version>1.4.200</h2.version> <commons.codec.version>1.11</commons.codec.version> <commons.logging.version>1.1.1</commons.logging.version> <httpclient.version>4.4.1</httpclient.version> <httpcore.version>4.4.1</httpcore.version> <junit.version>4.12</junit.version> <mysql.connector.version>8.0.16</mysql.connector.version> <slf4j.api.version>1.7.5</slf4j.api.version> <slf4j.log4j12.version>1.7.5</slf4j.log4j12.version> <commons.collections.version>3.2.2</commons.collections.version> <commons.httpclient>3.0.1</commons.httpclient> <commons.beanutils.version>1.9.4</commons.beanutils.version> 
<commons.configuration.version>1.10</commons.configuration.version> <commons.lang.version>2.6</commons.lang.version> <commons.email.version>1.5</commons.email.version> <poi.version>4.1.2</poi.version> <javax.servlet.api.version>3.1.0</javax.servlet.api.version> <commons.collections4.version>4.1</commons.collections4.version> <guava.version>24.1-jre</guava.version> <postgresql.version>42.2.5</postgresql.version> <hive.jdbc.version>2.1.0</hive.jdbc.version> <commons.io.version>2.4</commons.io.version> <oshi.core.version>3.9.1</oshi.core.version> <clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version> <mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version> <presto.jdbc.version>0.238.1</presto.jdbc.version> <spotbugs.version>3.1.12</spotbugs.version> <checkstyle.version>3.1.2</checkstyle.version> <curator.test>2.12.0</curator.test> <frontend-maven-plugin.version>1.6</frontend-maven-plugin.version> <maven-compiler-plugin.version>3.3</maven-compiler-plugin.version> <maven-assembly-plugin.version>3.3.0</maven-assembly-plugin.version> <maven-release-plugin.version>2.5.3</maven-release-plugin.version> <maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version> <maven-source-plugin.version>2.4</maven-source-plugin.version> <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version> <maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version> <rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version> <jacoco.version>0.8.7</jacoco.version> <jcip.version>1.0</jcip.version> <maven.deploy.skip>false</maven.deploy.skip> <cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version> <servlet-api.version>2.5</servlet-api.version> <swagger.version>1.9.3</swagger.version> <springfox.version>2.9.2</springfox.version> <swagger-models.version>1.5.24</swagger-models.version> <guava-retry.version>2.0.0</guava-retry.version> <protostuff.version>1.7.2</protostuff.version> <reflections.version>0.9.12</reflections.version> <byte-buddy.version>1.9.16</byte-buddy.version> <java-websocket.version>1.5.1</java-websocket.version> <py4j.version>0.10.9</py4j.version> <auto-service.version>1.0.1</auto-service.version> <jacoco.skip>false</jacoco.skip> <netty.version>4.1.53.Final</netty.version> <maven-jar-plugin.version>3.2.0</maven-jar-plugin.version> <powermock.version>2.0.9</powermock.version> <jsr305.version>3.0.0</jsr305.version> <commons-compress.version>1.19</commons-compress.version> <commons-math3.version>3.1.1</commons-math3.version> <error_prone_annotations.version>2.5.1</error_prone_annotations.version> <exec-maven-plugin.version>3.0.0</exec-maven-plugin.version> <janino.version>3.1.6</janino.version> <kubernetes.version>5.8.0</kubernetes.version> <docker.hub>apache</docker.hub> <docker.repo>${project.name}</docker.repo> <docker.tag>${project.version}</docker.tag> </properties> <dependencyManagement> <dependencies> <dependency> <groupId>io.netty</groupId> <artifactId>netty-bom</artifactId> <version>${netty.version}</version> <scope>import</scope> <type>pom</type> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>${spring.boot.version}</version> <type>pom</type> <scope>import</scope> </dependency> <dependency> <groupId>io.netty</groupId> <artifactId>netty-all</artifactId> <version>${netty.version}</version> </dependency> <dependency> <groupId>org.java-websocket</groupId> <artifactId>Java-WebSocket</artifactId> <version>${java-websocket.version}</version> </dependency> <dependency> 
<groupId>com.baomidou</groupId> <artifactId>mybatis-plus-boot-starter</artifactId> <version>${mybatis-plus.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus</artifactId> <version>${mybatis-plus.version}</version> </dependency> <!-- quartz--> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz-jobs</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>com.cronutils</groupId> <artifactId>cron-utils</artifactId> <version>${cron.utils.version}</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>druid</artifactId> <version>${druid.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-core</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-context</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-beans</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-tx</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-jdbc</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-test</artifactId> <version>${spring.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-master</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-worker</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-log-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-standalone-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-common</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-dao</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-remote</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> 
<artifactId>dolphinscheduler-service</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-meter</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-spi</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-data-quality</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-python</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-dingtalk</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-email</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-feishu</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-http</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-script</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-slack</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-wechat</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-pagerduty</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-webexteams</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-telegram</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-zookeeper</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-all</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-api</artifactId> 
<version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-clickhouse</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-db2</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-hive</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-mysql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-oracle</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-postgresql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-sqlserver</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-datax</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-flink</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-http</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-mr</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-pigeon</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-procedure</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-python</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-shell</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-spark</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-sql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-sqoop</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-seatunnel</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-ui</artifactId> <version>${project.version}</version> 
</dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-tools</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-dataquality</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-framework</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> <version>${zookeeper.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> <exclusion> <artifactId>netty</artifactId> <groupId>io.netty</groupId> </exclusion> <exclusion> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-annotations</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-client</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>log4j-1.2-api</groupId> <artifactId>org.apache.logging.log4j</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-recipes</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-test</artifactId> <version>${curator.test}</version> </dependency> <dependency> <groupId>commons-codec</groupId> <artifactId>commons-codec</artifactId> <version>${commons.codec.version}</version> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>${commons.logging.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>${httpclient.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpcore</artifactId> <version>${httpcore.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-databind</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> </dependency> <!--protostuff--> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-core</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-runtime</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>net.bytebuddy</groupId> <artifactId>byte-buddy</artifactId> <version>${byte-buddy.version}</version> </dependency> <dependency> <groupId>org.reflections</groupId> <artifactId>reflections</artifactId> <version>${reflections.version}</version> </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>${junit.version}</version> </dependency> <dependency> <groupId>mysql</groupId> 
<artifactId>mysql-connector-java</artifactId> <version>${mysql.connector.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>com.h2database</groupId> <artifactId>h2</artifactId> <version>${h2.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>${slf4j.api.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>${slf4j.log4j12.version}</version> </dependency> <dependency> <groupId>commons-collections</groupId> <artifactId>commons-collections</artifactId> <version>${commons.collections.version}</version> </dependency> <dependency> <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>${commons.httpclient}</version> </dependency> <dependency> <groupId>commons-beanutils</groupId> <artifactId>commons-beanutils</artifactId> <version>${commons.beanutils.version}</version> </dependency> <dependency> <groupId>commons-configuration</groupId> <artifactId>commons-configuration</artifactId> <version>${commons.configuration.version}</version> </dependency> <dependency> <groupId>commons-lang</groupId> <artifactId>commons-lang</artifactId> <version>${commons.lang.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-email</artifactId> <version>${commons.email.version}</version> </dependency> <!--excel poi--> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi</artifactId> <version>${poi.version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml</artifactId> <version>${poi.version}</version> </dependency> <!-- hadoop --> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> <exclusion> <artifactId>slf4j-log4j12</artifactId> <groupId>org.slf4j</groupId> </exclusion> <exclusion> <artifactId>com.sun.jersey</artifactId> <groupId>jersey-json</groupId> </exclusion> <exclusion> <groupId>junit</groupId> <artifactId>junit</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-common</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-aws</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-collections4</artifactId> <version>${commons.collections4.version}</version> </dependency> <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> <version>${guava.version}</version> </dependency> <dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> <version>${postgresql.version}</version> </dependency> <dependency> <groupId>org.apache.hive</groupId> <artifactId>hive-jdbc</artifactId> <version>${hive.jdbc.version}</version> 
</dependency> <dependency> <groupId>commons-io</groupId> <artifactId>commons-io</artifactId> <version>${commons.io.version}</version> </dependency> <dependency> <groupId>com.github.oshi</groupId> <artifactId>oshi-core</artifactId> <version>${oshi.core.version}</version> </dependency> <dependency> <groupId>ru.yandex.clickhouse</groupId> <artifactId>clickhouse-jdbc</artifactId> <version>${clickhouse.jdbc.version}</version> </dependency> <dependency> <groupId>com.microsoft.sqlserver</groupId> <artifactId>mssql-jdbc</artifactId> <version>${mssql.jdbc.version}</version> </dependency> <dependency> <groupId>com.facebook.presto</groupId> <artifactId>presto-jdbc</artifactId> <version>${presto.jdbc.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> <version>${servlet-api.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>javax.servlet-api</artifactId> <version>${javax.servlet.api.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger-ui</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.swagger</groupId> <artifactId>swagger-models</artifactId> <version>${swagger-models.version}</version> </dependency> <dependency> <groupId>com.github.xiaoymin</groupId> <artifactId>swagger-bootstrap-ui</artifactId> <version>${swagger.version}</version> </dependency> <dependency> <groupId>com.github.rholder</groupId> <artifactId>guava-retrying</artifactId> <version>${guava-retry.version}</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm</artifactId> <version>6.2.1</version> </dependency> <dependency> <groupId>javax.activation</groupId> <artifactId>activation</artifactId> <version>1.1</version> </dependency> <dependency> <groupId>com.sun.mail</groupId> <artifactId>javax.mail</artifactId> <version>1.6.2</version> </dependency> <dependency> <groupId>net.sf.py4j</groupId> <artifactId>py4j</artifactId> <version>${py4j.version}</version> </dependency> <dependency> <groupId>org.codehaus.janino</groupId> <artifactId>janino</artifactId> <version>${janino.version}</version> </dependency> <dependency> <groupId>com.google.code.findbugs</groupId> <artifactId>jsr305</artifactId> <version>${jsr305.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-compress</artifactId> <version>${commons-compress.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-math3</artifactId> <version>${commons-math3.version}</version> </dependency> <dependency> <groupId>com.google.errorprone</groupId> <artifactId>error_prone_annotations</artifactId> <version>${error_prone_annotations.version}</version> </dependency> <dependency> <groupId>io.fabric8</groupId> <artifactId>kubernetes-client</artifactId> <version>${kubernetes.version}</version> </dependency> </dependencies> </dependencyManagement> <build> <pluginManagement> <plugins> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>rpm-maven-plugin</artifactId> <version>${rpm-maven-plugion.version}</version> <inherited>false</inherited> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>${java.version}</source> <target>${java.version}</target> 
<testSource>${java.version}</testSource> <testTarget>${java.version}</testTarget> </configuration> <version>${maven-compiler-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <tagNameFormat>@{project.version}</tagNameFormat> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> <version>${maven-assembly-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <configuration> <source>8</source> <failOnError>false</failOnError> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-dependency-plugin</artifactId> <version>${maven-dependency-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-jar-plugin</artifactId> <version>${maven-jar-plugin.version}</version> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>exec-maven-plugin</artifactId> <version>${exec-maven-plugin.version}</version> <executions> <execution> <id>docker-build</id> <phase>package</phase> <goals> <goal>exec</goal> </goals> <configuration> <environmentVariables> <DOCKER_BUILDKIT>1</DOCKER_BUILDKIT> </environmentVariables> <executable>docker</executable> <workingDirectory>${project.basedir}</workingDirectory> <arguments> <argument>build</argument> <argument>--no-cache</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:${docker.tag}</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:latest</argument> <argument>${project.basedir}</argument> <argument>--file=src/main/docker/Dockerfile</argument> </arguments> </configuration> </execution> <execution> <id>docker-push</id> <phase>deploy</phase> <goals> <goal>exec</goal> </goals> <configuration> <environmentVariables> <DOCKER_BUILDKIT>1</DOCKER_BUILDKIT> </environmentVariables> <executable>docker</executable> <workingDirectory>${project.basedir}</workingDirectory> <arguments> <argument>buildx</argument> <argument>build</argument> <argument>--no-cache</argument> <argument>--push</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:${docker.tag}</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:latest</argument> <argument>${project.basedir}</argument> <argument>--file=src/main/docker/Dockerfile</argument> </arguments> </configuration> </execution> </executions> </plugin> </plugins> </pluginManagement> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <executions> <execution> <id>attach-javadocs</id> <goals> <goal>jar</goal> </goals> </execution> </executions> <configuration> <aggregate>true</aggregate> <charset>${project.build.sourceEncoding}</charset> <encoding>${project.build.sourceEncoding}</encoding> <docencoding>${project.build.sourceEncoding}</docencoding> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <autoVersionSubmodules>true</autoVersionSubmodules> <tagNameFormat>@{project.version}</tagNameFormat> <tagBase>${project.version}</tagBase> </configuration> <dependencies> 
<dependency> <groupId>org.apache.maven.scm</groupId> <artifactId>maven-scm-provider-jgit</artifactId> <version>1.9.5</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>${maven-compiler-plugin.version}</version> <configuration> <source>${java.version}</source> <target>${java.version}</target> <encoding>${project.build.sourceEncoding}</encoding> <skip>false</skip><!--not skip compile test classes--> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>${maven-surefire-plugin.version}</version> <dependencies> <dependency> <groupId>org.apache.maven.surefire</groupId> <artifactId>surefire-junit4</artifactId> <version>${maven-surefire-plugin.version}</version> </dependency> </dependencies> <configuration> <systemPropertyVariables> <jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile> </systemPropertyVariables> </configuration> </plugin> <!-- jenkins plugin jacoco report--> <plugin> <groupId>org.jacoco</groupId> <artifactId>jacoco-maven-plugin</artifactId> <version>${jacoco.version}</version> <configuration> <skip>${jacoco.skip}</skip> <dataFile>${project.build.directory}/jacoco.exec</dataFile> </configuration> <executions> <execution> <id>default-instrument</id> <goals> <goal>instrument</goal> </goals> </execution> <execution> <id>default-restore-instrumented-classes</id> <goals> <goal>restore-instrumented-classes</goal> </goals> <configuration> <excludes>com/github/dreamhead/moco/*</excludes> </configuration> </execution> <execution> <id>default-report</id> <goals> <goal>report</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-maven-plugin</artifactId> <version>${spotbugs.version}</version> <configuration> <xmlOutput>true</xmlOutput> <threshold>medium</threshold> <effort>default</effort> <excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile> <failOnError>true</failOnError> </configuration> <dependencies> <dependency> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs</artifactId> <version>4.0.0-beta4</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-checkstyle-plugin</artifactId> <version>${checkstyle.version}</version> <dependencies> <dependency> <groupId>com.puppycrawl.tools</groupId> <artifactId>checkstyle</artifactId> <version>8.45</version> </dependency> </dependencies> <configuration> <consoleOutput>true</consoleOutput> <encoding>UTF-8</encoding> <configLocation>style/checkstyle.xml</configLocation> <failOnViolation>true</failOnViolation> <violationSeverity>warning</violationSeverity> <includeTestSourceDirectory>true</includeTestSourceDirectory> <sourceDirectories> <sourceDirectory>${project.build.sourceDirectory}</sourceDirectory> </sourceDirectories> <excludes>**\/generated-sources\/</excludes> </configuration> <executions> <execution> <phase>compile</phase> <goals> <goal>check</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>${cobertura-maven-plugin.version}</version> <configuration> <check> </check> <aggregate>true</aggregate> <outputDirectory>./target/cobertura</outputDirectory> <encoding>${project.build.sourceEncoding}</encoding> <quiet>true</quiet> <format>xml</format> <instrumentation> 
<ignoreTrivial>true</ignoreTrivial> </instrumentation> </configuration> </plugin> <plugin> <artifactId>maven-source-plugin</artifactId> <version>${maven-source-plugin.version}</version> <executions> <execution> <id>attach-sources</id> <goals> <goal>jar</goal> </goals> </execution> </executions> </plugin> </plugins> </build> <dependencies> <!-- NOTE: only development / test phase dependencies (scope = test / provided) that won't be packaged into final jar can be declared here. For example: annotation processors, test dependencies that are used by most of the submodules. --> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <scope>test</scope> </dependency> <dependency> <groupId>org.jacoco</groupId> <artifactId>org.jacoco.agent</artifactId> <version>${jacoco.version}</version> <classifier>runtime</classifier> <scope>test</scope> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-configuration-processor</artifactId> <optional>true</optional> </dependency> <dependency> <groupId>com.google.auto.service</groupId> <artifactId>auto-service</artifactId> <version>${auto-service.version}</version> <scope>provided</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-api-mockito2</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-module-junit4</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-core</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> </dependencies> <modules> <module>dolphinscheduler-alert</module> <module>dolphinscheduler-spi</module> <module>dolphinscheduler-registry</module> <module>dolphinscheduler-task-plugin</module> <module>dolphinscheduler-ui</module> <module>dolphinscheduler-server</module> <module>dolphinscheduler-common</module> <module>dolphinscheduler-api</module> <module>dolphinscheduler-dao</module> <module>dolphinscheduler-dist</module> <module>dolphinscheduler-remote</module> <module>dolphinscheduler-service</module> <module>dolphinscheduler-microbench</module> <module>dolphinscheduler-data-quality</module> <module>dolphinscheduler-standalone-server</module> <module>dolphinscheduler-datasource-plugin</module> <module>dolphinscheduler-python</module> <module>dolphinscheduler-meter</module> <module>dolphinscheduler-master</module> <module>dolphinscheduler-worker</module> <module>dolphinscheduler-log-server</module> <module>dolphinscheduler-tools</module> <module>dolphinscheduler-ui-next</module> </modules> </project>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,389
[Bug] [common] oshi compatibility issues cause it to fail to boot on M1 Mac
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I use a macbook with m1 chip, when starting StandaloneServer, I get the following error `Caused by: java.lang.UnsatisfiedLinkError: /private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp: dlopen(/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp, 0x0001): tried: '/private/var/folders/k6/h_l1w_m53l76tkylxlc33q4h0000gn/T/jna-113105/jna1882043576609991586.tmp' (fat file, but missing compatible architecture (have 'i386,x86_64', need 'arm64e')), '/usr/lib/jna1882043576609991586.tmp' (no such file) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1950) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1832) at java.lang.Runtime.load0(Runtime.java:811) at java.lang.System.load(System.java:1088) at com.sun.jna.Native.loadNativeDispatchLibraryFromClasspath(Native.java:947) at com.sun.jna.Native.loadNativeDispatchLibrary(Native.java:922) at com.sun.jna.Native.<clinit>(Native.java:190) at com.sun.jna.Pointer.<clinit>(Pointer.java:54) at com.sun.jna.Structure.<clinit>(Structure.java:2130) at oshi.hardware.platform.mac.MacCentralProcessor.<clinit>(MacCentralProcessor.java:53) at oshi.hardware.platform.mac.MacHardwareAbstractionLayer.getProcessor(MacHardwareAbstractionLayer.java:54) at org.apache.dolphinscheduler.common.utils.OSUtils.cpuUsage(OSUtils.java:136) at org.apache.dolphinscheduler.common.utils.HeartBeat.fillSystemInfo(HeartBeat.java:176) at org.apache.dolphinscheduler.common.utils.HeartBeat.encodeHeartBeat(HeartBeat.java:204) at org.apache.dolphinscheduler.server.registry.HeartBeatTask.getHeartBeatInfo(HeartBeatTask.java:71) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.registry(MasterRegistryClient.java:498) at org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient.start(MasterRegistryClient.java:120) at org.apache.dolphinscheduler.server.master.MasterServer.run(MasterServer.java:136) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ... 18 common frames omitted` To fix this, we need upgrade oshi-core version , and modify some method calls in OSUtils to adapt the new api. ### What you expected to happen backend service can run on m1 chip system ### How to reproduce Start StandaloneServer on a m1 chip macbook ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8389
https://github.com/apache/dolphinscheduler/pull/8390
df6eef84cde877237ef23802338c7a95f97ede9b
83e88c4bc6cc3081dd6c99663d06fe32bb4a8e29
"2022-02-15T06:02:03Z"
java
"2022-02-21T03:14:39Z"
tools/dependencies/known-dependencies.txt
HdrHistogram-2.1.12.jar HikariCP-4.0.3.jar LatencyUtils-2.0.3.jar activation-1.1.jar animal-sniffer-annotations-1.14.jar aopalliance-1.0.jar apacheds-i18n-2.0.0-M15.jar apacheds-kerberos-codec-2.0.0-M15.jar api-asn1-api-1.0.0-M20.jar api-util-1.0.0-M20.jar asm-3.1.jar asm-6.2.1.jar aspectjweaver-1.9.7.jar audience-annotations-0.5.0.jar avro-1.7.4.jar aws-java-sdk-1.7.4.jar bonecp-0.8.0.RELEASE.jar byte-buddy-1.9.16.jar caffeine-2.9.2.jar checker-compat-qual-2.0.0.jar checker-qual-3.10.0.jar classmate-1.5.1.jar clickhouse-jdbc-0.1.52.jar commons-email-1.5.jar commons-cli-1.2.jar commons-codec-1.11.jar commons-collections-3.2.2.jar commons-collections4-4.1.jar commons-compiler-3.1.6.jar commons-compress-1.19.jar commons-configuration-1.10.jar commons-daemon-1.0.13.jar commons-beanutils-1.9.4.jar commons-dbcp-1.4.jar commons-httpclient-3.0.1.jar commons-io-2.4.jar commons-lang-2.6.jar commons-logging-1.1.1.jar commons-math3-3.1.1.jar commons-net-3.1.jar commons-pool-1.6.jar cron-utils-9.1.3.jar error_prone_annotations-2.5.1.jar janino-3.1.6.jar javax.el-3.0.0.jar commons-lang3-3.12.0.jar curator-client-4.3.0.jar curator-framework-4.3.0.jar curator-recipes-4.3.0.jar curator-test-2.12.0.jar curvesapi-1.06.jar datanucleus-api-jdo-4.2.1.jar datanucleus-core-4.1.6.jar datanucleus-rdbms-4.1.7.jar derby-10.14.2.0.jar druid-1.2.4.jar gson-2.8.8.jar guava-24.1-jre.jar guava-retrying-2.0.0.jar guice-3.0.jar guice-servlet-3.0.jar h2-1.4.200.jar hadoop-annotations-2.7.3.jar hadoop-auth-2.7.3.jar hadoop-aws-2.7.3.jar hadoop-client-2.7.3.jar hadoop-common-2.7.3.jar hadoop-hdfs-2.7.3.jar hadoop-mapreduce-client-app-2.7.3.jar hadoop-mapreduce-client-common-2.7.3.jar hadoop-mapreduce-client-core-2.7.3.jar hadoop-mapreduce-client-jobclient-2.7.3.jar hadoop-mapreduce-client-shuffle-2.7.3.jar hadoop-yarn-api-2.7.3.jar hadoop-yarn-client-2.7.3.jar hadoop-yarn-common-2.7.3.jar hadoop-yarn-server-common-2.7.3.jar hive-common-2.1.0.jar hive-jdbc-2.1.0.jar hive-metastore-2.1.0.jar hive-orc-2.1.0.jar hive-serde-2.1.0.jar hive-service-2.1.0.jar hive-service-rpc-2.1.0.jar hive-storage-api-2.1.0.jar htrace-core-3.1.0-incubating.jar httpclient-4.4.1.jar httpcore-4.4.1.jar httpmime-4.5.13.jar j2objc-annotations-1.1.jar jackson-annotations-2.10.5.jar jackson-core-2.10.5.jar jackson-core-asl-1.9.13.jar jackson-databind-2.10.5.jar jackson-datatype-jdk8-2.12.5.jar jackson-datatype-jsr310-2.12.5.jar jackson-jaxrs-1.9.13.jar jackson-mapper-asl-1.9.13.jar jackson-module-parameter-names-2.12.5.jar jackson-xc-1.9.13.jar jakarta.annotation-api-1.3.5.jar jakarta.servlet-api-4.0.4.jar jakarta.websocket-api-1.1.2.jar jamon-runtime-2.3.1.jar java-xmlbuilder-0.4.jar javassist-3.27.0-GA.jar javax.annotation-api-1.3.2.jar javax.activation-api-1.2.0.jar javax.inject-1.jar javax.jdo-3.2.0-m3.jar javax.mail-1.6.2.jar javolution-5.5.1.jar jaxb-api-2.3.1.jar jaxb-impl-2.2.3-1.jar jdo-api-3.0.1.jar jersey-client-1.9.jar jersey-core-1.9.jar jersey-guice-1.9.jar jersey-json-1.9.jar jersey-server-1.9.jar jets3t-0.9.0.jar jettison-1.1.jar jetty-6.1.26.jar jetty-continuation-9.4.44.v20210927.jar jetty-http-9.4.44.v20210927.jar jetty-io-9.4.44.v20210927.jar jetty-security-9.4.44.v20210927.jar jetty-server-9.4.44.v20210927.jar jetty-servlet-9.4.44.v20210927.jar jetty-servlets-9.4.44.v20210927.jar jetty-util-6.1.26.jar jetty-util-9.4.44.v20210927.jar jetty-util-ajax-9.4.44.v20210927.jar jetty-webapp-9.4.44.v20210927.jar jetty-xml-9.4.44.v20210927.jar jline-0.9.94.jar jna-4.5.2.jar jna-platform-4.5.2.jar joda-time-2.10.13.jar jpam-1.1.jar jsch-0.1.42.jar 
jsp-api-2.1.jar jsqlparser-2.1.jar jsr305-3.0.0.jar jta-1.1.jar jul-to-slf4j-1.7.32.jar leveldbjni-all-1.8.jar libfb303-0.9.3.jar libthrift-0.9.3.jar log4j-1.2-api-2.14.1.jar log4j-1.2.17.jar logback-classic-1.2.3.jar logback-core-1.2.3.jar lz4-1.3.0.jar mapstruct-1.2.0.Final.jar micrometer-core-1.7.5.jar micrometer-registry-prometheus-1.7.5.jar mssql-jdbc-6.1.0.jre8.jar mybatis-3.5.2.jar mybatis-plus-3.2.0.jar mybatis-plus-annotation-3.2.0.jar mybatis-plus-boot-starter-3.2.0.jar mybatis-plus-core-3.2.0.jar mybatis-plus-extension-3.2.0.jar mybatis-spring-2.0.2.jar netty-3.6.2.Final.jar netty-all-4.1.53.Final.jar opencsv-2.3.jar oshi-core-3.9.1.jar paranamer-2.3.jar parquet-hadoop-bundle-1.8.1.jar poi-4.1.2.jar poi-ooxml-4.1.2.jar poi-ooxml-schemas-4.1.2.jar postgresql-42.2.5.jar presto-jdbc-0.238.1.jar protobuf-java-2.5.0.jar protostuff-core-1.7.2.jar protostuff-runtime-1.7.2.jar protostuff-api-1.7.2.jar protostuff-collectionschema-1.7.2.jar py4j-0.10.9.jar quartz-2.3.2.jar quartz-jobs-2.3.2.jar reflections-0.9.12.jar simpleclient-0.10.0.jar simpleclient_common-0.10.0.jar slf4j-api-1.7.5.jar snakeyaml-1.28.jar snappy-0.2.jar snappy-java-1.0.4.1.jar SparseBitSet-1.2.jar spring-aop-5.3.12.jar spring-beans-5.3.12.jar spring-boot-2.5.6.jar spring-boot-actuator-2.5.6.jar spring-boot-actuator-autoconfigure-2.5.6.jar spring-boot-autoconfigure-2.5.6.jar spring-boot-configuration-processor-2.5.6.jar spring-boot-starter-2.5.6.jar spring-boot-starter-actuator-2.5.6.jar spring-boot-starter-aop-2.5.6.jar spring-boot-starter-jdbc-2.5.6.jar spring-boot-starter-jetty-2.5.6.jar spring-boot-starter-json-2.5.6.jar spring-boot-starter-logging-2.5.6.jar spring-boot-starter-quartz-2.5.6.jar spring-boot-starter-web-2.5.6.jar spring-boot-starter-cache-2.5.6.jar spring-context-5.3.12.jar spring-context-support-5.3.12.jar spring-core-5.3.12.jar spring-expression-5.3.12.jar spring-jcl-5.3.12.jar spring-jdbc-5.3.12.jar spring-plugin-core-1.2.0.RELEASE.jar spring-plugin-metadata-1.2.0.RELEASE.jar spring-tx-5.3.12.jar spring-web-5.3.12.jar spring-webmvc-5.3.12.jar springfox-core-2.9.2.jar springfox-schema-2.9.2.jar springfox-spi-2.9.2.jar springfox-spring-web-2.9.2.jar springfox-swagger-common-2.9.2.jar springfox-swagger-ui-2.9.2.jar springfox-swagger2-2.9.2.jar swagger-annotations-1.5.20.jar swagger-bootstrap-ui-1.9.3.jar swagger-models-1.5.24.jar tomcat-embed-el-9.0.54.jar tephra-api-0.6.0.jar transaction-api-1.1.jar xercesImpl-2.9.1.jar xml-apis-1.3.04.jar xmlbeans-3.1.0.jar xmlenc-0.52.jar zookeeper-3.4.14.jar Java-WebSocket-1.5.1.jar kubernetes-client-5.8.0.jar kubernetes-model-admissionregistration-5.8.0.jar kubernetes-model-apiextensions-5.8.0.jar kubernetes-model-apps-5.8.0.jar kubernetes-model-autoscaling-5.8.0.jar kubernetes-model-batch-5.8.0.jar kubernetes-model-certificates-5.8.0.jar kubernetes-model-common-5.8.0.jar kubernetes-model-coordination-5.8.0.jar kubernetes-model-core-5.8.0.jar kubernetes-model-discovery-5.8.0.jar kubernetes-model-events-5.8.0.jar kubernetes-model-extensions-5.8.0.jar kubernetes-model-flowcontrol-5.8.0.jar kubernetes-model-metrics-5.8.0.jar kubernetes-model-networking-5.8.0.jar kubernetes-model-node-5.8.0.jar kubernetes-model-policy-5.8.0.jar kubernetes-model-rbac-5.8.0.jar kubernetes-model-scheduling-5.8.0.jar kubernetes-model-storageclass-5.8.0.jar zjsonpatch-0.3.0.jar automaton-1.11-8.jar generex-1.0.2.jar jackson-dataformat-yaml-2.12.5.jar logging-interceptor-3.14.9.jar okhttp-3.14.9.jar okio-1.17.2.jar
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,463
[Bug] [API] releaseWorkflowAndSchedule API for timing does not take effect / relation API causes workflow to go offline after binding
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. The releaseWorkflowAndSchedule API for timing does not take effect. 2. The relation API causes the workflow to go offline after binding. ### What you expected to happen Both APIs should work normally. ### How to reproduce Call the APIs directly. ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8463
https://github.com/apache/dolphinscheduler/pull/8464
cd8c5e1c9aeff536c705c9152b31a09a674ea864
8d0a0f9c323c8153573b159090aa93d6ea87a30d
"2022-02-21T07:07:39Z"
java
"2022-02-21T07:58:59Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.ScheduleParam; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ConditionType; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.ProcessExecutionTypeEnum; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.task.sql.SqlParameters; import org.apache.dolphinscheduler.common.task.sql.SqlType; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils.CodeGenerateException; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; 
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Stat; import java.io.BufferedOutputStream; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; 
@Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; @Autowired private DataSourceMapper dataSourceMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson, ProcessExecutionTypeEnum executionType) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson); if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) { return checkTaskDefinitions; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = CodeGenerateUtils.getInstance().genCode(); } catch (CodeGenerateException e) { putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS); return result; } ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, loginUser.getId(), tenantId); processDefinition.setExecutionType(executionType); return createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs); } private Map<String, Object> createDagDefine(User loginUser, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); int saveTaskResult = 
processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs, Boolean.TRUE); if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) { logger.info("The task has not changed, so skip"); } if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, Boolean.TRUE, Boolean.TRUE); if (insertVersion == 0) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR); } int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs, Boolean.TRUE); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR); } return result; } private Map<String, Object> checkTaskDefinitionList(List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) { Map<String, Object> result = new HashMap<>(); try { if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return result; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); try { if (taskRelationList == null || taskRelationList.isEmpty()) { logger.error("task relation list is null"); putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson); return result; } List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream() .map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class)) .collect(Collectors.toList()); List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs); if (taskNodeList.size() != taskRelationList.size()) { Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet()); Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet()); Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes); if (CollectionUtils.isNotEmpty(codes)) { logger.error("the task code is not exist"); putMsg(result, Status.TASK_DEFINE_NOT_EXIST, org.apache.commons.lang.StringUtils.join(codes, Constants.COMMA)); return result; } } if (graphHasCycle(taskNodeList)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the task relation json is normal for (ProcessTaskRelationLog 
processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPostTaskCode() == 0) { logger.error("the post_task_code or post_task_version can't be zero"); putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * query process definition list * * @param loginUser login user * @param projectCode project code * @return definition list */ @Override public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * query process definition simple list * * @param loginUser login user * @param projectCode project code * @return definition simple list */ @Override public Map<String, Object> queryProcessDefinitionSimpleList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode); ArrayNode arrayNode = JSONUtils.createArrayNode(); for (ProcessDefinition processDefinition : processDefinitions) { ObjectNode processDefinitionNode = JSONUtils.createObjectNode(); processDefinitionNode.put("id", processDefinition.getId()); processDefinitionNode.put("code", processDefinition.getCode()); processDefinitionNode.put("name", processDefinition.getName()); processDefinitionNode.put("projectCode", processDefinition.getProjectCode()); arrayNode.add(processDefinitionNode); } result.put(Constants.DATA_LIST, arrayNode); putMsg(result, Status.SUCCESS); return result; } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param userId user id * @param pageNo page number * @param pageSize page size * @return process definition page */ @Override public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<ProcessDefinition> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging( page, searchVal, userId, project.getCode(), isAdmin(loginUser)); List<ProcessDefinition> records = processDefinitionIPage.getRecords(); for (ProcessDefinition pd : records) { 
ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion()); User user = userMapper.selectById(processDefinitionLog.getOperator()); pd.setModifyBy(user.getUserName()); } processDefinitionIPage.setRecords(records); PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) processDefinitionIPage.getTotal()); pageInfo.setTotalList(processDefinitionIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * query detail of process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @Override public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId()); if (tenant != null) { processDefinition.setTenantCode(tenant.getTenantCode()); } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } @Override public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name); } else { DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson, ProcessExecutionTypeEnum executionType) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, 
TaskDefinitionLog.class); Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson); if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) { return checkTaskDefinitions; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId); processDefinition.setExecutionType(executionType); return updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs); } private Map<String, Object> updateDagDefine(User loginUser, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, ProcessDefinition processDefinitionDeepCopy, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs, Boolean.TRUE); if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) { logger.info("The task has not changed, so skip"); } if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR); } boolean isChange = false; if (processDefinition.equals(processDefinitionDeepCopy) && saveTaskResult == Constants.EXIT_CODE_SUCCESS) { List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); if (taskRelationList.size() == processTaskRelationLogList.size()) { Set<ProcessTaskRelationLog> taskRelationSet = taskRelationList.stream().collect(Collectors.toSet()); Set<ProcessTaskRelationLog> processTaskRelationLogSet = processTaskRelationLogList.stream().collect(Collectors.toSet()); if (taskRelationSet.size() == processTaskRelationLogSet.size()) { taskRelationSet.removeAll(processTaskRelationLogSet); if (!taskRelationSet.isEmpty()) { isChange = true; } } else { isChange = true; } } else { isChange = true; } } else { isChange = true; } if (isChange) { 
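            // the definition or its task relations changed: persist a new process definition version
            // and rebuild the task relations against that version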
processDefinition.setUpdateTime(new Date()); int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, Boolean.TRUE, Boolean.TRUE); if (insertVersion <= 0) { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs, Boolean.TRUE); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } return result; } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @Override public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim()); if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim()); } return result; } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } // Determine if the login user is the owner of the process definition if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } // check process definition is already online if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code); return result; } // check process instances is already running List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES); if (CollectionUtils.isNotEmpty(processInstances)) { putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size()); return result; } // get the timing according to the process definition Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(code); if (scheduleObj != null) { if 
(scheduleObj.getReleaseState() == ReleaseState.OFFLINE) {
                int delete = scheduleMapper.deleteById(scheduleObj.getId());
                if (delete == 0) {
                    putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
                    throw new ServiceException(Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
                }
            }
            if (scheduleObj.getReleaseState() == ReleaseState.ONLINE) {
                putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, scheduleObj.getId());
                return result;
            }
        }
        int delete = processDefinitionMapper.deleteById(processDefinition.getId());
        if (delete == 0) {
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
        }
        int deleteRelation = processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode());
        if (deleteRelation == 0) {
            logger.warn("The process definition has no task relation, it will be deleted successfully");
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * release process definition: online / offline
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @param releaseState release state
     * @return release result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        // check state
        if (null == releaseState) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        switch (releaseState) {
            case ONLINE:
                List<ProcessTaskRelation> relationList = processService.findRelationByCode(code, processDefinition.getVersion());
                if (CollectionUtils.isEmpty(relationList)) {
                    putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
                    return result;
                }
                processDefinition.setReleaseState(releaseState);
                processDefinitionMapper.updateById(processDefinition);
                break;
            case OFFLINE:
                processDefinition.setReleaseState(releaseState);
                int updateProcess = processDefinitionMapper.updateById(processDefinition);
                Schedule schedule = scheduleMapper.queryByProcessDefinitionCode(code);
                if (updateProcess > 0 && schedule != null) {
                    logger.info("set schedule offline, project code: {}, schedule id: {}, process definition code: {}", projectCode, schedule.getId(), code);
                    // set status
                    schedule.setReleaseState(releaseState);
                    int updateSchedule = scheduleMapper.updateById(schedule);
                    if (updateSchedule == 0) {
                        putMsg(result, Status.OFFLINE_SCHEDULE_ERROR);
                        throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR);
                    }
                    schedulerService.deleteSchedule(project.getId(), schedule.getId());
                }
                break;
            default:
                putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
                return result;
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * batch export process definition by codes
     */
    @Override
    public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) {
        if (org.apache.commons.lang.StringUtils.isEmpty(codes)) {
            return;
        }
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String,
Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); if (CollectionUtils.isEmpty(processDefinitionList)) { return; } // check processDefinition exist in project List<ProcessDefinition> processDefinitionListInProject = processDefinitionList.stream().filter(o -> projectCode == o.getProjectCode()).collect(Collectors.toList()); List<DagDataSchedule> dagDataSchedules = processDefinitionListInProject.stream().map(this::exportProcessDagData).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(dagDataSchedules)) { downloadProcessDefinitionFile(response, dagDataSchedules); } } /** * download the process definition file */ private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) { response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE); BufferedOutputStream buff = null; ServletOutputStream out = null; try { out = response.getOutputStream(); buff = new BufferedOutputStream(out); buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8)); buff.flush(); buff.close(); } catch (IOException e) { logger.warn("export process fail", e); } finally { if (null != buff) { try { buff.close(); } catch (Exception e) { logger.warn("export process buffer not close", e); } } if (null != out) { try { out.close(); } catch (Exception e) { logger.warn("export process output stream not close", e); } } } } /** * get export process dag data * * @param processDefinition process definition * @return DagDataSchedule */ public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) { Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode()); DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition)); if (scheduleObj != null) { scheduleObj.setReleaseState(ReleaseState.OFFLINE); dagDataSchedule.setSchedule(scheduleObj); } return dagDataSchedule; } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file process metadata json file * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String dagDataScheduleJson = FileUtils.file2String(file); List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class); //check file content if (CollectionUtils.isEmpty(dagDataScheduleList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (DagDataSchedule dagDataSchedule : dagDataScheduleList) { if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) { return result; } } return result; } @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importSqlProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String processDefinitionName = file.getOriginalFilename() == null ? 
file.getName() : file.getOriginalFilename(); int index = processDefinitionName.lastIndexOf("."); if (index > 0) { processDefinitionName = processDefinitionName.substring(0, index); } processDefinitionName = processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp(); ProcessDefinition processDefinition; List<TaskDefinitionLog> taskDefinitionList = new ArrayList<>(); List<ProcessTaskRelationLog> processTaskRelationList = new ArrayList<>(); // for Zip Bomb Attack final int THRESHOLD_ENTRIES = 10000; final int THRESHOLD_SIZE = 1000000000; // 1 GB final double THRESHOLD_RATIO = 10; int totalEntryArchive = 0; int totalSizeEntry = 0; // In most cases, there will be only one data source Map<String, DataSource> dataSourceCache = new HashMap<>(1); Map<String, Long> taskNameToCode = new HashMap<>(16); Map<String, List<String>> taskNameToUpstream = new HashMap<>(16); try (ZipInputStream zIn = new ZipInputStream(file.getInputStream()); BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(zIn))) { // build process definition processDefinition = new ProcessDefinition(projectCode, processDefinitionName, CodeGenerateUtils.getInstance().genCode(), "", "[]", null, 0, loginUser.getId(), loginUser.getTenantId()); ZipEntry entry; while ((entry = zIn.getNextEntry()) != null) { totalEntryArchive++; int totalSizeArchive = 0; if (!entry.isDirectory()) { StringBuilder sql = new StringBuilder(); String taskName = null; String datasourceName = null; List<String> upstreams = Collections.emptyList(); String line; while ((line = bufferedReader.readLine()) != null) { int nBytes = line.getBytes(StandardCharsets.UTF_8).length; totalSizeEntry += nBytes; totalSizeArchive += nBytes; long compressionRatio = totalSizeEntry / entry.getCompressedSize(); if (compressionRatio > THRESHOLD_RATIO) { throw new IllegalStateException("ratio between compressed and uncompressed data is highly suspicious, looks like a Zip Bomb Attack"); } int commentIndex = line.indexOf("-- "); if (commentIndex >= 0) { int colonIndex = line.indexOf(":", commentIndex); if (colonIndex > 0) { String key = line.substring(commentIndex + 3, colonIndex).trim().toLowerCase(); String value = line.substring(colonIndex + 1).trim(); switch (key) { case "name": taskName = value; line = line.substring(0, commentIndex); break; case "upstream": upstreams = Arrays.stream(value.split(",")).map(String::trim) .filter(s -> !"".equals(s)).collect(Collectors.toList()); line = line.substring(0, commentIndex); break; case "datasource": datasourceName = value; line = line.substring(0, commentIndex); break; default: break; } } } if (!"".equals(line)) { sql.append(line).append("\n"); } } // import/sql1.sql -> sql1 if (taskName == null) { taskName = entry.getName(); index = taskName.indexOf("/"); if (index > 0) { taskName = taskName.substring(index + 1); } index = taskName.lastIndexOf("."); if (index > 0) { taskName = taskName.substring(0, index); } } DataSource dataSource = dataSourceCache.get(datasourceName); if (dataSource == null) { dataSource = queryDatasourceByNameAndUser(datasourceName, loginUser); } if (dataSource == null) { putMsg(result, Status.DATASOURCE_NAME_ILLEGAL); return result; } dataSourceCache.put(datasourceName, dataSource); TaskDefinitionLog taskDefinition = buildNormalSqlTaskDefinition(taskName, dataSource, sql.substring(0, sql.length() - 1)); taskDefinitionList.add(taskDefinition); taskNameToCode.put(taskDefinition.getName(), taskDefinition.getCode()); taskNameToUpstream.put(taskDefinition.getName(), upstreams); } if (totalSizeArchive 
> THRESHOLD_SIZE) { throw new IllegalStateException("the uncompressed data size is too much for the application resource capacity"); } if (totalEntryArchive > THRESHOLD_ENTRIES) { throw new IllegalStateException("too much entries in this archive, can lead to inodes exhaustion of the system"); } } } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); return result; } // build task relation for (Map.Entry<String, Long> entry : taskNameToCode.entrySet()) { List<String> upstreams = taskNameToUpstream.get(entry.getKey()); if (CollectionUtils.isEmpty(upstreams) || (upstreams.size() == 1 && upstreams.contains("root") && !taskNameToCode.containsKey("root"))) { ProcessTaskRelationLog processTaskRelation = buildNormalTaskRelation(0, entry.getValue()); processTaskRelationList.add(processTaskRelation); continue; } for (String upstream : upstreams) { ProcessTaskRelationLog processTaskRelation = buildNormalTaskRelation(taskNameToCode.get(upstream), entry.getValue()); processTaskRelationList.add(processTaskRelation); } } return createDagDefine(loginUser, processTaskRelationList, processDefinition, taskDefinitionList); } private ProcessTaskRelationLog buildNormalTaskRelation(long preTaskCode, long postTaskCode) { ProcessTaskRelationLog processTaskRelation = new ProcessTaskRelationLog(); processTaskRelation.setPreTaskCode(preTaskCode); processTaskRelation.setPreTaskVersion(0); processTaskRelation.setPostTaskCode(postTaskCode); processTaskRelation.setPostTaskVersion(0); processTaskRelation.setConditionType(ConditionType.NONE); processTaskRelation.setName(""); return processTaskRelation; } private DataSource queryDatasourceByNameAndUser(String datasourceName, User loginUser) { if (isAdmin(loginUser)) { List<DataSource> dataSources = dataSourceMapper.queryDataSourceByName(datasourceName); if (CollectionUtils.isNotEmpty(dataSources)) { return dataSources.get(0); } } else { return dataSourceMapper.queryDataSourceByNameAndUserId(loginUser.getId(), datasourceName); } return null; } private TaskDefinitionLog buildNormalSqlTaskDefinition(String taskName, DataSource dataSource, String sql) throws CodeGenerateException { TaskDefinitionLog taskDefinition = new TaskDefinitionLog(); taskDefinition.setName(taskName); taskDefinition.setFlag(Flag.YES); SqlParameters sqlParameters = new SqlParameters(); sqlParameters.setType(dataSource.getType().name()); sqlParameters.setDatasource(dataSource.getId()); sqlParameters.setSql(sql.substring(0, sql.length() - 1)); // it may be a query type, but it can only be determined by parsing SQL sqlParameters.setSqlType(SqlType.NON_QUERY.ordinal()); sqlParameters.setLocalParams(Collections.emptyList()); taskDefinition.setTaskParams(JSONUtils.toJsonString(sqlParameters)); taskDefinition.setCode(CodeGenerateUtils.getInstance().genCode()); taskDefinition.setTaskType(TaskType.SQL.getDesc()); taskDefinition.setFailRetryTimes(0); taskDefinition.setFailRetryInterval(0); taskDefinition.setTimeoutFlag(TimeoutFlag.CLOSE); taskDefinition.setWorkerGroup(DEFAULT_WORKER_GROUP); taskDefinition.setTaskPriority(Priority.MEDIUM); taskDefinition.setEnvironmentCode(-1); taskDefinition.setTimeout(0); taskDefinition.setDelayTime(0); taskDefinition.setTimeoutNotifyStrategy(TaskTimeoutStrategy.WARN); taskDefinition.setVersion(0); taskDefinition.setResourceIds(""); return taskDefinition; } /** * check and import */ private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) { if 
(!checkImportanceParams(dagDataSchedule, result)) { return false; } ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition(); //unique check Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName()); if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1); processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp()); processDefinition.setId(0); processDefinition.setProjectCode(projectCode); processDefinition.setUserId(loginUser.getId()); try { processDefinition.setCode(CodeGenerateUtils.getInstance().genCode()); } catch (CodeGenerateException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return false; } List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList(); Map<Long, Long> taskCodeMap = new HashMap<>(); Date now = new Date(); List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>(); for (TaskDefinition taskDefinition : taskDefinitionList) { TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition); taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp()); taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUserId(loginUser.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperator(loginUser.getId()); taskDefinitionLog.setOperateTime(now); try { long code = CodeGenerateUtils.getInstance().genCode(); taskCodeMap.put(taskDefinitionLog.getCode(), code); taskDefinitionLog.setCode(code); } catch (CodeGenerateException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code"); return false; } taskDefinitionLogList.add(taskDefinitionLog); } int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList); int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList); if ((logInsert & insert) == 0) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList(); List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation); if (taskCodeMap.containsKey(processTaskRelationLog.getPreTaskCode())) { processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode())); } if (taskCodeMap.containsKey(processTaskRelationLog.getPostTaskCode())) { processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode())); } processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST); processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST); taskRelationLogList.add(processTaskRelationLog); } if (StringUtils.isNotEmpty(processDefinition.getLocations()) && JSONUtils.checkJsonValid(processDefinition.getLocations())) { ArrayNode arrayNode = JSONUtils.parseArray(processDefinition.getLocations()); ArrayNode newArrayNode = JSONUtils.createArrayNode(); for (int i = 0; i < arrayNode.size(); i++) { 
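            // rewrite each node location: map the original taskCode to the newly generated code and keep the x/y coordinates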
ObjectNode newObjectNode = newArrayNode.addObject(); JsonNode jsonNode = arrayNode.get(i); Long taskCode = taskCodeMap.get(jsonNode.get("taskCode").asLong()); if (Objects.nonNull(taskCode)) { newObjectNode.put("taskCode", taskCode); newObjectNode.set("x", jsonNode.get("x")); newObjectNode.set("y", jsonNode.get("y")); } } processDefinition.setLocations(newArrayNode.toString()); } processDefinition.setCreateTime(new Date()); processDefinition.setUpdateTime(new Date()); Map<String, Object> createDagResult = createDagDefine(loginUser, taskRelationLogList, processDefinition, Lists.newArrayList()); if (Status.SUCCESS.equals(createDagResult.get(Constants.STATUS))) { putMsg(createDagResult, Status.SUCCESS); } else { result.putAll(createDagResult); throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR); } Schedule schedule = dagDataSchedule.getSchedule(); if (null != schedule) { ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode()); schedule.setProcessDefinitionCode(newProcessDefinition.getCode()); schedule.setUserId(loginUser.getId()); schedule.setCreateTime(now); schedule.setUpdateTime(now); int scheduleInsert = scheduleMapper.insert(schedule); if (0 == scheduleInsert) { putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR); } } return true; } /** * check importance params */ private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) { if (dagDataSchedule.getProcessDefinition() == null) { putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) { putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) { putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList"); return false; } return true; } private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName); if (processDefinition != null) { if (num > 1) { String str = processDefinitionName.substring(0, processDefinitionName.length() - 3); processDefinitionName = str + "(" + num + ")"; } else { processDefinitionName = processDefinition.getName() + "(" + num + ")"; } } else { return processDefinitionName; } return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1); } /** * check the process task relation json * * @param processTaskRelationJson process task relation json * @return check result code */ @Override public Map<String, Object> checkProcessNodeList(String processTaskRelationJson, List<TaskDefinitionLog> taskDefinitionLogsList) { Map<String, Object> result = new HashMap<>(); try { if (processTaskRelationJson == null) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson); return result; } List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class); // Check whether the task node is normal List<TaskNode> taskNodes = processService.transformTask(taskRelationList, taskDefinitionLogsList); if (CollectionUtils.isEmpty(taskNodes)) { logger.error("process node info is empty"); putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } // check has cycle if (graphHasCycle(taskNodes)) { logger.error("process DAG has cycle"); 
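                // a cyclic dependency can never be scheduled, so reject the whole definition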
putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the process definition json is normal for (TaskNode taskNode : taskNodes) { if (!CheckUtils.checkTaskNodeParameters(taskNode)) { logger.error("task node {} parameter invalid", taskNode.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName()); return result; } // check extra params CheckUtils.checkOtherParams(taskNode.getExtras()); } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.INTERNAL_SERVER_ERROR_ARGS); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, e.getMessage()); logger.error(Status.INTERNAL_SERVER_ERROR_ARGS.getMsg(), e); } return result; } /** * get task node details based on process definition * * @param loginUser loginUser * @param projectCode project code * @param code process definition code * @return task node list */ @Override public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList()); putMsg(result, Status.SUCCESS); return result; } /** * get task node details map based on process definition * * @param loginUser loginUser * @param projectCode project code * @param codes define codes * @return task node list */ @Override public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); if (CollectionUtils.isEmpty(processDefinitionList)) { logger.info("process definition not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes); return result; } HashMap<Long, Project> userProjects = new HashMap<>(Constants.DEFAULT_HASH_MAP_SIZE); projectMapper.queryProjectCreatedAndAuthorizedByUserId(loginUser.getId()) .forEach(userProject -> userProjects.put(userProject.getCode(), userProject)); // check processDefinition exist in project List<ProcessDefinition> processDefinitionListInProject = processDefinitionList.stream() .filter(o -> userProjects.containsKey(o.getProjectCode())).collect(Collectors.toList()); if (CollectionUtils.isEmpty(processDefinitionListInProject)) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes); return result; } Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>(); for (ProcessDefinition processDefinition : processDefinitionListInProject) { DagData dagData = processService.genDagData(processDefinition); taskNodeMap.put(processDefinition.getCode(), 
dagData.getTaskDefinitionList()); } result.put(Constants.DATA_LIST, taskNodeMap); putMsg(result, Status.SUCCESS); return result; } /** * query process definition all by project code * * @param loginUser loginUser * @param projectCode project code * @return process definitions in the project */ @Override public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * Encapsulates the TreeView structure * * @param projectCode project code * @param code process definition code * @param limit limit * @return tree view json data */ @Override public Map<String, Object> viewTree(long projectCode, long code, Integer limit) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (null == processDefinition || projectCode != processDefinition.getProjectCode()) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition); // nodes that is running Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>(); //nodes that is waiting to run Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>(); // List of process instances List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit); processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()))); List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode())); Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); if (limit > processInstanceList.size()) { limit = processInstanceList.size(); } TreeViewDto parentTreeViewDto = new TreeViewDto(); parentTreeViewDto.setName("DAG"); parentTreeViewDto.setType(""); parentTreeViewDto.setCode(0L); // Specify the process definition, because it is a TreeView for a process definition for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); Date endTime = processInstance.getEndTime() == null ? 
new Date() : processInstance.getEndTime();
            parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(),
                    "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(),
                    DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
        }
        List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
        parentTreeViewDtoList.add(parentTreeViewDto);
        // here is the encapsulation of task instances
        for (String startNode : dag.getBeginNode()) {
            runningNodeMap.put(startNode, parentTreeViewDtoList);
        }
        while (Stopper.isRunning()) {
            Set<String> postNodeList;
            Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
            while (iter.hasNext()) {
                Map.Entry<String, List<TreeViewDto>> en = iter.next();
                String nodeCode = en.getKey();
                parentTreeViewDtoList = en.getValue();
                TreeViewDto treeViewDto = new TreeViewDto();
                TaskNode taskNode = dag.getNode(nodeCode);
                treeViewDto.setType(taskNode.getType());
                treeViewDto.setCode(taskNode.getCode());
                treeViewDto.setName(taskNode.getName());
                //set treeViewDto instances
                for (int i = limit - 1; i >= 0; i--) {
                    ProcessInstance processInstance = processInstanceList.get(i);
                    TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndCode(processInstance.getId(), Long.parseLong(nodeCode));
                    if (taskInstance == null) {
                        treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null"));
                    } else {
                        Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
                        Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
                        long subProcessCode = 0L;
                        // if the task is a sub-process, resolve the sub-process definition code; otherwise keep 0
                        if (taskInstance.isSubProcess()) {
                            TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode());
                            subProcessCode = Integer.parseInt(JSONUtils.parseObject(
                                    taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_CODE).asText());
                        }
                        treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(),
                                taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(),
                                taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessCode));
                    }
                }
                for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
                    pTreeViewDto.getChildren().add(treeViewDto);
                }
                postNodeList = dag.getSubsequentNodes(nodeCode);
                if (CollectionUtils.isNotEmpty(postNodeList)) {
                    for (String nextNodeCode : postNodeList) {
                        List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeCode);
                        if (CollectionUtils.isEmpty(treeViewDtoList)) {
                            treeViewDtoList = new ArrayList<>();
                        }
                        treeViewDtoList.add(treeViewDto);
                        waitingRunningNodeMap.put(nextNodeCode, treeViewDtoList);
                    }
                }
                runningNodeMap.remove(nodeCode);
            }
            if (waitingRunningNodeMap.size() == 0) {
                break;
            } else {
                runningNodeMap.putAll(waitingRunningNodeMap);
                waitingRunningNodeMap.clear();
            }
        }
        result.put(Constants.DATA_LIST, parentTreeViewDto);
        result.put(Constants.STATUS, Status.SUCCESS);
        result.put(Constants.MSG, Status.SUCCESS.getMsg());
        return result;
    }

    /**
     * whether the graph has a cycle
     *
     * @param taskNodeResponseList task node response list
     * @return true if the graph has a cycle, otherwise false
     */
    private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
        DAG<String, TaskNode, String> graph = new DAG<>();
        // Fill the vertices
        for (TaskNode taskNodeResponse :
taskNodeResponseList) { graph.addNode(Long.toString(taskNodeResponse.getCode()), taskNodeResponse); } // Fill edge relations for (TaskNode taskNodeResponse : taskNodeResponseList) { List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class); if (CollectionUtils.isNotEmpty(preTasks)) { for (String preTask : preTasks) { if (!graph.addEdge(preTask, Long.toString(taskNodeResponse.getCode()))) { return true; } } } } return graph.hasCycle(); } /** * batch copy process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true); if (result.get(Constants.STATUS) == Status.NOT_SUPPORT_COPY_TASK_TYPE) { return result; } checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true); return result; } /** * batch move process definition * Will be deleted * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (projectCode == targetProjectCode) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false); return result; } private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (org.apache.commons.lang.StringUtils.isEmpty(processDefinitionCodes)) { putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes); return result; } if (projectCode != targetProjectCode) { Project targetProject = projectMapper.queryByCode(targetProjectCode); //check user access for project Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode); if (targetResult.get(Constants.STATUS) != Status.SUCCESS) { return targetResult; } } return result; } private void doBatchOperateProcessDefinition(User loginUser, long targetProjectCode, List<String> failedProcessList, String processDefinitionCodes, Map<String, Object> result, boolean isCopy) { Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes); 
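        // codes that were requested but not found in the database are recorded as failed items before the rest are copied or moved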
Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet()); // definitionCodes - queryCodes Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet()); diffCode.forEach(code -> failedProcessList.add(code + "[null]")); for (ProcessDefinition processDefinition : processDefinitionList) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList()); processDefinition.setProjectCode(targetProjectCode); if (isCopy) { List<TaskDefinitionLog> taskDefinitionLogs = processService.genTaskDefineList(processTaskRelations); Map<Long, Long> taskCodeMap = new HashMap<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (TaskType.CONDITIONS.getDesc().equals(taskDefinitionLog.getTaskType()) || TaskType.SWITCH.getDesc().equals(taskDefinitionLog.getTaskType()) || TaskType.SUB_PROCESS.getDesc().equals(taskDefinitionLog.getTaskType()) || TaskType.DEPENDENT.getDesc().equals(taskDefinitionLog.getTaskType())) { putMsg(result, Status.NOT_SUPPORT_COPY_TASK_TYPE, taskDefinitionLog.getTaskType()); return; } try { long taskCode = CodeGenerateUtils.getInstance().genCode(); taskCodeMap.put(taskDefinitionLog.getCode(), taskCode); taskDefinitionLog.setCode(taskCode); } catch (CodeGenerateException e) { putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS); throw new ServiceException(Status.INTERNAL_SERVER_ERROR_ARGS); } taskDefinitionLog.setProjectCode(targetProjectCode); taskDefinitionLog.setVersion(0); taskDefinitionLog.setName(taskDefinitionLog.getName() + "_copy_" + DateUtils.getCurrentTimeStamp()); } for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPreTaskCode() > 0) { processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode())); } if (processTaskRelationLog.getPostTaskCode() > 0) { processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode())); } } try { processDefinition.setCode(CodeGenerateUtils.getInstance().genCode()); } catch (CodeGenerateException e) { putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS); throw new ServiceException(Status.INTERNAL_SERVER_ERROR_ARGS); } processDefinition.setId(0); processDefinition.setUserId(loginUser.getId()); processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp()); if (StringUtils.isNotBlank(processDefinition.getLocations())) { ArrayNode jsonNodes = JSONUtils.parseArray(processDefinition.getLocations()); for (int i = 0; i < jsonNodes.size(); i++) { ObjectNode node = (ObjectNode) jsonNodes.path(i); node.put("taskCode", taskCodeMap.get(node.get("taskCode").asLong())); jsonNodes.set(i, node); } processDefinition.setLocations(JSONUtils.toJsonString(jsonNodes)); } try { result.putAll(createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs)); } catch (Exception e) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.COPY_PROCESS_DEFINITION_ERROR); } } else { try { result.putAll(updateDagDefine(loginUser, taskRelationList, processDefinition, null, Lists.newArrayList())); } catch (Exception e) { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR); throw new 
ServiceException(Status.MOVE_PROCESS_DEFINITION_ERROR); } } if (result.get(Constants.STATUS) != Status.SUCCESS) { failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]"); } } } /** * switch the defined process definition version * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param version the version user want to switch * @return switch process definition version result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (Objects.isNull(processDefinition) || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code); return result; } ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version); if (Objects.isNull(processDefinitionLog)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version); return result; } int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog); if (switchVersion <= 0) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); throw new ServiceException(Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); } putMsg(result, Status.SUCCESS); return result; } /** * check batch operate result * * @param srcProjectCode srcProjectCode * @param targetProjectCode targetProjectCode * @param result result * @param failedProcessList failedProcessList * @param isCopy isCopy */ private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode, Map<String, Object> result, List<String> failedProcessList, boolean isCopy) { if (!failedProcessList.isEmpty()) { if (isCopy) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } else { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } } else { putMsg(result, Status.SUCCESS); } } /** * query the pagination versions info by one certain process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param pageNo page number * @param pageSize page size * @param code process definition code * @return the pagination process definition versions info of the certain process definition */ @Override public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); // check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<ProcessDefinitionLog> page = new Page<>(pageNo, 
pageSize); IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code, projectCode); List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords(); pageInfo.setTotalList(processDefinitionLogs); pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * delete one certain process definition by version number and process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param code process definition code * @param version version number * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { if (processDefinition.getVersion() == version) { putMsg(result, Status.MAIN_TABLE_USING_VERSION); return result; } int deleteLog = processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version); int deleteRelationLog = processTaskRelationLogMapper.deleteByCode(code, version); if (deleteLog == 0 || deleteRelationLog == 0) { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } putMsg(result, Status.SUCCESS); } return result; } /** * create empty process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams globalParams * @param timeout timeout * @param tenantCode tenantCode * @param scheduleJson scheduleJson * @return process definition code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createEmptyProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, int timeout, String tenantCode, String scheduleJson, ProcessExecutionTypeEnum executionType) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = CodeGenerateUtils.getInstance().genCode(); } catch (CodeGenerateException e) { putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS); return result; } ProcessDefinition processDefinition 
= new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, "", timeout, loginUser.getId(), tenantId); processDefinition.setExecutionType(executionType); result = createEmptyDagDefine(loginUser, processDefinition); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (StringUtils.isBlank(scheduleJson)) { return result; } // save dag schedule Map<String, Object> scheduleResult = createDagSchedule(loginUser, processDefinition, scheduleJson); if (scheduleResult.get(Constants.STATUS) != Status.SUCCESS) { Status scheduleResultStatus = (Status) scheduleResult.get(Constants.STATUS); putMsg(result, scheduleResultStatus); throw new ServiceException(scheduleResultStatus); } return result; } private Map<String, Object> createEmptyDagDefine(User loginUser, ProcessDefinition processDefinition) { Map<String, Object> result = new HashMap<>(); int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, Boolean.TRUE, Boolean.TRUE); if (insertVersion == 0) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR); } putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); return result; } private Map<String, Object> createDagSchedule(User loginUser, ProcessDefinition processDefinition, String scheduleJson) { Map<String, Object> result = new HashMap<>(); Schedule scheduleObj = JSONUtils.parseObject(scheduleJson, Schedule.class); if (scheduleObj == null) { putMsg(result, Status.DATA_IS_NOT_VALID, scheduleJson); throw new ServiceException(Status.DATA_IS_NOT_VALID); } Date now = new Date(); scheduleObj.setProcessDefinitionCode(processDefinition.getCode()); if (DateUtils.differSec(scheduleObj.getStartTime(), scheduleObj.getEndTime()) == 0) { logger.warn("The start time must not be the same as the end"); putMsg(result, Status.SCHEDULE_START_TIME_END_TIME_SAME); return result; } if (!org.quartz.CronExpression.isValidExpression(scheduleObj.getCrontab())) { logger.error("{} verify failure", scheduleObj.getCrontab()); putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, scheduleObj.getCrontab()); return result; } scheduleObj.setWarningType(scheduleObj.getWarningType() == null ? WarningType.NONE : scheduleObj.getWarningType()); scheduleObj.setWarningGroupId(scheduleObj.getWarningGroupId() == 0 ? 1 : scheduleObj.getWarningGroupId()); scheduleObj.setFailureStrategy(scheduleObj.getFailureStrategy() == null ? FailureStrategy.CONTINUE : scheduleObj.getFailureStrategy()); scheduleObj.setCreateTime(now); scheduleObj.setUpdateTime(now); scheduleObj.setUserId(loginUser.getId()); scheduleObj.setReleaseState(ReleaseState.OFFLINE); scheduleObj.setProcessInstancePriority(scheduleObj.getProcessInstancePriority() == null ? Priority.MEDIUM : scheduleObj.getProcessInstancePriority()); scheduleObj.setWorkerGroup(scheduleObj.getWorkerGroup() == null ? "default" : scheduleObj.getWorkerGroup()); scheduleObj.setEnvironmentCode(scheduleObj.getEnvironmentCode() == null ? 
-1 : scheduleObj.getEnvironmentCode()); scheduleMapper.insert(scheduleObj); putMsg(result, Status.SUCCESS); result.put("scheduleId", scheduleObj.getId()); return result; } /** * update process definition basic info * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams globalParams * @param timeout timeout * @param tenantCode tenantCode * @param scheduleJson scheduleJson * @param executionType executionType * @return update result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> updateProcessDefinitionBasicInfo(User loginUser, long projectCode, String name, long code, String description, String globalParams, int timeout, String tenantCode, String scheduleJson, ProcessExecutionTypeEnum executionType) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, "", timeout, tenantId); processDefinition.setExecutionType(executionType); List<ProcessTaskRelationLog> taskRelationList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); result = updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, Lists.newArrayList()); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (StringUtils.isBlank(scheduleJson)) { return result; } // update dag schedule Map<String, Object> scheduleResult = updateDagSchedule(loginUser, projectCode, code, scheduleJson); if (scheduleResult.get(Constants.STATUS) != Status.SUCCESS) { Status scheduleResultStatus = (Status) scheduleResult.get(Constants.STATUS); putMsg(result, scheduleResultStatus); throw new ServiceException(scheduleResultStatus); } return result; } private Map<String, Object> updateDagSchedule(User loginUser, long projectCode, long processDefinitionCode, String scheduleJson) { Map<String, Object> result = new HashMap<>(); Schedule schedule = JSONUtils.parseObject(scheduleJson, Schedule.class); if (schedule == null) { putMsg(result, Status.DATA_IS_NOT_VALID, 
scheduleJson); throw new ServiceException(Status.DATA_IS_NOT_VALID); } // set default value FailureStrategy failureStrategy = schedule.getFailureStrategy() == null ? FailureStrategy.CONTINUE : schedule.getFailureStrategy(); WarningType warningType = schedule.getWarningType() == null ? WarningType.NONE : schedule.getWarningType(); Priority processInstancePriority = schedule.getProcessInstancePriority() == null ? Priority.MEDIUM : schedule.getProcessInstancePriority(); int warningGroupId = schedule.getWarningGroupId() == 0 ? 1 : schedule.getWarningGroupId(); String workerGroup = schedule.getWorkerGroup() == null ? "default" : schedule.getWorkerGroup(); long environmentCode = schedule.getEnvironmentCode() == null ? -1 : schedule.getEnvironmentCode(); ScheduleParam param = new ScheduleParam(); param.setStartTime(schedule.getStartTime()); param.setEndTime(schedule.getEndTime()); param.setCrontab(schedule.getCrontab()); param.setTimezoneId(schedule.getTimezoneId()); return schedulerService.updateScheduleByProcessDefinitionCode( loginUser, projectCode, processDefinitionCode, JSONUtils.toJsonString(param), warningType, warningGroupId, failureStrategy, processInstancePriority, workerGroup, environmentCode); } /** * release process definition and schedule * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState releaseState * @return update result code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> releaseWorkflowAndSchedule(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check state if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(code); if (scheduleObj == null) { putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, "processDefinitionCode:" + code); return result; } switch (releaseState) { case ONLINE: List<ProcessTaskRelation> relationList = processService.findRelationByCode(code, processDefinition.getVersion()); if (CollectionUtils.isEmpty(relationList)) { putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); scheduleObj.setReleaseState(ReleaseState.ONLINE); scheduleMapper.updateById(scheduleObj); break; case OFFLINE: processDefinition.setReleaseState(releaseState); int updateProcess = processDefinitionMapper.updateById(processDefinition); if (updateProcess > 0) { logger.info("set schedule offline, project code: {}, schedule id: {}, process definition code: {}", projectCode, scheduleObj.getId(), code); // set status scheduleObj.setReleaseState(ReleaseState.OFFLINE); int updateSchedule = scheduleMapper.updateById(scheduleObj); if (updateSchedule == 0) { putMsg(result, Status.OFFLINE_SCHEDULE_ERROR); throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR); } schedulerService.deleteSchedule(project.getId(), scheduleObj.getId()); } break; default: putMsg(result, 
Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } putMsg(result, Status.SUCCESS); return result; } }
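The empty-definition and basic-info update paths above both parse the `scheduleJson` argument into a `Schedule` and fall back to defaults for missing fields (WarningType.NONE, warning group 1, FailureStrategy.CONTINUE, Priority.MEDIUM, worker group "default", environment code -1). A minimal caller-side sketch follows; the JSON key names and date format are assumptions inferred from the getters used above (`getStartTime`, `getEndTime`, `getCrontab`, `getTimezoneId`), not confirmed by this file, and `ProcessExecutionTypeEnum.PARALLEL` and the injected bean name are likewise assumed.

```java
// Hedged sketch: assemble a scheduleJson that createDagSchedule(...) can parse,
// then create an empty workflow that carries it. Only the four time/cron fields
// are supplied; everything else falls back to the defaults listed above.
String scheduleJson = "{"
        + "\"startTime\":\"2022-03-01 00:00:00\","   // date format is an assumption
        + "\"endTime\":\"2022-12-31 00:00:00\","
        + "\"crontab\":\"0 0 2 * * ? *\","           // Quartz cron: daily at 02:00
        + "\"timezoneId\":\"Asia/Shanghai\""
        + "}";
Map<String, Object> result = processDefinitionService.createEmptyProcessDefinition(
        loginUser, projectCode, "demo_workflow", "empty workflow with a schedule",
        "[]", 0, "default" /* assumed to match Constants.DEFAULT */, scheduleJson,
        ProcessExecutionTypeEnum.PARALLEL);
// On success, result carries the new ProcessDefinition under Constants.DATA_LIST
// (see createEmptyDagDefine above); the schedule row is inserted in OFFLINE state.
```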
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,463
[Bug] [API] releaseWorkflowAndSchedule api of timing does not take effect / relation api causes workflow to go offline after binding
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. The releaseWorkflowAndSchedule api of timing does not take effect. 2. The relation api causes the workflow to go offline after binding. ### What you expected to happen Both APIs should work normally. ### How to reproduce Call the APIs directly (see the sketch below). ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
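A minimal, hedged reproduction sketch for item 1 at the service layer, based only on the `releaseWorkflowAndSchedule(User, long, long, ReleaseState)` signature and the `scheduleMapper.queryByProcessDefinitionCode` call shown earlier in this document; the injected bean names (`processDefinitionService`, `scheduleMapper`) and the `getReleaseState()` accessor are assumptions, not taken from this record.

```java
// Call the combined release API for an existing workflow, then check whether
// the schedule actually went online (per this issue, the timing does not take effect).
Map<String, Object> online = processDefinitionService.releaseWorkflowAndSchedule(
        loginUser, projectCode, workflowCode, ReleaseState.ONLINE); // workflowCode: hypothetical existing definition code
Schedule schedule = scheduleMapper.queryByProcessDefinitionCode(workflowCode);
logger.info("status: {}, schedule release state: {}",
        online.get(Constants.STATUS), schedule.getReleaseState());  // expected ONLINE
```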
https://github.com/apache/dolphinscheduler/issues/8463
https://github.com/apache/dolphinscheduler/pull/8464
cd8c5e1c9aeff536c705c9152b31a09a674ea864
8d0a0f9c323c8153573b159090aa93d6ea87a30d
"2022-02-21T07:07:39Z"
java
"2022-02-21T07:58:59Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static java.util.stream.Collectors.toSet; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.spi.task.dq.utils.DataQualityConstants.TASK_INSTANCE_ID; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TaskGroupQueueStatus; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils.CodeGenerateException; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import 
org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.DqComparisonType; import org.apache.dolphinscheduler.dao.entity.DqExecuteResult; import org.apache.dolphinscheduler.dao.entity.DqRule; import org.apache.dolphinscheduler.dao.entity.DqRuleExecuteSql; import org.apache.dolphinscheduler.dao.entity.DqRuleInputEntry; import org.apache.dolphinscheduler.dao.entity.DqTaskStatisticsValue; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskGroup; import org.apache.dolphinscheduler.dao.entity.TaskGroupQueue; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.DqComparisonTypeMapper; import org.apache.dolphinscheduler.dao.mapper.DqExecuteResultMapper; import org.apache.dolphinscheduler.dao.mapper.DqRuleExecuteSqlMapper; import org.apache.dolphinscheduler.dao.mapper.DqRuleInputEntryMapper; import org.apache.dolphinscheduler.dao.mapper.DqRuleMapper; import org.apache.dolphinscheduler.dao.mapper.DqTaskStatisticsValueMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskGroupMapper; import org.apache.dolphinscheduler.dao.mapper.TaskGroupQueueMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import 
org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.dao.utils.DqRuleUtils; import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand; import org.apache.dolphinscheduler.remote.command.TaskEventChangeCommand; import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.exceptions.ServiceException; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.dolphinscheduler.spi.task.dq.enums.DqTaskState; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang3.SerializationUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.node.ObjectNode; import com.google.common.collect.Lists; /** * process relative dao that some mappers in this. 
*/ @Component public class ProcessService { private final Logger logger = LoggerFactory.getLogger(getClass()); private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal()}; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionMapper processDefineMapper; @Autowired private ProcessDefinitionLogMapper processDefineLogMapper; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private DataSourceMapper dataSourceMapper; @Autowired private ProcessInstanceMapMapper processInstanceMapMapper; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private CommandMapper commandMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private UdfFuncMapper udfFuncMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectMapper projectMapper; @Autowired private DqExecuteResultMapper dqExecuteResultMapper; @Autowired private DqRuleMapper dqRuleMapper; @Autowired private DqRuleInputEntryMapper dqRuleInputEntryMapper; @Autowired private DqRuleExecuteSqlMapper dqRuleExecuteSqlMapper; @Autowired private DqComparisonTypeMapper dqComparisonTypeMapper; @Autowired private DqTaskStatisticsValueMapper dqTaskStatisticsValueMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired StateEventCallbackService stateEventCallbackService; @Autowired private EnvironmentMapper environmentMapper; @Autowired private TaskGroupQueueMapper taskGroupQueueMapper; @Autowired private TaskGroupMapper taskGroupMapper; /** * handle Command (construct ProcessInstance from Command) , wrapped in transaction * * @param logger logger * @param host host * @param command found command * @return process instance */ @Transactional public ProcessInstance handleCommand(Logger logger, String host, Command command) { ProcessInstance processInstance = constructProcessInstance(command, host); // cannot construct process instance, return null if (processInstance == null) { logger.error("scan command, command parameter is error: {}", command); moveToErrorCommand(command, "process instance is null"); return null; } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); //if the processDefinition is serial ProcessDefinition processDefinition = this.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (processDefinition.getExecutionType().typeIsSerial()) { saveSerialProcess(processInstance, processDefinition); if (processInstance.getState() != ExecutionStatus.SUBMITTED_SUCCESS) { setSubProcessParam(processInstance); deleteCommandWithCheck(command.getId()); return null; } } else { saveProcessInstance(processInstance); } setSubProcessParam(processInstance); deleteCommandWithCheck(command.getId()); return processInstance; } private void saveSerialProcess(ProcessInstance processInstance, ProcessDefinition processDefinition) { 
processInstance.setState(ExecutionStatus.SERIAL_WAIT); saveProcessInstance(processInstance); //serial wait //when we get the running instance(or waiting instance) only get the priority instance(by id) if (processDefinition.getExecutionType().typeIsSerialWait()) { while (true) { List<ProcessInstance> runningProcessInstances = this.processInstanceMapper.queryByProcessDefineCodeAndStatusAndNextId(processInstance.getProcessDefinitionCode(), Constants.RUNNING_PROCESS_STATE, processInstance.getId()); if (CollectionUtils.isEmpty(runningProcessInstances)) { processInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); saveProcessInstance(processInstance); return; } ProcessInstance runningProcess = runningProcessInstances.get(0); if (this.processInstanceMapper.updateNextProcessIdById(processInstance.getId(), runningProcess.getId())) { return; } } } else if (processDefinition.getExecutionType().typeIsSerialDiscard()) { List<ProcessInstance> runningProcessInstances = this.processInstanceMapper.queryByProcessDefineCodeAndStatusAndNextId(processInstance.getProcessDefinitionCode(), Constants.RUNNING_PROCESS_STATE, processInstance.getId()); if (CollectionUtils.isEmpty(runningProcessInstances)) { processInstance.setState(ExecutionStatus.STOP); saveProcessInstance(processInstance); } } else if (processDefinition.getExecutionType().typeIsSerialPriority()) { List<ProcessInstance> runningProcessInstances = this.processInstanceMapper.queryByProcessDefineCodeAndStatusAndNextId(processInstance.getProcessDefinitionCode(), Constants.RUNNING_PROCESS_STATE, processInstance.getId()); if (CollectionUtils.isNotEmpty(runningProcessInstances)) { for (ProcessInstance info : runningProcessInstances) { info.setCommandType(CommandType.STOP); info.addHistoryCmd(CommandType.STOP); info.setState(ExecutionStatus.READY_STOP); int update = updateProcessInstance(info); // determine whether the process is normal if (update > 0) { String host = info.getHost(); String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand( info.getId(), 0, info.getState(), info.getId(), 0 ); try { stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command()); } catch (Exception e) { logger.error("sendResultError"); } } } } } } /** * save error command, and delete original command * * @param command command * @param message message */ public void moveToErrorCommand(Command command, String message) { ErrorCommand errorCommand = new ErrorCommand(command, message); this.errorCommandMapper.insert(errorCommand); this.commandMapper.deleteById(command.getId()); } /** * set process waiting thread * * @param command command * @param processInstance processInstance * @return process instance */ private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) { processInstance.setState(ExecutionStatus.WAITING_THREAD); if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) { processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * get command page */ public List<Command> findCommandPage(int pageSize, int 
pageNumber) { return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinition(long defineCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(defineCode); if (processDefinition == null) { logger.error("process define not exists"); return Lists.newArrayList(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } if (taskDefinitionSet.isEmpty()) { return Lists.newArrayList(); } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return Lists.newArrayList(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. 
* * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } Host host = Host.of(taskInstance.getHost()); // remove task log from loggerserver logClient.removeTaskLog(host.getIp(), host.getPort(), taskLogPath); } } } /** * recursive delete all task instance by process instance id * @param processInstanceId */ public void deleteWorkTaskInstanceByProcessInstanceId(int processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } List<Integer> taskInstanceIdList = new ArrayList<>(); for (TaskInstance taskInstance : taskInstanceList) { taskInstanceIdList.add(taskInstance.getId()); } taskInstanceMapper.deleteBatchIds(taskInstanceIdList); } /** * recursive query sub process definition id by parent id. * * @param parentCode parentCode * @param ids ids */ public void recurseFindSubProcess(long parentCode, List<Long> ids) { List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinition(parentCode); if (taskNodeList != null && !taskNodeList.isEmpty()) { for (TaskDefinition taskNode : taskNodeList) { String parameter = taskNode.getTaskParams(); ObjectNode parameterJson = JSONUtils.parseObject(parameter); if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_CODE) != null) { SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class); ids.add(subProcessParam.getProcessDefinitionCode()); recurseFindSubProcess(subProcessParam.getProcessDefinitionCode(), ids); } } } } /** * create recovery waiting thread command when thread pool is not enough for the process instance. * sub work process instance need not to create recovery command. * create recovery waiting thread command and delete origin command at the same time. 
* if the recovery command is exists, only update the field update_time * * @param originCommand originCommand * @param processInstance processInstance */ public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) { // sub process doesnot need to create wait command if (processInstance.getIsSubProcess() == Flag.YES) { if (originCommand != null) { commandMapper.deleteById(originCommand.getId()); } return; } Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId())); // process instance quit by "waiting thread" state if (originCommand == null) { Command command = new Command( CommandType.RECOVER_WAITING_THREAD, processInstance.getTaskDependType(), processInstance.getFailureStrategy(), processInstance.getExecutorId(), processInstance.getProcessDefinition().getCode(), JSONUtils.toJsonString(cmdParam), processInstance.getWarningType(), processInstance.getWarningGroupId(), processInstance.getScheduleTime(), processInstance.getWorkerGroup(), processInstance.getEnvironmentCode(), processInstance.getProcessInstancePriority(), processInstance.getDryRun(), processInstance.getId(), processInstance.getProcessDefinitionVersion() ); saveCommand(command); return; } // update the command time if current command if recover from waiting if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) { originCommand.setUpdateTime(new Date()); saveCommand(originCommand); } else { // delete old command and create new waiting thread command commandMapper.deleteById(originCommand.getId()); originCommand.setId(0); originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); originCommand.setUpdateTime(new Date()); originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam)); originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority()); saveCommand(originCommand); } } /** * get schedule time from command * * @param command command * @param cmdParam cmdParam map * @return date */ private Date getScheduleTime(Command command, Map<String, String> cmdParam) { Date scheduleTime = command.getScheduleTime(); if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode()); List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules); if (complementDateList.size() > 0) { scheduleTime = complementDateList.get(0); } else { logger.error("set scheduler time error: complement date list is empty, command: {}", command.toString()); } } return scheduleTime; } /** * generate a new work process instance from command. 
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRestartTime(processInstance.getStartTime()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); processInstance.setDryRun(command.getDryRun()); if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
-1 : command.getEnvironmentCode()); processInstance.setTimeout(processDefinition.getTimeout()); processInstance.setTenantId(processDefinition.getTenantId()); return processInstance; } private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) { // get start params from command param Map<String, String> startParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) { String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS); startParamMap = JSONUtils.toMap(startParamJson); } Map<String, String> fatherParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) { String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS); fatherParamMap = JSONUtils.toMap(fatherParamJson); } startParamMap.putAll(fatherParamMap); // set start param into global params if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) { for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) { String val = startParamMap.get(param.getKey()); if (val != null) { param.setValue(val); } } } } /** * get process tenant * there is tenant id in definition, use the tenant of the definition. * if there is not tenant id in the definiton or the tenant not exist * use definition creator's tenant. * * @param tenantId tenantId * @param userId userId * @return tenant */ public Tenant getTenantForProcess(int tenantId, int userId) { Tenant tenant = null; if (tenantId >= 0) { tenant = tenantMapper.queryById(tenantId); } if (userId == 0) { return null; } if (tenant == null) { User user = userMapper.selectById(userId); tenant = tenantMapper.queryById(user.getTenantId()); } return tenant; } /** * get an environment * use the code of the environment to find a environment. * * @param environmentCode environmentCode * @return Environment */ public Environment findEnvironmentByCode(Long environmentCode) { Environment environment = null; if (environmentCode >= 0) { environment = environmentMapper.queryByEnvironmentCode(environmentCode); } return environment; } /** * check command parameters is valid * * @param command command * @param cmdParam cmdParam map * @return whether command param is valid */ private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) { if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) { if (cmdParam == null || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODES) || cmdParam.get(Constants.CMD_PARAM_START_NODES).isEmpty()) { logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType()); return false; } } return true; } /** * construct process instance according to one command. * * @param command command * @param host host * @return process instance */ private ProcessInstance constructProcessInstance(Command command, String host) { ProcessInstance processInstance; ProcessDefinition processDefinition; CommandType commandType = command.getCommandType(); processDefinition = this.findProcessDefinition(command.getProcessDefinitionCode(), command.getProcessDefinitionVersion()); if (processDefinition == null) { logger.error("cannot find the work process define! 
define code : {}", command.getProcessDefinitionCode()); return null; } Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam()); int processInstanceId = command.getProcessInstanceId(); if (processInstanceId == 0) { processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } else { processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return processInstance; } } if (cmdParam != null) { CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command); // reset global params while repeat running is needed by cmdParam if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) { setGlobalParamIfCommanded(processDefinition, cmdParam); } // Recalculate global parameters after rerun. processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), commandTypeIfComplement, processInstance.getScheduleTime())); processInstance.setProcessDefinition(processDefinition); } //reset command parameter if (processInstance.getCommandParam() != null) { Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam()); for (Map.Entry<String, String> entry : processCmdParam.entrySet()) { if (!cmdParam.containsKey(entry.getKey())) { cmdParam.put(entry.getKey(), entry.getValue()); } } } // reset command parameter if sub process if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstance.setCommandParam(command.getCommandParam()); } if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) { logger.error("command parameter check failed!"); return null; } if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setHost(host); processInstance.setRestartTime(new Date()); ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION; int runTime = processInstance.getRunTimes(); switch (commandType) { case START_PROCESS: break; case START_FAILURE_TASK_PROCESS: // find failed tasks and init these tasks List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE); List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE); List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); failedList.addAll(killedList); failedList.addAll(toleranceList); for (Integer taskId : failedList) { initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(Constants.COMMA, convertIntListToString(failedList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case START_CURRENT_TASK_PROCESS: break; case RECOVER_WAITING_THREAD: break; case RECOVER_SUSPENDED_PROCESS: // find pause tasks and init task's state cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE); List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); suspendedNodeList.addAll(stopNodeList); for (Integer taskId : suspendedNodeList) { // initialize the pause state initTaskInstance(this.findTaskInstanceById(taskId)); } 
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case RECOVER_TOLERANCE_FAULT_PROCESS: // recover tolerance fault process processInstance.setRecovery(Flag.YES); runStatus = processInstance.getState(); break; case COMPLEMENT_DATA: // delete all the valid tasks when complement data if id is not null if (processInstance.getId() != 0) { List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { taskInstance.setFlag(Flag.NO); this.updateTaskInstance(taskInstance); } } break; case REPEAT_RUNNING: // delete the recover task names from command parameter if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } // delete all the valid tasks when repeat running List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : validTaskList) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); } processInstance.setStartTime(new Date()); processInstance.setRestartTime(processInstance.getStartTime()); processInstance.setEndTime(null); processInstance.setRunTimes(runTime + 1); initComplementDataParam(processDefinition, processInstance, cmdParam); break; case SCHEDULER: break; default: break; } processInstance.setState(runStatus); return processInstance; } /** * get process definition by command * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance * Otherwise, get the latest version of ProcessDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) { if (cmdParam != null) { int processInstanceId = 0; if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)); } if (processInstanceId != 0) { ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return null; } return processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); } } return processDefineMapper.queryByCode(processDefinitionCode); } /** * return complement data if the process start with complement data * * @param processInstance processInstance * @param command command * @return command type */ private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) { if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) { return CommandType.COMPLEMENT_DATA; } else { return command.getCommandType(); } } /** * initialize complement data parameters * * @param processDefinition processDefinition * @param processInstance processInstance * @param cmdParam cmdParam */ 
private void initComplementDataParam(ProcessDefinition processDefinition, ProcessInstance processInstance, Map<String, String> cmdParam) { if (!processInstance.isComplementData()) { return; } Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules); if (complementDate.size() > 0 && Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(complementDate.get(0)); } processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); } /** * set sub work process parameters. * handle sub work process instance, update relation table and command parameters * set sub work process flag, extends parent work process command parameters * * @param subProcessInstance subProcessInstance */ public void setSubProcessParam(ProcessInstance subProcessInstance) { String cmdParam = subProcessInstance.getCommandParam(); if (StringUtils.isEmpty(cmdParam)) { return; } Map<String, String> paramMap = JSONUtils.toMap(cmdParam); // write sub process id into cmd param. if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS) && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) { paramMap.remove(CMD_PARAM_SUB_PROCESS); paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId())); subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap)); subProcessInstance.setIsSubProcess(Flag.YES); this.saveProcessInstance(subProcessInstance); } // copy parent instance user def params to sub process.. String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID); if (StringUtils.isNotEmpty(parentInstanceId)) { ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId)); if (parentInstance != null) { subProcessInstance.setGlobalParams( joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams())); this.saveProcessInstance(subProcessInstance); } else { logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam); } } ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class); if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) { return; } // update sub process id to process map table processInstanceMap.setProcessInstanceId(subProcessInstance.getId()); this.updateWorkProcessInstanceMap(processInstanceMap); } /** * join parent global params into sub process. * only the keys doesn't in sub process global would be joined. * * @param parentGlobalParams parentGlobalParams * @param subGlobalParams subGlobalParams * @return global params join */ private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) { // Since JSONUtils.toList return unmodified list, we need to creat a new List here. 
List<Property> parentParams = Lists.newArrayList(JSONUtils.toList(parentGlobalParams, Property.class)); List<Property> subParams = JSONUtils.toList(subGlobalParams, Property.class); Set<String> parentParamKeys = parentParams.stream().map(Property::getProp).collect(toSet()); // We will combine the params of parent workflow and sub workflow // If the params are defined in both, we will use parent's params to override the sub workflow(ISSUE-7962) // todo: Do we need to consider the other attribute of Property? // e.g. the subProp's type is not equals with parent, or subProp's direct is not equals with parent // It's suggested to add node name in property, this kind of problem can be solved. List<Property> extraSubParams = subParams.stream() .filter(subProp -> !parentParamKeys.contains(subProp.getProp())).collect(Collectors.toList()); parentParams.addAll(extraSubParams); return JSONUtils.toJsonString(parentParams); } /** * initialize task instance * * @param taskInstance taskInstance */ private void initTaskInstance(TaskInstance taskInstance) { if (!taskInstance.isSubProcess() && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); return; } taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); updateTaskInstance(taskInstance); } /** * retry submit task to db */ public TaskInstance submitTaskWithRetry(ProcessInstance processInstance, TaskInstance taskInstance, int commitRetryTimes, int commitInterval) { int retryTimes = 1; TaskInstance task = null; while (retryTimes <= commitRetryTimes) { try { // submit task to db task = SpringApplicationContext.getBean(ProcessService.class).submitTask(processInstance, taskInstance); if (task != null && task.getId() != 0) { break; } logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes); Thread.sleep(commitInterval); } catch (Exception e) { logger.error("task commit to db failed", e); } retryTimes += 1; } return task; } /** * submit task to db * submit sub process to command * * @param processInstance processInstance * @param taskInstance taskInstance * @return task instance */ @Transactional(rollbackFor = Exception.class) public TaskInstance submitTask(ProcessInstance processInstance, TaskInstance taskInstance) { logger.info("start submit task : {}, instance id:{}, state: {}", taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState()); //submit to db TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance); if (task == null) { logger.error("end submit task to db error, task name:{}, process id:{} state: {} ", taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState()); return null; } if (!task.getState().typeIsFinished()) { createSubWorkProcess(processInstance, task); } logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ", taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState()); return task; } /** * set work process instance map * consider o * repeat running does not generate new sub process instance * set map {parent instance id, task instance id, 0(child instance id)} * * @param parentInstance parentInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) { 
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId()); if (processMap != null) { return processMap; } if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) { // update current task id to map processMap = findPreviousTaskProcessMap(parentInstance, parentTask); if (processMap != null) { processMap.setParentTaskInstanceId(parentTask.getId()); updateWorkProcessInstanceMap(processMap); return processMap; } } // new task processMap = new ProcessInstanceMap(); processMap.setParentProcessInstanceId(parentInstance.getId()); processMap.setParentTaskInstanceId(parentTask.getId()); createWorkProcessInstanceMap(processMap); return processMap; } /** * find previous task work process map. * * @param parentProcessInstance parentProcessInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) { Integer preTaskId = 0; List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId()); for (TaskInstance task : preTaskList) { if (task.getName().equals(parentTask.getName())) { preTaskId = task.getId(); ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId); if (map != null) { return map; } } } logger.info("sub process instance is not found,parent task:{},parent instance:{}", parentTask.getId(), parentProcessInstance.getId()); return null; } /** * create sub work process command * * @param parentProcessInstance parentProcessInstance * @param task task */ public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) { if (!task.isSubProcess()) { return; } //check create sub work flow firstly ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId()); if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) { // recover failover tolerance would not create a new command when the sub command already have been created return; } instanceMap = setProcessInstanceMap(parentProcessInstance, task); ProcessInstance childInstance = null; if (instanceMap.getProcessInstanceId() != 0) { childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId()); } Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task); updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode()); initSubInstanceState(childInstance); createCommand(subProcessCommand); logger.info("sub process command created: {} ", subProcessCommand); } /** * complement data needs transform parent parameter to child. 
*/ private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) { // set sub work process command String processMapStr = JSONUtils.toJsonString(instanceMap); Map<String, String> cmdParam = JSONUtils.toMap(processMapStr); if (parentProcessInstance.isComplementData()) { Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam()); String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE); String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime); processMapStr = JSONUtils.toJsonString(cmdParam); } if (fatherParams.size() != 0) { cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams)); processMapStr = JSONUtils.toJsonString(cmdParam); } return processMapStr; } public Map<String, String> getGlobalParamMap(String globalParams) { List<Property> propList; Map<String, String> globalParamMap = new HashMap<>(); if (StringUtils.isNotEmpty(globalParams)) { propList = JSONUtils.toList(globalParams, Property.class); globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } return globalParamMap; } /** * create sub work process command */ public Command createSubProcessCommand(ProcessInstance parentProcessInstance, ProcessInstance childInstance, ProcessInstanceMap instanceMap, TaskInstance task) { CommandType commandType = getSubCommandType(parentProcessInstance, childInstance); Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams()); long childDefineCode = 0L; if (subProcessParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE)) { childDefineCode = Long.parseLong(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE)); } ProcessDefinition subProcessDefinition = processDefineMapper.queryByCode(childDefineCode); Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS); List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams()); Map<String, String> fatherParams = new HashMap<>(); if (CollectionUtils.isNotEmpty(allParam)) { for (Property info : allParam) { fatherParams.put(info.getProp(), globalMap.get(info.getProp())); } } String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams); int subProcessInstanceId = childInstance == null ? 
0 : childInstance.getId(); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), subProcessDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority(), parentProcessInstance.getDryRun(), subProcessInstanceId, subProcessDefinition.getVersion() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = parentProcessInstance.getHistoryCmd(); commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]); } return commandType; } /** * update sub process definition * * @param parentProcessInstance parentProcessInstance * @param childDefinitionCode childDefinitionId */ private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) { ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(), parentProcessInstance.getProcessDefinitionVersion()); ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode); if (childDefinition != null && fatherDefinition != null) { childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId()); processDefineMapper.updateById(childDefinition); } } /** * submit task to mysql * * @param taskInstance taskInstance * @param processInstance processInstance * @return task instance */ public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus processInstanceState = processInstance.getState(); if (processInstanceState.typeIsFinished() || processInstanceState == ExecutionStatus.READY_PAUSE || processInstanceState == ExecutionStatus.READY_STOP) { logger.warn("processInstance {} was {}, skip submit task", processInstance.getProcessDefinitionCode(), processInstanceState); return null; } taskInstance.setExecutorId(processInstance.getExecutorId()); taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority()); taskInstance.setState(getSubmitTaskState(taskInstance, processInstance)); if (taskInstance.getSubmitTime() == null) { taskInstance.setSubmitTime(new Date()); } if (taskInstance.getFirstSubmitTime() == null) { taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime()); } boolean saveResult = saveTaskInstance(taskInstance); if (!saveResult) { return null; } return taskInstance; } /** * get submit task instance state by the work process state * cannot modify the task state when running/kill/submit success, or this * task instance is already exists in task queue . 
* return pause if work process state is ready pause * return stop if work process state is ready stop * if all of the above are not satisfied, return submit success * * @param taskInstance taskInstance * @param processInstance processInstance * @return process instance state */ public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus state = taskInstance.getState(); // running, delayed or killed // the task already exists in task queue // return state if ( state == ExecutionStatus.RUNNING_EXECUTION || state == ExecutionStatus.DELAY_EXECUTION || state == ExecutionStatus.KILL ) { return state; } // return pause/stop if process instance state is ready pause / stop // or return submit success if (processInstance.getState() == ExecutionStatus.READY_PAUSE) { state = ExecutionStatus.PAUSE; } else if (processInstance.getState() == ExecutionStatus.READY_STOP || !checkProcessStrategy(taskInstance, processInstance)) { state = ExecutionStatus.KILL; } else { state = ExecutionStatus.SUBMITTED_SUCCESS; } return state; } /** * check process instance strategy * * @param taskInstance taskInstance * @return check strategy result */ private boolean checkProcessStrategy(TaskInstance taskInstance, ProcessInstance processInstance) { FailureStrategy failureStrategy = processInstance.getFailureStrategy(); if (failureStrategy == FailureStrategy.CONTINUE) { return true; } List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId()); for (TaskInstance task : taskInstances) { if (task.getState() == ExecutionStatus.FAILURE && task.getRetryTimes() >= task.getMaxRetryTimes()) { return false; } } return true; } /** * insert or update work process instance to database * * @param processInstance processInstance */ public void saveProcessInstance(ProcessInstance processInstance) { if (processInstance == null) { logger.error("save error, process instance is null!"); return; } if (processInstance.getId() != 0) { processInstanceMapper.updateById(processInstance); } else { processInstanceMapper.insert(processInstance); } } /** * insert or update command * * @param command command * @return save command result */ public int saveCommand(Command command) { if (command.getId() != 0) { return commandMapper.updateById(command); } else { return commandMapper.insert(command); } } /** * insert or update task instance * * @param taskInstance taskInstance * @return save task instance result */ public boolean saveTaskInstance(TaskInstance taskInstance) { if (taskInstance.getId() != 0) { return updateTaskInstance(taskInstance); } else { return createTaskInstance(taskInstance); } } /** * insert task instance * * @param taskInstance taskInstance * @return create task instance result */ public boolean createTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.insert(taskInstance); return count > 0; } /** * update task instance * * @param taskInstance taskInstance * @return update task instance result */ public boolean updateTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.updateById(taskInstance); return count > 0; } /** * find task instance by id * * @param taskId task id * @return task instance */ public TaskInstance findTaskInstanceById(Integer taskId) { return taskInstanceMapper.selectById(taskId); } /** * find task instance list by id list * * @param idList task id list * @return task instance list */ public List<TaskInstance> findTaskInstanceByIdList(List<Integer> idList) { if
(CollectionUtils.isEmpty(idList)) { return new ArrayList<>(); } return taskInstanceMapper.selectBatchIds(idList); } /** * package task instance */ public void packageTaskInstance(TaskInstance taskInstance, ProcessInstance processInstance) { taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processInstance.getProcessDefinition()); TaskDefinition taskDefinition = this.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); this.updateTaskDefinitionResources(taskDefinition); taskInstance.setTaskDefine(taskDefinition); } /** * Update {@link ResourceInfo} information in {@link TaskDefinition} * * @param taskDefinition the given {@link TaskDefinition} */ public void updateTaskDefinitionResources(TaskDefinition taskDefinition) { Map<String, Object> taskParameters = JSONUtils.parseObject( taskDefinition.getTaskParams(), new TypeReference<Map<String, Object>>() { }); if (taskParameters != null) { // if contains mainJar field, query resource from database // Flink, Spark, MR if (taskParameters.containsKey("mainJar")) { Object mainJarObj = taskParameters.get("mainJar"); ResourceInfo mainJar = JSONUtils.parseObject( JSONUtils.toJsonString(mainJarObj), ResourceInfo.class); ResourceInfo resourceInfo = updateResourceInfo(mainJar); if (resourceInfo != null) { taskParameters.put("mainJar", resourceInfo); } } // update resourceList information if (taskParameters.containsKey("resourceList")) { String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList")); List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class); List<ResourceInfo> updatedResourceInfos = resourceInfos .stream() .map(this::updateResourceInfo) .filter(Objects::nonNull) .collect(Collectors.toList()); taskParameters.put("resourceList", updatedResourceInfos); } // set task parameters taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters)); } } /** * update {@link ResourceInfo} by given original ResourceInfo * * @param res origin resource info * @return {@link ResourceInfo} */ private ResourceInfo updateResourceInfo(ResourceInfo res) { ResourceInfo resourceInfo = null; // only if mainJar is not null and does not contains "resourceName" field if (res != null) { int resourceId = res.getId(); if (resourceId <= 0) { logger.error("invalid resourceId, {}", resourceId); return null; } resourceInfo = new ResourceInfo(); // get resource from database, only one resource should be returned Resource resource = getResourceById(resourceId); resourceInfo.setId(resourceId); resourceInfo.setRes(resource.getFileName()); resourceInfo.setResourceName(resource.getFullName()); if (logger.isInfoEnabled()) { logger.info("updated resource info {}", JSONUtils.toJsonString(resourceInfo)); } } return resourceInfo; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance states */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process definition id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * @return task instance list */ public 
List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. * * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param varPool varPool */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date 
endTime, int processId, String appIds, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for show in page of taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } //if the result more than one line,just get the first . Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() { }); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } public List<String> queryNeedFailoverProcessInstanceHost() { return processInstanceMapper.queryNeedFailoverProcessInstanceHost(stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { //1 update processInstance host is null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); //2 insert into recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setProcessDefinitionVersion(processDefinition.getVersion()); cmd.setProcessInstanceId(processInstance.getId()); cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); 
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. 
* * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance * * @param processInstance processInstance * @return queue */ public String queryUserQueueByProcessInstance(ProcessInstance processInstance) { String queue = ""; if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinitionLog); if (result <= 0) { return Constants.EXIT_CODE_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); int batchInsert = processTaskRelationMapper.batchInsert(processTaskRelationLogList); if (batchInsert == 0) { return Constants.EXIT_CODE_FAILURE; } else { int result = 0; for (ProcessTaskRelationLog taskRelationLog : processTaskRelationLogList) { int switchResult = switchTaskDefinitionVersion(taskRelationLog.getPostTaskCode(), taskRelationLog.getPostTaskVersion()); if (switchResult != Constants.EXIT_CODE_FAILURE) { result++; } } return result; } } public int switchTaskDefinitionVersion(long taskCode, int taskVersion) { TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null) { return Constants.EXIT_CODE_FAILURE; } if (taskDefinition.getVersion() == taskVersion) { return Constants.EXIT_CODE_SUCCESS; } TaskDefinitionLog taskDefinitionUpdate = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskVersion); if (taskDefinitionUpdate == null) { return Constants.EXIT_CODE_FAILURE; } taskDefinitionUpdate.setUpdateTime(new Date()); taskDefinitionUpdate.setId(taskDefinition.getId()); return taskDefinitionMapper.updateById(taskDefinitionUpdate); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs, Boolean syncDefine) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(CodeGenerateUtils.getInstance().genCode()); } catch (CodeGenerateException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } newTaskDefinitionLogs.add(taskDefinitionLog); } int insertResult = 0; int updateResult = 0; for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate); if (Boolean.TRUE.equals(syncDefine)) { taskDefinitionToUpdate.setId(task.getId()); updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate); } else { updateResult++; } } } if (!newTaskDefinitionLogs.isEmpty()) { insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); if (Boolean.TRUE.equals(syncDefine)) { updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); } else { updateResult += newTaskDefinitionLogs.size(); } } return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean syncDefine, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result = 1; if (Boolean.TRUE.equals(syncDefine)) { if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs, Boolean syncDefine) { if (taskRelationList.isEmpty()) { return Constants.EXIT_CODE_SUCCESS; } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog preTaskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (preTaskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(preTaskDefinitionLog.getVersion()); } TaskDefinitionLog postTaskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()); if (postTaskDefinitionLog != null) { processTaskRelationLog.setPostTaskVersion(postTaskDefinitionLog.getVersion()); } } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } int insert = taskRelationList.size(); if (Boolean.TRUE.equals(syncDefine)) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); boolean result = CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet); if (result) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } insert = processTaskRelationMapper.batchInsert(taskRelationList); } int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (insert & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * Use temporarily before refactoring taskNode * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> taskRelations = this.findRelationByCode(processDefinition.getCode(), processDefinition.getVersion()); List<TaskNode> taskNodeList = transformTask(taskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(taskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> taskRelations = this.findRelationByCode(processDefinition.getCode(), processDefinition.getVersion()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(taskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream().map(t -> (TaskDefinition) t).collect(Collectors.toList()); return new DagData(processDefinition, taskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } if (taskDefinitionSet.isEmpty()) { return Lists.newArrayList(); } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } public List<TaskDefinitionLog> getTaskDefineLogListByRelation(List<ProcessTaskRelation> processTaskRelations) { List<TaskDefinitionLog> taskDefinitionLogs = new ArrayList<>(); Map<Long, Integer> taskCodeVersionMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskCodeVersionMap.put(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()); } if (processTaskRelation.getPostTaskCode() > 0) { taskCodeVersionMap.put(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()); } } taskCodeVersionMap.forEach((code,version) -> { taskDefinitionLogs.add((TaskDefinitionLog) this.findTaskDefinition(code, version)); }); return taskDefinitionLogs; } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return 
taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by process */ public List<ProcessTaskRelation> findRelationByCode(long processDefinitionCode, int processDefinitionVersion) { List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinitionCode, processDefinitionVersion); return processTaskRelationLogList.stream().map(r -> (ProcessTaskRelation) r).collect(Collectors.toList()); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? 
Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList()))); taskNode.setTaskGroupId(taskDefinitionLog.getTaskGroupId()); taskNode.setTaskGroupPriority(taskDefinitionLog.getTaskGroupPriority()); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } public DqExecuteResult getDqExecuteResultByTaskInstanceId(int taskInstanceId) { return dqExecuteResultMapper.getExecuteResultById(taskInstanceId); } public int updateDqExecuteResultUserId(int taskInstanceId) { DqExecuteResult dqExecuteResult = dqExecuteResultMapper.selectOne(new QueryWrapper<DqExecuteResult>().eq(TASK_INSTANCE_ID,taskInstanceId)); if (dqExecuteResult == null) { return -1; } ProcessInstance processInstance = processInstanceMapper.selectById(dqExecuteResult.getProcessInstanceId()); if (processInstance == null) { return -1; } ProcessDefinition processDefinition = processDefineMapper.queryByCode(processInstance.getProcessDefinitionCode()); if (processDefinition == null) { return -1; } dqExecuteResult.setProcessDefinitionId(processDefinition.getId()); dqExecuteResult.setUserId(processDefinition.getUserId()); dqExecuteResult.setState(DqTaskState.DEFAULT.getCode()); return dqExecuteResultMapper.updateById(dqExecuteResult); } public int updateDqExecuteResultState(DqExecuteResult dqExecuteResult) { return dqExecuteResultMapper.updateById(dqExecuteResult); } public int deleteDqExecuteResultByTaskInstanceId(int taskInstanceId) { return dqExecuteResultMapper.delete( new QueryWrapper<DqExecuteResult>() .eq(TASK_INSTANCE_ID,taskInstanceId)); } public int deleteTaskStatisticsValueByTaskInstanceId(int taskInstanceId) { return 
dqTaskStatisticsValueMapper.delete( new QueryWrapper<DqTaskStatisticsValue>() .eq(TASK_INSTANCE_ID,taskInstanceId)); } public DqRule getDqRule(int ruleId) { return dqRuleMapper.selectById(ruleId); } public List<DqRuleInputEntry> getRuleInputEntry(int ruleId) { return DqRuleUtils.transformInputEntry(dqRuleInputEntryMapper.getRuleInputEntryList(ruleId)); } public List<DqRuleExecuteSql> getDqExecuteSql(int ruleId) { return dqRuleExecuteSqlMapper.getExecuteSqlList(ruleId); } public DqComparisonType getComparisonTypeById(int id) { return dqComparisonTypeMapper.selectById(id); } /** * the first time (when submit the task ) get the resource of the task group * @param taskId task id * @param taskName * @param groupId * @param processId * @param priority * @return */ public boolean acquireTaskGroup(int taskId, String taskName, int groupId, int processId, int priority) { TaskGroup taskGroup = taskGroupMapper.selectById(groupId); if (taskGroup == null) { return true; } // if task group is not applicable if (taskGroup.getStatus() == Flag.NO.getCode()) { return true; } TaskGroupQueue taskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskId); if (taskGroupQueue == null) { taskGroupQueue = insertIntoTaskGroupQueue(taskId, taskName, groupId, processId, priority, TaskGroupQueueStatus.WAIT_QUEUE); } else { if (taskGroupQueue.getStatus() == TaskGroupQueueStatus.ACQUIRE_SUCCESS) { return true; } taskGroupQueue.setInQueue(Flag.NO.getCode()); taskGroupQueue.setStatus(TaskGroupQueueStatus.WAIT_QUEUE); this.taskGroupQueueMapper.updateById(taskGroupQueue); } //check priority List<TaskGroupQueue> highPriorityTasks = taskGroupQueueMapper.queryHighPriorityTasks(groupId, priority, TaskGroupQueueStatus.WAIT_QUEUE.getCode()); if (CollectionUtils.isNotEmpty(highPriorityTasks)) { this.taskGroupQueueMapper.updateInQueue(Flag.NO.getCode(), taskGroupQueue.getId()); return false; } //try to get taskGroup int count = taskGroupMapper.selectAvailableCountById(groupId); if (count == 1 && robTaskGroupResouce(taskGroupQueue)) { return true; } this.taskGroupQueueMapper.updateInQueue(Flag.NO.getCode(), taskGroupQueue.getId()); return false; } /** * try to get the task group resource(when other task release the resource) * @param taskGroupQueue * @return */ public boolean robTaskGroupResouce(TaskGroupQueue taskGroupQueue) { TaskGroup taskGroup = taskGroupMapper.selectById(taskGroupQueue.getGroupId()); int affectedCount = taskGroupMapper.updateTaskGroupResource(taskGroup.getId(),taskGroupQueue.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode()); if (affectedCount > 0) { taskGroupQueue.setStatus(TaskGroupQueueStatus.ACQUIRE_SUCCESS); this.taskGroupQueueMapper.updateById(taskGroupQueue); this.taskGroupQueueMapper.updateInQueue(Flag.NO.getCode(), taskGroupQueue.getId()); return true; } return false; } public boolean acquireTaskGroupAgain(TaskGroupQueue taskGroupQueue) { return robTaskGroupResouce(taskGroupQueue); } public void releaseAllTaskGroup(int processInstanceId) { List<TaskInstance> taskInstances = this.taskInstanceMapper.loadAllInfosNoRelease(processInstanceId, TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()); for (TaskInstance info : taskInstances) { releaseTaskGroup(info); } } /** * release the TGQ resource when the corresponding task is finished. 
* * @return the result code and msg */ public TaskInstance releaseTaskGroup(TaskInstance taskInstance) { TaskGroup taskGroup = taskGroupMapper.selectById(taskInstance.getTaskGroupId()); if (taskGroup == null) { return null; } TaskGroupQueue thisTaskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskInstance.getId()); if (thisTaskGroupQueue.getStatus() == TaskGroupQueueStatus.RELEASE) { return null; } try { while (taskGroupMapper.releaseTaskGroupResource(taskGroup.getId(), taskGroup.getUseSize() , thisTaskGroupQueue.getId(), TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()) != 1) { thisTaskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskInstance.getId()); if (thisTaskGroupQueue.getStatus() == TaskGroupQueueStatus.RELEASE) { return null; } taskGroup = taskGroupMapper.selectById(taskInstance.getTaskGroupId()); } } catch (Exception e) { logger.error("release the task group error",e); } logger.info("updateTask:{}",taskInstance.getName()); changeTaskGroupQueueStatus(taskInstance.getId(), TaskGroupQueueStatus.RELEASE); TaskGroupQueue taskGroupQueue = this.taskGroupQueueMapper.queryTheHighestPriorityTasks(taskGroup.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode(), Flag.NO.getCode(), Flag.NO.getCode()); if (taskGroupQueue == null) { return null; } while (this.taskGroupQueueMapper.updateInQueueCAS(Flag.NO.getCode(), Flag.YES.getCode(), taskGroupQueue.getId()) != 1) { taskGroupQueue = this.taskGroupQueueMapper.queryTheHighestPriorityTasks(taskGroup.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode(), Flag.NO.getCode(), Flag.NO.getCode()); if (taskGroupQueue == null) { return null; } } return this.taskInstanceMapper.selectById(taskGroupQueue.getTaskId()); } /** * release the TGQ resource when the corresponding task is finished. * * @param taskId task id * @return the result code and msg */ public void changeTaskGroupQueueStatus(int taskId, TaskGroupQueueStatus status) { TaskGroupQueue taskGroupQueue = taskGroupQueueMapper.queryByTaskId(taskId); taskGroupQueue.setStatus(status); taskGroupQueue.setUpdateTime(new Date(System.currentTimeMillis())); taskGroupQueueMapper.updateById(taskGroupQueue); } /** * insert into task group queue * * @param taskId task id * @param taskName task name * @param groupId group id * @param processId process id * @param priority priority * @return result and msg code */ public TaskGroupQueue insertIntoTaskGroupQueue(Integer taskId, String taskName, Integer groupId, Integer processId, Integer priority, TaskGroupQueueStatus status) { TaskGroupQueue taskGroupQueue = new TaskGroupQueue(taskId, taskName, groupId, processId, priority, status); taskGroupQueue.setCreateTime(new Date()); taskGroupQueue.setUpdateTime(new Date()); taskGroupQueueMapper.insert(taskGroupQueue); return taskGroupQueue; } public int updateTaskGroupQueueStatus(Integer taskId, int status) { return taskGroupQueueMapper.updateStatusByTaskId(taskId, status); } public int updateTaskGroupQueue(TaskGroupQueue taskGroupQueue) { return taskGroupQueueMapper.updateById(taskGroupQueue); } public TaskGroupQueue loadTaskGroupQueue(int taskId) { return this.taskGroupQueueMapper.queryByTaskId(taskId); } public void sendStartTask2Master(ProcessInstance processInstance,int taskId, org.apache.dolphinscheduler.remote.command.CommandType taskType) { String host = processInstance.getHost(); String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); TaskEventChangeCommand taskEventChangeCommand = new TaskEventChangeCommand( processInstance.getId(), taskId ); 
stateEventCallbackService.sendResult(address, port, taskEventChangeCommand.convert2Command(taskType)); } public ProcessInstance loadNextProcess4Serial(long code, int state) { return this.processInstanceMapper.loadNextProcess4Serial(code, state); } private void deleteCommandWithCheck(int commandId) { int delete = this.commandMapper.deleteById(commandId); if (delete != 1) { throw new ServiceException("delete command fail, id:" + commandId); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,499
[Feature][Master] Optimize workflow lineage query speed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Optimize workflow lineage query speed ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8499
https://github.com/apache/dolphinscheduler/pull/8500
64baf6dc3db9f58d32947539a6fa36ac6e1eb200
512f8cb5dde7fb42398e7caa50c0c6665b6ec45f
"2022-02-23T05:37:00Z"
java
"2022-02-23T08:53:36Z"
dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`; CREATE TABLE `QRTZ_BLOB_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `BLOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CALENDARS`; CREATE TABLE `QRTZ_CALENDARS` ( `SCHED_NAME` varchar(120) NOT NULL, `CALENDAR_NAME` varchar(200) NOT NULL, `CALENDAR` blob NOT NULL, PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`; CREATE TABLE `QRTZ_CRON_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `CRON_EXPRESSION` varchar(120) NOT NULL, `TIME_ZONE_ID` varchar(80) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`; CREATE TABLE `QRTZ_FIRED_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `ENTRY_ID` varchar(200) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `FIRED_TIME` bigint(13) NOT NULL, `SCHED_TIME` bigint(13) NOT NULL, `PRIORITY` int(11) NOT NULL, `STATE` varchar(16) NOT NULL, `JOB_NAME` varchar(200) DEFAULT NULL, `JOB_GROUP` varchar(200) DEFAULT NULL, `IS_NONCONCURRENT` varchar(1) DEFAULT NULL, `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`), KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`), KEY 
`IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`; CREATE TABLE `QRTZ_JOB_DETAILS` ( `SCHED_NAME` varchar(120) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `JOB_CLASS_NAME` varchar(250) NOT NULL, `IS_DURABLE` varchar(1) NOT NULL, `IS_NONCONCURRENT` varchar(1) NOT NULL, `IS_UPDATE_DATA` varchar(1) NOT NULL, `REQUESTS_RECOVERY` varchar(1) NOT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_LOCKS`; CREATE TABLE `QRTZ_LOCKS` ( `SCHED_NAME` varchar(120) NOT NULL, `LOCK_NAME` varchar(40) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`; CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`; CREATE TABLE `QRTZ_SCHEDULER_STATE` ( `SCHED_NAME` varchar(120) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `LAST_CHECKIN_TIME` bigint(13) NOT NULL, `CHECKIN_INTERVAL` bigint(13) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`; CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `REPEAT_COUNT` bigint(7) NOT NULL, `REPEAT_INTERVAL` bigint(12) NOT NULL, `TIMES_TRIGGERED` bigint(10) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- 
---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`; CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `STR_PROP_1` varchar(512) DEFAULT NULL, `STR_PROP_2` varchar(512) DEFAULT NULL, `STR_PROP_3` varchar(512) DEFAULT NULL, `INT_PROP_1` int(11) DEFAULT NULL, `INT_PROP_2` int(11) DEFAULT NULL, `LONG_PROP_1` bigint(20) DEFAULT NULL, `LONG_PROP_2` bigint(20) DEFAULT NULL, `DEC_PROP_1` decimal(13,4) DEFAULT NULL, `DEC_PROP_2` decimal(13,4) DEFAULT NULL, `BOOL_PROP_1` varchar(1) DEFAULT NULL, `BOOL_PROP_2` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_TRIGGERS`; CREATE TABLE `QRTZ_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL, `PREV_FIRE_TIME` bigint(13) DEFAULT NULL, `PRIORITY` int(11) DEFAULT NULL, `TRIGGER_STATE` varchar(16) NOT NULL, `TRIGGER_TYPE` varchar(8) NOT NULL, `START_TIME` bigint(13) NOT NULL, `END_TIME` bigint(13) DEFAULT NULL, `CALENDAR_NAME` varchar(200) DEFAULT NULL, `MISFIRE_INSTR` smallint(2) DEFAULT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`), KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS `t_ds_access_token`; CREATE TABLE `t_ds_access_token` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `token` varchar(64) DEFAULT NULL COMMENT 'token', `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB 
AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert`; CREATE TABLE `t_ds_alert` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `title` varchar(64) DEFAULT NULL COMMENT 'title', `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)', `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed', `log` text COMMENT 'log', `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alertgroup`; CREATE TABLE `t_ds_alertgroup`( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids', `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id', `group_name` varchar(255) DEFAULT NULL COMMENT 'group name', `description` varchar(255) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_command`; CREATE TABLE `t_ds_command` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `process_definition_version` int(11) DEFAULT '0' COMMENT 'process definition version', `process_instance_id` int(11) DEFAULT '0' COMMENT 'process instance id', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `dry_run` tinyint(4) DEFAULT 
'0' COMMENT 'dry run flag:0 normal, 1 dry run', PRIMARY KEY (`id`), KEY `priority_id_index` (`process_instance_priority`,`id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_datasource -- ---------------------------- DROP TABLE IF EXISTS `t_ds_datasource`; CREATE TABLE `t_ds_datasource` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(64) NOT NULL COMMENT 'data source name', `note` varchar(255) DEFAULT NULL COMMENT 'description', `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark', `user_id` int(11) NOT NULL COMMENT 'the creator id', `connection_params` text NOT NULL COMMENT 'json connection params', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_error_command`; CREATE TABLE `t_ds_error_command` ( `id` int(11) NOT NULL COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `process_definition_version` int(11) DEFAULT '0' COMMENT 'process definition version', `process_instance_id` int(11) DEFAULT '0' COMMENT 'process instance id: 0', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `message` text COMMENT 'message', `dry_run` tinyint(4) DEFAULT '0' COMMENT 'dry run flag: 0 normal, 1 dry run', PRIMARY KEY (`id`) USING BTREE ) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC; -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition`; CREATE TABLE `t_ds_process_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(255) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT '0' COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', 
`global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `execution_type` tinyint(4) DEFAULT '0' COMMENT 'execution_type 0:parallel,1:serial wait,2:serial discard,3:serial priority', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition_log`; CREATE TABLE `t_ds_process_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT '0' COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `execution_type` tinyint(4) DEFAULT '0' COMMENT 'execution_type 0:parallel,1:serial wait,2:serial discard,3:serial priority', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition`; CREATE TABLE `t_ds_task_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT '0' COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) 
DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` text COMMENT 'resource id, separated by comma', `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id', `task_group_priority` tinyint(4) DEFAULT '0' COMMENT 'task group priority', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition_log`; CREATE TABLE `t_ds_task_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT '0' COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` text DEFAULT NULL COMMENT 'resource id, separated by comma', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id', `task_group_priority` tinyint(4) DEFAULT 0 COMMENT 'task group priority', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `idx_code_version` (`code`,`version`), KEY `idx_project_code` (`project_code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation`; CREATE TABLE `t_ds_process_task_relation` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `process_definition_version` int(11) NOT NULL COMMENT 'process version', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 
'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `idx_code` (`project_code`,`process_definition_code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation_log`; CREATE TABLE `t_ds_process_task_relation_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `process_definition_version` int(11) NOT NULL COMMENT 'process version', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `idx_process_code_version` (`process_definition_code`,`process_definition_version`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_instance`; CREATE TABLE `t_ds_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'process instance name', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `process_definition_version` int(11) DEFAULT '0' COMMENT 'process definition version', `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance', `start_time` datetime DEFAULT NULL COMMENT 'process instance start time', `end_time` datetime DEFAULT NULL COMMENT 'process instance end time', `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times', `host` varchar(135) DEFAULT NULL COMMENT 'process instance host', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes', `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 
0:no warning,1:warning if process success,2:warning if process failed,3:warning if success', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `command_start_time` datetime DEFAULT NULL COMMENT 'command start time', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT '1' COMMENT 'flag', `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process', `executor_id` int(11) NOT NULL COMMENT 'executor id', `history_cmd` text COMMENT 'history commands of process instance operation', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `var_pool` longtext COMMENT 'var_pool', `dry_run` tinyint(4) DEFAULT '0' COMMENT 'dry run flag:0 normal, 1 dry run', `next_process_instance_id` int(11) DEFAULT '0' COMMENT 'serial queue next processInstanceId', `restart_time` datetime DEFAULT NULL COMMENT 'process instance restart time', PRIMARY KEY (`id`), KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE, KEY `start_time_index` (`start_time`,`end_time`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS `t_ds_project`; CREATE TABLE `t_ds_project` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(100) DEFAULT NULL COMMENT 'project name', `code` bigint(20) NOT NULL COMMENT 'encoding', `description` varchar(200) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_queue`; CREATE TABLE `t_ds_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name', `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_datasource_user`; CREATE TABLE `t_ds_relation_datasource_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id', 
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_process_instance`;
CREATE TABLE `t_ds_relation_process_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent task instance id',
  `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_project_user`;
CREATE TABLE `t_ds_relation_project_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `project_id` int(11) DEFAULT NULL COMMENT 'project id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_resources_user`;
CREATE TABLE `t_ds_relation_resources_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_udfs_user`;
CREATE TABLE `t_ds_relation_udfs_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_resources`;
CREATE TABLE `t_ds_resources` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `alias` varchar(64) DEFAULT NULL COMMENT 'alias',
  `file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
  `description` varchar(255) DEFAULT NULL,
  `user_id` int(11) DEFAULT NULL COMMENT
'user id', `type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF', `size` bigint(20) DEFAULT NULL COMMENT 'resource size', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `pid` int(11) DEFAULT NULL, `full_name` varchar(64) DEFAULT NULL, `is_directory` tinyint(4) DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS `t_ds_schedules`; CREATE TABLE `t_ds_schedules` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `start_time` datetime NOT NULL COMMENT 'start time', `end_time` datetime NOT NULL COMMENT 'end time', `timezone_id` varchar(40) DEFAULT NULL COMMENT 'schedule timezone id', `crontab` varchar(255) NOT NULL COMMENT 'crontab description', `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue', `user_id` int(11) NOT NULL COMMENT 'user id', `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ', `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS `t_ds_session`; CREATE TABLE `t_ds_session` ( `id` varchar(64) NOT NULL COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `ip` varchar(45) DEFAULT NULL COMMENT 'ip', `last_login_time` datetime DEFAULT NULL COMMENT 'last login time', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_instance`; CREATE TABLE `t_ds_task_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'task name', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_code` bigint(20) NOT NULL COMMENT 'task definition code', `task_definition_version` int(11) DEFAULT '0' COMMENT 'task definition version', `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id', `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `submit_time` datetime DEFAULT NULL COMMENT 'task submit time', `start_time` datetime DEFAULT NULL 
COMMENT 'task start time', `end_time` datetime DEFAULT NULL COMMENT 'task end time', `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on', `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host', `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path', `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert', `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times', `pid` int(4) DEFAULT NULL COMMENT 'pid of task', `app_link` text COMMENT 'yarn app id', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ', `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times', `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `environment_config` text COMMENT 'this config contains many environment variables config', `executor_id` int(11) DEFAULT NULL, `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time', `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time', `var_pool` longtext COMMENT 'var_pool', `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id', `dry_run` tinyint(4) DEFAULT '0' COMMENT 'dry run flag: 0 normal, 1 dry run', PRIMARY KEY (`id`), KEY `process_instance_id` (`process_instance_id`) USING BTREE, KEY `idx_code_version` (`task_code`, `task_definition_version`) USING BTREE, CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS `t_ds_tenant`; CREATE TABLE `t_ds_tenant` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code', `description` varchar(255) DEFAULT NULL, `queue_id` int(11) DEFAULT NULL COMMENT 'queue id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS `t_ds_udfs`; CREATE TABLE `t_ds_udfs` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `func_name` varchar(100) NOT NULL COMMENT 'UDF function name', `class_name` varchar(255) NOT NULL COMMENT 'class of udf', `type` tinyint(4) NOT NULL COMMENT 'Udf function type', `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types', `database` varchar(255) DEFAULT NULL COMMENT 'data base', `description` varchar(255) DEFAULT NULL, `resource_id` int(11) NOT NULL COMMENT 'resource id', `resource_name` varchar(255) NOT NULL COMMENT 'resource name', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_udfs -- 
----------------------------

-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_user`;
CREATE TABLE `t_ds_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
  `user_name` varchar(64) DEFAULT NULL COMMENT 'user name',
  `user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
  `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user',
  `email` varchar(64) DEFAULT NULL COMMENT 'email',
  `phone` varchar(11) DEFAULT NULL COMMENT 'phone',
  `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `queue` varchar(64) DEFAULT NULL COMMENT 'queue',
  `state` tinyint(4) DEFAULT '1' COMMENT 'state 0:disable 1:enable',
  `time_zone` varchar(32) DEFAULT NULL COMMENT 'time zone',
  PRIMARY KEY (`id`),
  UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_worker_group
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
  `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `name` varchar(255) NOT NULL COMMENT 'worker group name',
  `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
  `create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
  `update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_worker_group
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_version`;
CREATE TABLE `t_ds_version` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `version` varchar(200) NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `version_UNIQUE` (`version`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version';

-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO `t_ds_version` VALUES ('1', '2.0.2');

-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');

-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1, null);

-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
  `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type .
alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `t_ds_dq_comparison_type` -- DROP TABLE IF EXISTS `t_ds_dq_comparison_type`; CREATE TABLE `t_ds_dq_comparison_type` ( `id` int(11) NOT NULL AUTO_INCREMENT, `type` varchar(100) NOT NULL, `execute_sql` text DEFAULT NULL, `output_table` varchar(100) DEFAULT NULL, `name` varchar(100) DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, `is_inner_source` tinyint(1) DEFAULT '0', PRIMARY KEY (`id`) )ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(1, 'FixValue', NULL, NULL, NULL, '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', false); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(2, 'DailyAvg', 'select round(avg(statistics_value),2) as day_avg from t_ds_dq_task_statistics_value where data_time >=date_trunc(''DAY'', ${data_time}) and data_time < date_add(date_trunc(''day'', ${data_time}),1) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'day_range', 'day_range.day_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(3, 'WeeklyAvg', 'select round(avg(statistics_value),2) as week_avg from t_ds_dq_task_statistics_value where data_time >= date_trunc(''WEEK'', ${data_time}) and data_time <date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'week_range', 'week_range.week_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(4, 'MonthlyAvg', 'select round(avg(statistics_value),2) as month_avg from t_ds_dq_task_statistics_value where data_time >= date_trunc(''MONTH'', ${data_time}) and data_time <date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'month_range', 'month_range.month_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(5, 'Last7DayAvg', 
'select round(avg(statistics_value),2) as last_7_avg from t_ds_dq_task_statistics_value where data_time >= date_add(date_trunc(''day'', ${data_time}),-7) and data_time <date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'last_seven_days', 'last_seven_days.last_7_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(6, 'Last30DayAvg', 'select round(avg(statistics_value),2) as last_30_avg from t_ds_dq_task_statistics_value where data_time >= date_add(date_trunc(''day'', ${data_time}),-30) and data_time < date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'last_thirty_days', 'last_thirty_days.last_30_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(7, 'SrcTableTotalRows', 'SELECT COUNT(*) AS total FROM ${src_table} WHERE (${src_filter})', 'total_count', 'total_count.total', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', false); INSERT INTO `t_ds_dq_comparison_type` (`id`, `type`, `execute_sql`, `output_table`, `name`, `create_time`, `update_time`, `is_inner_source`) VALUES(8, 'TargetTableTotalRows', 'SELECT COUNT(*) AS total FROM ${target_table} WHERE (${target_filter})', 'total_count', 'total_count.total', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', false); -- -- Table structure for table `t_ds_dq_execute_result` -- DROP TABLE IF EXISTS `t_ds_dq_execute_result`; CREATE TABLE `t_ds_dq_execute_result` ( `id` int(11) NOT NULL AUTO_INCREMENT, `process_definition_id` int(11) DEFAULT NULL, `process_instance_id` int(11) DEFAULT NULL, `task_instance_id` int(11) DEFAULT NULL, `rule_type` int(11) DEFAULT NULL, `rule_name` varchar(255) DEFAULT NULL, `statistics_value` double DEFAULT NULL, `comparison_value` double DEFAULT NULL, `check_type` int(11) DEFAULT NULL, `threshold` double DEFAULT NULL, `operator` int(11) DEFAULT NULL, `failure_strategy` int(11) DEFAULT NULL, `state` int(11) DEFAULT NULL, `user_id` int(11) DEFAULT NULL, `comparison_type` int(11) DEFAULT NULL, `error_output_path` text DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_dq_rule -- DROP TABLE IF EXISTS `t_ds_dq_rule`; CREATE TABLE `t_ds_dq_rule` ( `id` int(11) NOT NULL AUTO_INCREMENT, `name` varchar(100) DEFAULT NULL, `type` int(11) DEFAULT NULL, `user_id` int(11) DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(1, '$t(null_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(2, '$t(custom_sql)', 1, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(3, '$t(multi_table_accuracy)', 2, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) 
VALUES(4, '$t(multi_table_value_comparison)', 3, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(5, '$t(field_length_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(6, '$t(uniqueness_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(7, '$t(regexp_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(8, '$t(timeliness_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(9, '$t(enumeration_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO `t_ds_dq_rule` (`id`, `name`, `type`, `user_id`, `create_time`, `update_time`) VALUES(10, '$t(table_count_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); -- -- Table structure for table `t_ds_dq_rule_execute_sql` -- DROP TABLE IF EXISTS `t_ds_dq_rule_execute_sql`; CREATE TABLE `t_ds_dq_rule_execute_sql` ( `id` int(11) NOT NULL AUTO_INCREMENT, `index` int(11) DEFAULT NULL, `sql` text DEFAULT NULL, `table_alias` varchar(255) DEFAULT NULL, `type` int(11) DEFAULT NULL, `is_error_output_sql` tinyint(1) DEFAULT '0', `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(1, 1, 'SELECT COUNT(*) AS nulls FROM null_items', 'null_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(2, 1, 'SELECT COUNT(*) AS total FROM ${src_table} WHERE (${src_filter})', 'total_count', 2, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(3, 1, 'SELECT COUNT(*) AS miss from miss_items', 'miss_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(4, 1, 'SELECT COUNT(*) AS valids FROM invalid_length_items', 'invalid_length_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(5, 1, 'SELECT COUNT(*) AS total FROM ${target_table} WHERE (${target_filter})', 'total_count', 2, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(6, 1, 'SELECT ${src_field} FROM ${src_table} group by ${src_field} having count(*) > 1', 'duplicate_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, 
`table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(7, 1, 'SELECT COUNT(*) AS duplicates FROM duplicate_items', 'duplicate_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(8, 1, 'SELECT ${src_table}.* FROM (SELECT * FROM ${src_table} WHERE (${src_filter})) ${src_table} LEFT JOIN (SELECT * FROM ${target_table} WHERE (${target_filter})) ${target_table} ON ${on_clause} WHERE ${where_clause}', 'miss_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(9, 1, 'SELECT * FROM ${src_table} WHERE (${src_field} not regexp ''${regexp_pattern}'') AND (${src_filter}) ', 'regexp_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(10, 1, 'SELECT COUNT(*) AS regexps FROM regexp_items', 'regexp_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(11, 1, 'SELECT * FROM ${src_table} WHERE (to_unix_timestamp(${src_field}, ''${datetime_format}'')-to_unix_timestamp(''${deadline}'', ''${datetime_format}'') <= 0) AND (to_unix_timestamp(${src_field}, ''${datetime_format}'')-to_unix_timestamp(''${begin_time}'', ''${datetime_format}'') >= 0) AND (${src_filter}) ', 'timeliness_items', 0, 1, '2021-03-03 11:31:24.0', '2021-03-03 11:31:24.0'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(12, 1, 'SELECT COUNT(*) AS timeliness FROM timeliness_items', 'timeliness_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(13, 1, 'SELECT * FROM ${src_table} where (${src_field} not in ( ${enum_list} ) or ${src_field} is null) AND (${src_filter}) ', 'enum_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(14, 1, 'SELECT COUNT(*) AS enums FROM enum_items', 'enum_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(15, 1, 'SELECT COUNT(*) AS total FROM ${src_table} WHERE (${src_filter})', 'table_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(16, 1, 'SELECT * FROM ${src_table} WHERE (${src_field} is null or ${src_field} = '''') AND (${src_filter})', 'null_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_execute_sql` (`id`, `index`, `sql`, `table_alias`, `type`, `is_error_output_sql`, `create_time`, `update_time`) VALUES(17, 1, 'SELECT * FROM ${src_table} 
WHERE (length(${src_field}) ${logic_operator} ${field_length}) AND (${src_filter})', 'invalid_length_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); -- -- Table structure for table `t_ds_dq_rule_input_entry` -- DROP TABLE IF EXISTS `t_ds_dq_rule_input_entry`; CREATE TABLE `t_ds_dq_rule_input_entry` ( `id` int(11) NOT NULL AUTO_INCREMENT, `field` varchar(255) DEFAULT NULL, `type` varchar(255) DEFAULT NULL, `title` varchar(255) DEFAULT NULL, `value` varchar(255) DEFAULT NULL, `options` text DEFAULT NULL, `placeholder` varchar(255) DEFAULT NULL, `option_source_type` int(11) DEFAULT NULL, `value_type` int(11) DEFAULT NULL, `input_type` int(11) DEFAULT NULL, `is_show` tinyint(1) DEFAULT '1', `can_edit` tinyint(1) DEFAULT '1', `is_emit` tinyint(1) DEFAULT '0', `is_validate` tinyint(1) DEFAULT '1', `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(1, 'src_connector_type', 'select', '$t(src_connector_type)', '', '[{"label":"HIVE","value":"HIVE"},{"label":"JDBC","value":"JDBC"}]', 'please select source connector type', 2, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(2, 'src_datasource_id', 'select', '$t(src_datasource_id)', '', NULL, 'please select source datasource id', 1, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(3, 'src_table', 'select', '$t(src_table)', NULL, NULL, 'Please enter source table name', 0, 0, 0, 1, 1, 1, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(4, 'src_filter', 'input', '$t(src_filter)', NULL, NULL, 'Please enter filter expression', 0, 3, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(5, 'src_field', 'select', '$t(src_field)', NULL, NULL, 'Please enter column, only single column is supported', 0, 0, 0, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(6, 'statistics_name', 'input', '$t(statistics_name)', NULL, NULL, 'Please enter statistics name, the alias in statistics execute sql', 0, 0, 1, 0, 0, 0, 1, '2021-03-03 11:31:24.000', 
'2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(7, 'check_type', 'select', '$t(check_type)', '0', '[{"label":"Expected - Actual","value":"0"},{"label":"Actual - Expected","value":"1"},{"label":"Actual / Expected","value":"2"},{"label":"(Expected - Actual) / Expected","value":"3"}]', 'please select check type', 0, 0, 3, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(8, 'operator', 'select', '$t(operator)', '0', '[{"label":"=","value":"0"},{"label":"<","value":"1"},{"label":"<=","value":"2"},{"label":">","value":"3"},{"label":">=","value":"4"},{"label":"!=","value":"5"}]', 'please select operator', 0, 0, 3, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(9, 'threshold', 'input', '$t(threshold)', NULL, NULL, 'Please enter threshold, number is needed', 0, 2, 3, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(10, 'failure_strategy', 'select', '$t(failure_strategy)', '0', '[{"label":"Alert","value":"0"},{"label":"Block","value":"1"}]', 'please select failure strategy', 0, 0, 3, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(11, 'target_connector_type', 'select', '$t(target_connector_type)', '', '[{"label":"HIVE","value":"HIVE"},{"label":"JDBC","value":"JDBC"}]', 'Please select target connector type', 2, 0, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(12, 'target_datasource_id', 'select', '$t(target_datasource_id)', '', NULL, 'Please select target datasource', 1, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(13, 'target_table', 'select', '$t(target_table)', NULL, NULL, 'Please enter target table', 0, 0, 0, 1, 1, 1, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, 
`input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(14, 'target_filter', 'input', '$t(target_filter)', NULL, NULL, 'Please enter target filter expression', 0, 3, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(15, 'mapping_columns', 'group', '$t(mapping_columns)', NULL, '[{"field":"src_field","props":{"placeholder":"Please input src field","rows":0,"disabled":false,"size":"small"},"type":"input","title":"src_field"},{"field":"operator","props":{"placeholder":"Please input operator","rows":0,"disabled":false,"size":"small"},"type":"input","title":"operator"},{"field":"target_field","props":{"placeholder":"Please input target field","rows":0,"disabled":false,"size":"small"},"type":"input","title":"target_field"}]', 'please enter mapping columns', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(16, 'statistics_execute_sql', 'textarea', '$t(statistics_execute_sql)', NULL, NULL, 'Please enter statistics execute sql', 0, 3, 0, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(17, 'comparison_name', 'input', '$t(comparison_name)', NULL, NULL, 'Please enter comparison name, the alias in comparison execute sql', 0, 0, 0, 0, 0, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(18, 'comparison_execute_sql', 'textarea', '$t(comparison_execute_sql)', NULL, NULL, 'Please enter comparison execute sql', 0, 3, 0, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(19, 'comparison_type', 'select', '$t(comparison_type)', '', NULL, 'Please enter comparison title', 3, 0, 2, 1, 0, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(20, 'writer_connector_type', 'select', '$t(writer_connector_type)', '', '[{"label":"MYSQL","value":"0"},{"label":"POSTGRESQL","value":"1"}]', 'please select writer connector type', 0, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, 
`input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(21, 'writer_datasource_id', 'select', '$t(writer_datasource_id)', '', NULL, 'please select writer datasource id', 1, 2, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(22, 'target_field', 'select', '$t(target_field)', NULL, NULL, 'Please enter column, only single column is supported', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(23, 'field_length', 'input', '$t(field_length)', NULL, NULL, 'Please enter length limit', 0, 3, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(24, 'logic_operator', 'select', '$t(logic_operator)', '=', '[{"label":"=","value":"="},{"label":"<","value":"<"},{"label":"<=","value":"<="},{"label":">","value":">"},{"label":">=","value":">="},{"label":"<>","value":"<>"}]', 'please select logic operator', 0, 0, 3, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(25, 'regexp_pattern', 'input', '$t(regexp_pattern)', NULL, NULL, 'Please enter regexp pattern', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(26, 'deadline', 'input', '$t(deadline)', NULL, NULL, 'Please enter deadline', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(27, 'datetime_format', 'input', '$t(datetime_format)', NULL, NULL, 'Please enter datetime format', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, `create_time`, `update_time`) VALUES(28, 'enum_list', 'input', '$t(enum_list)', NULL, NULL, 'Please enter enumeration', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_dq_rule_input_entry` (`id`, `field`, `type`, `title`, `value`, `options`, `placeholder`, `option_source_type`, `value_type`, `input_type`, `is_show`, `can_edit`, `is_emit`, `is_validate`, 
`create_time`, `update_time`) VALUES(29, 'begin_time', 'input', '$t(begin_time)', NULL, NULL, 'Please enter begin time', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.0', '2021-03-03 11:31:24.0'); -- -- Table structure for table `t_ds_dq_task_statistics_value` -- DROP TABLE IF EXISTS `t_ds_dq_task_statistics_value`; CREATE TABLE `t_ds_dq_task_statistics_value` ( `id` int(11) NOT NULL AUTO_INCREMENT, `process_definition_id` int(11) DEFAULT NULL, `task_instance_id` int(11) DEFAULT NULL, `rule_id` int(11) NOT NULL, `unique_code` varchar(255) NULL, `statistics_name` varchar(255) NULL, `statistics_value` double NULL, `data_time` datetime DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `t_ds_relation_rule_execute_sql` -- DROP TABLE IF EXISTS `t_ds_relation_rule_execute_sql`; CREATE TABLE `t_ds_relation_rule_execute_sql` ( `id` int(11) NOT NULL AUTO_INCREMENT, `rule_id` int(11) DEFAULT NULL, `execute_sql_id` int(11) DEFAULT NULL, `create_time` datetime NULL, `update_time` datetime NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(1, 1, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(3, 5, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(2, 3, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(4, 3, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(5, 6, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(6, 6, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(7, 7, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(8, 7, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(9, 8, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(10, 8, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(11, 9, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(12, 9, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(13, 10, 15, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); 
INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(14, 1, 16, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_execute_sql` (`id`, `rule_id`, `execute_sql_id`, `create_time`, `update_time`) VALUES(15, 5, 17, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); -- -- Table structure for table `t_ds_relation_rule_input_entry` -- DROP TABLE IF EXISTS `t_ds_relation_rule_input_entry`; CREATE TABLE `t_ds_relation_rule_input_entry` ( `id` int(11) NOT NULL AUTO_INCREMENT, `rule_id` int(11) DEFAULT NULL, `rule_input_entry_id` int(11) DEFAULT NULL, `values_map` text DEFAULT NULL, `index` int(11) DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(1, 1, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(2, 1, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(3, 1, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(4, 1, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(5, 1, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(6, 1, 6, '{"statistics_name":"null_count.nulls"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(7, 1, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(8, 1, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(9, 1, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(10, 1, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(11, 1, 17, '', 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(12, 1, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO 
`t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(13, 2, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(14, 2, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(15, 2, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(16, 2, 6, '{"is_show":"true","can_edit":"true"}', 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(17, 2, 16, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(18, 2, 4, NULL, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(19, 2, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(20, 2, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(21, 2, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(22, 2, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(24, 2, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(25, 3, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(26, 3, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(27, 3, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(28, 3, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(29, 3, 11, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT 
INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(30, 3, 12, NULL, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(31, 3, 13, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(32, 3, 14, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(33, 3, 15, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(34, 3, 7, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(35, 3, 8, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(36, 3, 9, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(37, 3, 10, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(38, 3, 17, '{"comparison_name":"total_count.total"}', 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(39, 3, 19, NULL, 15, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(40, 4, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(41, 4, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(42, 4, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(43, 4, 6, '{"is_show":"true","can_edit":"true"}', 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(44, 4, 16, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(45, 4, 11, NULL, 6, 
'2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(46, 4, 12, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(47, 4, 13, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(48, 4, 17, '{"is_show":"true","can_edit":"true"}', 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(49, 4, 18, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(50, 4, 7, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(51, 4, 8, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(52, 4, 9, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(53, 4, 10, NULL, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(62, 3, 6, '{"statistics_name":"miss_count.miss"}', 18, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(63, 5, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(64, 5, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(65, 5, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(66, 5, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(67, 5, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(68, 5, 6, '{"statistics_name":"invalid_length_count.valids"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, 
`rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(69, 5, 24, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(70, 5, 23, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(71, 5, 7, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(72, 5, 8, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(73, 5, 9, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(74, 5, 10, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(75, 5, 17, '', 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(76, 5, 19, NULL, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(79, 6, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(80, 6, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(81, 6, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(82, 6, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(83, 6, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(84, 6, 6, '{"statistics_name":"duplicate_count.duplicates"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(85, 6, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(86, 6, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO 
`t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(87, 6, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(88, 6, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(89, 6, 17, '', 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(90, 6, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(93, 7, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(94, 7, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(95, 7, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(96, 7, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(97, 7, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(98, 7, 6, '{"statistics_name":"regexp_count.regexps"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(99, 7, 25, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(100, 7, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(101, 7, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(102, 7, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(103, 7, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(104, 7, 17, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 
11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(105, 7, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(108, 8, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(109, 8, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(110, 8, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(111, 8, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(112, 8, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(113, 8, 6, '{"statistics_name":"timeliness_count.timeliness"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(114, 8, 26, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(115, 8, 27, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(116, 8, 7, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(117, 8, 8, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(118, 8, 9, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(119, 8, 10, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(120, 8, 17, NULL, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(121, 8, 19, NULL, 15, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(124, 9, 1, NULL, 
1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(125, 9, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(126, 9, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(127, 9, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(128, 9, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(129, 9, 6, '{"statistics_name":"enum_count.enums"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(130, 9, 28, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(131, 9, 7, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(132, 9, 8, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(133, 9, 9, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(134, 9, 10, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(135, 9, 17, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(136, 9, 19, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(139, 10, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(140, 10, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(141, 10, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, 
`update_time`) VALUES(142, 10, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(143, 10, 6, '{"statistics_name":"table_count.total"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(144, 10, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(145, 10, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(146, 10, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(147, 10, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(148, 10, 17, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO `t_ds_relation_rule_input_entry` (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(149, 10, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (`id`, `rule_id`, `rule_input_entry_id`, `values_map`, `index`, `create_time`, `update_time`) VALUES(150, 8, 29, NULL, 7, '2021-03-03 11:31:24.0', '2021-03-03 11:31:24.0'); -- ---------------------------- -- Table structure for t_ds_environment -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment`; CREATE TABLE `t_ds_environment` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `code` bigint(20) DEFAULT NULL COMMENT 'encoding', `name` varchar(100) NOT NULL COMMENT 'environment name', `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config', `description` text NULL DEFAULT NULL COMMENT 'the details', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_name_unique` (`name`), UNIQUE KEY `environment_code_unique` (`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment_worker_group_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`; CREATE TABLE `t_ds_environment_worker_group_relation` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `worker_group` varchar(255) NOT NULL COMMENT 'worker group id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT 
CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_group_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_group_queue`; CREATE TABLE `t_ds_task_group_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT'key', `task_id` int(11) DEFAULT NULL COMMENT 'taskintanceid', `task_name` varchar(100) DEFAULT NULL COMMENT 'TaskInstance name', `group_id` int(11) DEFAULT NULL COMMENT 'taskGroup id', `process_id` int(11) DEFAULT NULL COMMENT 'processInstace id', `priority` int(8) DEFAULT '0' COMMENT 'priority', `status` tinyint(4) DEFAULT '-1' COMMENT '-1: waiting 1: running 2: finished', `force_start` tinyint(4) DEFAULT '0' COMMENT 'is force start 0 NO ,1 YES', `in_queue` tinyint(4) DEFAULT '0' COMMENT 'ready to get the queue by other task finish 0 NO ,1 YES', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY( `id` ) )ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8; -- ---------------------------- -- Table structure for t_ds_task_group -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_group`; CREATE TABLE `t_ds_task_group` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT'key', `name` varchar(100) DEFAULT NULL COMMENT 'task_group name', `description` varchar(200) DEFAULT NULL, `group_size` int (11) NOT NULL COMMENT'group size', `use_size` int (11) DEFAULT '0' COMMENT 'used size', `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `project_code` bigint(20) DEFAULT 0 COMMENT 'project code', `status` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY(`id`) ) ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8; -- ---------------------------- -- Table structure for t_ds_audit_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_audit_log`; CREATE TABLE `t_ds_audit_log` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT'key', `user_id` int(11) NOT NULL COMMENT 'user id', `resource_type` int(11) NOT NULL COMMENT 'resource type', `operation` int(11) NOT NULL COMMENT 'operation', `time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'create time', `resource_id` int(11) NULL DEFAULT NULL COMMENT 'resource id', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT= 1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_k8s -- ---------------------------- DROP TABLE IF EXISTS `t_ds_k8s`; CREATE TABLE `t_ds_k8s` ( `id` int(11) NOT NULL AUTO_INCREMENT, `k8s_name` varchar(100) DEFAULT NULL, `k8s_config` text DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8; -- ---------------------------- -- Table structure for t_ds_k8s_namespace -- ---------------------------- DROP TABLE IF EXISTS `t_ds_k8s_namespace`; CREATE TABLE `t_ds_k8s_namespace` ( `id` int(11) NOT NULL AUTO_INCREMENT, `limits_memory` int(11) DEFAULT NULL, `namespace` varchar(100) DEFAULT NULL, `online_job_num` int(11) DEFAULT NULL, `owner` varchar(100) DEFAULT NULL, `pod_replicas` int(11) DEFAULT NULL, `pod_request_cpu` decimal(14,3) DEFAULT NULL, `pod_request_memory` int(11) DEFAULT NULL, `tag` varchar(100) DEFAULT NULL, `limits_cpu` decimal(14,3) DEFAULT NULL, `k8s` varchar(100) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 
'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `k8s_namespace_unique` (`namespace`,`k8s`) ) ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,499
[Feature][Master] Optimize workflow lineage query speed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Optimize workflow lineage query speed ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8499
https://github.com/apache/dolphinscheduler/pull/8500
64baf6dc3db9f58d32947539a6fa36ac6e1eb200
512f8cb5dde7fb42398e7caa50c0c6665b6ec45f
"2022-02-23T05:37:00Z"
java
"2022-02-23T08:53:36Z"
dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_postgresql.sql
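Note on the optimization: the file content below is the PostgreSQL schema shipped with DolphinScheduler, and it defines composite indexes such as process_task_relation_idx_project_code_process_definition_code on t_ds_process_task_relation (project_code, process_definition_code), which match the access pattern a workflow-lineage lookup uses. The sketch that follows is illustrative only — the table and column names are taken from the schema in this file, but the literal code values are hypothetical and this is not necessarily the exact query changed by PR #8500.

-- Illustrative lineage-style lookup (assumption: not necessarily the PR's actual query).
-- It benefits from the composite index on (project_code, process_definition_code)
-- created for t_ds_process_task_relation later in this file.
SELECT pre_task_code, post_task_code
FROM t_ds_process_task_relation
WHERE project_code = 123456789              -- hypothetical project code
  AND process_definition_code = 987654321;  -- hypothetical process definition code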
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; DROP TABLE IF EXISTS QRTZ_LOCKS; DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; DROP TABLE IF EXISTS QRTZ_TRIGGERS; DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_JOB_DETAILS ( SCHED_NAME character varying(120) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, JOB_CLASS_NAME character varying(250) NOT NULL, IS_DURABLE boolean NOT NULL, IS_NONCONCURRENT boolean NOT NULL, IS_UPDATE_DATA boolean NOT NULL, REQUESTS_RECOVERY boolean NOT NULL, JOB_DATA bytea NULL ); alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE character varying(16) NOT NULL, TRIGGER_TYPE character varying(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME character varying(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA bytea NULL ) ; alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL ) ; alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, CRON_EXPRESSION character varying(120) NOT NULL, TIME_ZONE_ID character varying(80) ) ; alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, STR_PROP_1 character varying(512) NULL, STR_PROP_2 character varying(512) NULL, STR_PROP_3 character varying(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, 
BOOL_PROP_1 boolean NULL, BOOL_PROP_2 boolean NULL ) ; alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, BLOB_DATA bytea NULL ) ; alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME character varying(120) NOT NULL, CALENDAR_NAME character varying(200) NOT NULL, CALENDAR bytea NOT NULL ) ; alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME); CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL ) ; alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, ENTRY_ID character varying(200) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE character varying(16) NOT NULL, JOB_NAME character varying(200) NULL, JOB_GROUP character varying(200) NULL, IS_NONCONCURRENT boolean NULL, REQUESTS_RECOVERY boolean NULL ) ; alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID); CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME character varying(120) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL ) ; alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME); CREATE TABLE QRTZ_LOCKS ( SCHED_NAME character varying(120) NOT NULL, LOCK_NAME character varying(40) NOT NULL ) ; alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME); CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME); CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME); CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_T_G ON 
QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); -- -- Table structure for table t_ds_access_token -- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int NOT NULL , user_id int DEFAULT NULL , token varchar(64) DEFAULT NULL , expire_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alert -- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int NOT NULL , title varchar(64) DEFAULT NULL , content text , alert_status int DEFAULT '0' , log text , alertgroup_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alertgroup -- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup( id int NOT NULL, alert_instance_ids varchar (255) DEFAULT NULL, create_user_id int4 DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name) ) ; -- -- Table structure for table t_ds_command -- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int NOT NULL , command_type int DEFAULT NULL , process_definition_code bigint NOT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , executor_id int DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', dry_run int DEFAULT '0' , process_instance_id int DEFAULT 0, process_definition_version int DEFAULT 0, PRIMARY KEY (id) ) ; create index priority_id_index on t_ds_command (process_instance_priority,id); -- -- Table structure for table t_ds_datasource -- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int NOT NULL , name varchar(64) NOT NULL , note varchar(255) DEFAULT NULL , type int NOT NULL , user_id int NOT NULL , connection_params text NOT NULL , create_time timestamp NOT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id), CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type) ) ; -- -- Table structure for table t_ds_error_command -- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int NOT NULL , command_type int DEFAULT NULL , process_definition_code bigint NOT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , executor_id int DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', dry_run int DEFAULT '0' , message text , process_instance_id int DEFAULT 0, process_definition_version int DEFAULT 0, PRIMARY KEY (id) ); -- -- Table structure for table t_ds_process_definition -- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT 
NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , execution_type int DEFAULT '0', create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT process_definition_unique UNIQUE (name, project_code) ) ; create index process_definition_index on t_ds_process_definition (code,id); -- -- Table structure for table t_ds_process_definition_log -- DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , execution_type int DEFAULT '0', operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_task_definition -- DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT '-1', fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , task_group_id int DEFAULT NULL, task_group_priority int DEFAULT '0', resource_ids text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index task_definition_index on t_ds_task_definition (project_code,id); -- -- Table structure for table t_ds_task_definition_log -- DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT '-1', fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids text , operator int DEFAULT NULL , task_group_id int DEFAULT NULL, task_group_priority int DEFAULT '0', operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index idx_task_definition_log_code_version on t_ds_task_definition_log (code,version); create index idx_task_definition_log_project_code on t_ds_task_definition_log (project_code); -- -- Table structure for table t_ds_process_task_relation -- DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int NOT NULL , name varchar(255) DEFAULT NULL , project_code 
bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , process_definition_version int DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index process_task_relation_idx_project_code_process_definition_code on t_ds_process_task_relation (project_code,process_definition_code); -- -- Table structure for table t_ds_process_task_relation_log -- DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int NOT NULL , name varchar(255) DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , process_definition_version int DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index process_task_relation_log_idx_project_code_process_definition_code on t_ds_process_task_relation_log (project_code,process_definition_code); -- -- Table structure for table t_ds_process_instance -- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_code bigint DEFAULT NULL , process_definition_version int DEFAULT NULL , state int DEFAULT NULL , recovery int DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , run_times int DEFAULT NULL , host varchar(135) DEFAULT NULL , command_type int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , max_try_times int DEFAULT '0' , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , command_start_time timestamp DEFAULT NULL , global_params text , process_instance_json text , flag int DEFAULT '1' , update_time timestamp NULL , is_sub_process int DEFAULT '0' , executor_id int NOT NULL , history_cmd text , dependence_schedule_times text , process_instance_priority int DEFAULT NULL , worker_group varchar(64) , environment_code bigint DEFAULT '-1', timeout int DEFAULT '0' , tenant_id int NOT NULL DEFAULT '-1' , var_pool text , dry_run int DEFAULT '0' , next_process_instance_id int DEFAULT '0', restart_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index process_instance_index on t_ds_process_instance (process_definition_code,id); create index start_time_index on t_ds_process_instance (start_time,end_time); -- -- Table structure for table t_ds_project -- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int NOT NULL , name varchar(100) DEFAULT NULL , code bigint NOT NULL, description varchar(200) DEFAULT NULL , user_id int DEFAULT NULL , flag int DEFAULT '1' , create_time timestamp DEFAULT CURRENT_TIMESTAMP , update_time timestamp DEFAULT CURRENT_TIMESTAMP , PRIMARY KEY (id) ) ; create index user_id_index on t_ds_project (user_id); -- -- Table structure for table t_ds_queue -- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int NOT NULL , queue_name varchar(64) DEFAULT NULL , queue varchar(64) DEFAULT NULL , create_time timestamp 
DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) );

--
-- Table structure for table t_ds_relation_datasource_user
--

DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user ( id int NOT NULL , user_id int NOT NULL , datasource_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ;

--
-- Table structure for table t_ds_relation_process_instance
--

DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance ( id int NOT NULL , parent_process_instance_id int DEFAULT NULL , parent_task_instance_id int DEFAULT NULL , process_instance_id int DEFAULT NULL , PRIMARY KEY (id) ) ;

--
-- Table structure for table t_ds_relation_project_user
--

DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user ( id int NOT NULL , user_id int NOT NULL , project_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);

--
-- Table structure for table t_ds_relation_resources_user
--

DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user ( id int NOT NULL , user_id int NOT NULL , resources_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ;

--
-- Table structure for table t_ds_relation_udfs_user
--

DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user ( id int NOT NULL , user_id int NOT NULL , udf_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ;

--
-- Table structure for table t_ds_resources
--

DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources ( id int NOT NULL , alias varchar(64) DEFAULT NULL , file_name varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , user_id int DEFAULT NULL , type int DEFAULT NULL , size bigint DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , pid int, full_name varchar(64), is_directory boolean DEFAULT FALSE, PRIMARY KEY (id), CONSTRAINT t_ds_resources_un UNIQUE (full_name, type) ) ;

--
-- Table structure for table t_ds_schedules
--

DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules ( id int NOT NULL , process_definition_code bigint NOT NULL , start_time timestamp NOT NULL , end_time timestamp NOT NULL , timezone_id varchar(40) default NULL , crontab varchar(255) NOT NULL , failure_strategy int NOT NULL , user_id int NOT NULL , release_state int NOT NULL , warning_type int NOT NULL , warning_group_id int DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) );

--
-- Table structure for table t_ds_session
--

DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session ( id varchar(64) NOT NULL , user_id int DEFAULT NULL , ip varchar(45) DEFAULT NULL , last_login_time timestamp DEFAULT NULL , PRIMARY KEY (id) );
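-- Note: t_ds_task_instance (defined next) references t_ds_process_instance(id) with
-- ON DELETE CASCADE, so deleting a process instance also removes its task instances.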
--
-- Table structure for table t_ds_task_instance
--

DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_code bigint NOT NULL, task_definition_version int DEFAULT NULL , process_instance_id int DEFAULT NULL , state int DEFAULT NULL , submit_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , host varchar(135) DEFAULT NULL , execute_path varchar(200) DEFAULT NULL , log_path varchar(200) DEFAULT NULL , alert_flag int DEFAULT NULL , retry_times int DEFAULT '0' , pid int DEFAULT NULL , app_link text , task_params text , flag int DEFAULT '1' , retry_interval int DEFAULT NULL , max_retry_times int DEFAULT NULL , task_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', environment_config text, executor_id int DEFAULT NULL , first_submit_time timestamp DEFAULT NULL , delay_time int DEFAULT '0' , task_group_id int DEFAULT NULL, var_pool text , dry_run int DEFAULT '0' , PRIMARY KEY (id), CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE ) ;
create index idx_task_instance_code_version on t_ds_task_instance (task_code, task_definition_version);

--
-- Table structure for table t_ds_tenant
--

DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant ( id int NOT NULL , tenant_code varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , queue_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ;

--
-- Table structure for table t_ds_udfs
--

DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs ( id int NOT NULL , user_id int NOT NULL , func_name varchar(100) NOT NULL , class_name varchar(255) NOT NULL , type int NOT NULL , arg_types varchar(255) DEFAULT NULL , database varchar(255) DEFAULT NULL , description varchar(255) DEFAULT NULL , resource_id int NOT NULL , resource_name varchar(255) NOT NULL , create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ) ;

--
-- Table structure for table t_ds_user
--

DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user ( id int NOT NULL , user_name varchar(64) DEFAULT NULL , user_password varchar(64) DEFAULT NULL , user_type int DEFAULT NULL , email varchar(64) DEFAULT NULL , phone varchar(11) DEFAULT NULL , tenant_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , queue varchar(64) DEFAULT NULL , state int DEFAULT 1 , time_zone varchar(32) DEFAULT NULL, PRIMARY KEY (id) );
comment on column t_ds_user.state is 'state 0:disable 1:enable';

--
-- Table structure for table t_ds_version
--

DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version ( id int NOT NULL , version varchar(200) NOT NULL, PRIMARY KEY (id) ) ;
create index version_index on t_ds_version(version);

--
-- Table structure for table t_ds_worker_group
--

DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group ( id bigint NOT NULL , name varchar(255) NOT NULL , addr_list text DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT name_unique UNIQUE (name) ) ;

--
-- Table structure for table t_ds_worker_server
--

DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server ( id int NOT NULL , host varchar(45) DEFAULT NULL , port int DEFAULT NULL , zk_directory varchar(64) DEFAULT NULL , res_info varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , last_heartbeat_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ;
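-- Id sequences: each line below drops and recreates a sequence, then attaches it as the
-- default value of the corresponding table's id column, giving those tables auto-increment
-- behaviour (later tables such as t_ds_plugin_define use the serial type instead).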
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence; CREATE SEQUENCE t_ds_access_token_id_sequence; ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence; CREATE SEQUENCE t_ds_alert_id_sequence; ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence; CREATE SEQUENCE t_ds_alertgroup_id_sequence; ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence; CREATE SEQUENCE t_ds_command_id_sequence; ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence; CREATE SEQUENCE t_ds_datasource_id_sequence; ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence; CREATE SEQUENCE t_ds_process_definition_id_sequence; ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence; CREATE SEQUENCE t_ds_process_definition_log_id_sequence; ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence; CREATE SEQUENCE t_ds_task_definition_id_sequence; ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence; CREATE SEQUENCE t_ds_task_definition_log_id_sequence; ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_id_sequence; ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence; ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence; CREATE SEQUENCE t_ds_process_instance_id_sequence; ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence; CREATE SEQUENCE t_ds_project_id_sequence; ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence; CREATE SEQUENCE t_ds_queue_id_sequence; ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence; CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence; ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence; CREATE SEQUENCE t_ds_relation_process_instance_id_sequence; ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence; CREATE SEQUENCE t_ds_relation_project_user_id_sequence; ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence; CREATE SEQUENCE t_ds_relation_resources_user_id_sequence; ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence; CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence; ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence; CREATE SEQUENCE t_ds_resources_id_sequence; ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence; CREATE SEQUENCE t_ds_schedules_id_sequence; ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence; CREATE SEQUENCE t_ds_task_instance_id_sequence; ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence; CREATE SEQUENCE t_ds_tenant_id_sequence; ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence; CREATE SEQUENCE t_ds_udfs_id_sequence; ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence; CREATE SEQUENCE t_ds_user_id_sequence; ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence; CREATE SEQUENCE t_ds_version_id_sequence; ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence; CREATE SEQUENCE t_ds_worker_group_id_sequence; ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence; CREATE SEQUENCE t_ds_worker_server_id_sequence; ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
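-- Bootstrap records: a default admin user, a default alert group, the default queue and the
-- schema version. The admin password value below is a 32-character hash of the plaintext noted
-- in the comment (presumably an MD5 digest); it is not stored in clear text.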
-- Records of t_ds_user, user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time, time_zone)
VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22', 'Asia/Shanghai');

-- Records of t_ds_alertgroup, default admin warning group
INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');

-- Records of t_ds_queue, default queue name : default
INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time)
VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Records of t_ds_version, default version : 1.4.0
INSERT INTO t_ds_version(version) VALUES ('1.4.0');

--
-- Table structure for table t_ds_plugin_define
--

DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un
UNIQUE (plugin_name, plugin_type) ); -- -- Table structure for table t_ds_alert_plugin_instance -- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) ); -- -- Table structure for table t_ds_dq_comparison_type -- DROP TABLE IF EXISTS t_ds_dq_comparison_type; CREATE TABLE t_ds_dq_comparison_type ( id serial NOT NULL, "type" varchar NOT NULL, execute_sql varchar NULL, output_table varchar NULL, "name" varchar NULL, create_time timestamp NULL, update_time timestamp NULL, is_inner_source bool NULL, CONSTRAINT t_ds_dq_comparison_type_pk PRIMARY KEY (id) ); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(1, 'FixValue', NULL, NULL, NULL, '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', false); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(2, 'DailyAvg', 'select round(avg(statistics_value),2) as day_avg from t_ds_dq_task_statistics_value where data_time >=date_trunc(''DAY'', ${data_time}) and data_time < date_add(date_trunc(''day'', ${data_time}),1) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'day_range', 'day_range.day_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(3, 'WeeklyAvg', 'select round(avg(statistics_value),2) as week_avg from t_ds_dq_task_statistics_value where data_time >= date_trunc(''WEEK'', ${data_time}) and data_time <date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'week_range', 'week_range.week_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(4, 'MonthlyAvg', 'select round(avg(statistics_value),2) as month_avg from t_ds_dq_task_statistics_value where data_time >= date_trunc(''MONTH'', ${data_time}) and data_time <date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'month_range', 'month_range.month_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(5, 'Last7DayAvg', 'select round(avg(statistics_value),2) as last_7_avg from t_ds_dq_task_statistics_value where data_time >= date_add(date_trunc(''day'', ${data_time}),-7) and data_time <date_trunc(''day'', ${data_time}) and unique_code = ${unique_code} and statistics_name = ''${statistics_name}''', 'last_seven_days', 'last_seven_days.last_7_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(6, 'Last30DayAvg', 'select round(avg(statistics_value),2) as last_30_avg from t_ds_dq_task_statistics_value where data_time >= date_add(date_trunc(''day'', ${data_time}),-30) and data_time < date_trunc(''day'', ${data_time}) and unique_code = 
${unique_code} and statistics_name = ''${statistics_name}''', 'last_thirty_days', 'last_thirty_days.last_30_avg', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', true); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(7, 'SrcTableTotalRows', 'SELECT COUNT(*) AS total FROM ${src_table} WHERE (${src_filter})', 'total_count', 'total_count.total', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', false); INSERT INTO t_ds_dq_comparison_type (id, "type", execute_sql, output_table, "name", create_time, update_time, is_inner_source) VALUES(8, 'TargetTableTotalRows', 'SELECT COUNT(*) AS total FROM ${target_table} WHERE (${target_filter})', 'total_count', 'total_count.total', '2021-06-30 00:00:00.000', '2021-06-30 00:00:00.000', false); -- -- Table structure for table t_ds_dq_execute_result -- DROP TABLE IF EXISTS t_ds_dq_execute_result; CREATE TABLE t_ds_dq_execute_result ( id serial NOT NULL, process_definition_id int4 NULL, process_instance_id int4 NULL, task_instance_id int4 NULL, rule_type int4 NULL, rule_name varchar(255) DEFAULT NULL, statistics_value float8 NULL, comparison_value float8 NULL, check_type int4 NULL, threshold float8 NULL, "operator" int4 NULL, failure_strategy int4 NULL, state int4 NULL, user_id int4 NULL, create_time timestamp NULL, update_time timestamp NULL, comparison_type int4 NULL, error_output_path text NULL, CONSTRAINT t_ds_dq_execute_result_pk PRIMARY KEY (id) ); -- -- Table structure for table t_ds_dq_rule -- DROP TABLE IF EXISTS t_ds_dq_rule; CREATE TABLE t_ds_dq_rule ( id serial NOT NULL, "name" varchar(100) DEFAULT NULL, "type" int4 NULL, user_id int4 NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_dq_rule_pk PRIMARY KEY (id) ); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(1, '$t(null_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(2, '$t(custom_sql)', 1, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(3, '$t(multi_table_accuracy)', 2, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(4, '$t(multi_table_value_comparison)', 3, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(5, '$t(field_length_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(6, '$t(uniqueness_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(7, '$t(regexp_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(8, '$t(timeliness_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(9, '$t(enumeration_check)', 0, 1, '2020-01-12 00:00:00.000', '2020-01-12 00:00:00.000'); INSERT INTO t_ds_dq_rule (id, "name", "type", user_id, create_time, update_time) VALUES(10, '$t(table_count_check)', 0, 1, '2020-01-12 00:00:00.000', 
'2020-01-12 00:00:00.000'); -- -- Table structure for table t_ds_dq_rule_execute_sql -- DROP TABLE IF EXISTS t_ds_dq_rule_execute_sql; CREATE TABLE t_ds_dq_rule_execute_sql ( id serial NOT NULL, "index" int4 NULL, "sql" text NULL, table_alias varchar(255) DEFAULT NULL, "type" int4 NULL, create_time timestamp NULL, update_time timestamp NULL, is_error_output_sql bool NULL, CONSTRAINT t_ds_dq_rule_execute_sql_pk PRIMARY KEY (id) ); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(1, 1, 'SELECT COUNT(*) AS nulls FROM null_items', 'null_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(2, 1, 'SELECT COUNT(*) AS total FROM ${src_table} WHERE (${src_filter})', 'total_count', 2, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(3, 1, 'SELECT COUNT(*) AS miss from miss_items', 'miss_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(4, 1, 'SELECT COUNT(*) AS valids FROM invalid_length_items', 'invalid_length_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(5, 1, 'SELECT COUNT(*) AS total FROM ${target_table} WHERE (${target_filter})', 'total_count', 2, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(6, 1, 'SELECT ${src_field} FROM ${src_table} group by ${src_field} having count(*) > 1', 'duplicate_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(7, 1, 'SELECT COUNT(*) AS duplicates FROM duplicate_items', 'duplicate_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(8, 1, 'SELECT ${src_table}.* FROM (SELECT * FROM ${src_table} WHERE (${src_filter})) ${src_table} LEFT JOIN (SELECT * FROM ${target_table} WHERE (${target_filter})) ${target_table} ON ${on_clause} WHERE ${where_clause}', 'miss_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(9, 1, 'SELECT * FROM ${src_table} WHERE (${src_field} not regexp ''${regexp_pattern}'') AND (${src_filter}) ', 'regexp_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(10, 1, 'SELECT COUNT(*) AS regexps FROM regexp_items', 'regexp_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) 
VALUES(11, 1, 'SELECT * FROM ${src_table} WHERE (to_unix_timestamp(${src_field}, ''${datetime_format}'')-to_unix_timestamp(''${deadline}'', ''${datetime_format}'') <= 0) AND (to_unix_timestamp(${src_field}, ''${datetime_format}'')-to_unix_timestamp(''${begin_time}'', ''${datetime_format}'') >= 0) AND (${src_filter}) ', 'timeliness_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(12, 1, 'SELECT COUNT(*) AS timeliness FROM timeliness_items', 'timeliness_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(13, 1, 'SELECT * FROM ${src_table} where (${src_field} not in ( ${enum_list} ) or ${src_field} is null) AND (${src_filter}) ', 'enum_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(14, 1, 'SELECT COUNT(*) AS enums FROM enum_items', 'enum_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(15, 1, 'SELECT COUNT(*) AS total FROM ${src_table} WHERE (${src_filter})', 'table_count', 1, false, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(16, 1, 'SELECT * FROM ${src_table} WHERE (${src_field} is null or ${src_field} = '''') AND (${src_filter})', 'null_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_execute_sql (id, "index", "sql", table_alias, "type", is_error_output_sql, create_time, update_time) VALUES(17, 1, 'SELECT * FROM ${src_table} WHERE (length(${src_field}) ${logic_operator} ${field_length}) AND (${src_filter})', 'invalid_length_items', 0, true, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); -- -- Table structure for table t_ds_dq_rule_input_entry -- DROP TABLE IF EXISTS t_ds_dq_rule_input_entry; CREATE TABLE t_ds_dq_rule_input_entry ( id serial NOT NULL, field varchar(255) DEFAULT NULL, "type" varchar(255) DEFAULT NULL, title varchar(255) DEFAULT NULL, value varchar(255) DEFAULT NULL, "options" text DEFAULT NULL, placeholder varchar(255) DEFAULT NULL, option_source_type int4 NULL, value_type int4 NULL, input_type int4 NULL, is_show int2 NULL DEFAULT '1'::smallint, can_edit int2 NULL DEFAULT '1'::smallint, is_emit int2 NULL DEFAULT '0'::smallint, is_validate int2 NULL DEFAULT '0'::smallint, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_dq_rule_input_entry_pk PRIMARY KEY (id) ); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(1, 'src_connector_type', 'select', '$t(src_connector_type)', '', '[{"label":"HIVE","value":"HIVE"},{"label":"JDBC","value":"JDBC"}]', 'please select source connector type', 2, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, 
input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(2, 'src_datasource_id', 'select', '$t(src_datasource_id)', '', NULL, 'please select source datasource id', 1, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(3, 'src_table', 'select', '$t(src_table)', NULL, NULL, 'Please enter source table name', 0, 0, 0, 1, 1, 1, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(4, 'src_filter', 'input', '$t(src_filter)', NULL, NULL, 'Please enter filter expression', 0, 3, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(5, 'src_field', 'select', '$t(src_field)', NULL, NULL, 'Please enter column, only single column is supported', 0, 0, 0, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(6, 'statistics_name', 'input', '$t(statistics_name)', NULL, NULL, 'Please enter statistics name, the alias in statistics execute sql', 0, 0, 1, 0, 0, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(7, 'check_type', 'select', '$t(check_type)', '0', '[{"label":"Expected - Actual","value":"0"},{"label":"Actual - Expected","value":"1"},{"label":"Actual / Expected","value":"2"},{"label":"(Expected - Actual) / Expected","value":"3"}]', 'please select check type', 0, 0, 3, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(8, 'operator', 'select', '$t(operator)', '0', '[{"label":"=","value":"0"},{"label":"<","value":"1"},{"label":"<=","value":"2"},{"label":">","value":"3"},{"label":">=","value":"4"},{"label":"!=","value":"5"}]', 'please select operator', 0, 0, 3, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(9, 'threshold', 'input', '$t(threshold)', NULL, NULL, 'Please enter threshold, number is needed', 0, 2, 3, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(10, 'failure_strategy', 
'select', '$t(failure_strategy)', '0', '[{"label":"Alert","value":"0"},{"label":"Block","value":"1"}]', 'please select failure strategy', 0, 0, 3, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(11, 'target_connector_type', 'select', '$t(target_connector_type)', '', '[{"label":"HIVE","value":"HIVE"},{"label":"JDBC","value":"JDBC"}]', 'Please select target connector type', 2, 0, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(12, 'target_datasource_id', 'select', '$t(target_datasource_id)', '', NULL, 'Please select target datasource', 1, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(13, 'target_table', 'select', '$t(target_table)', NULL, NULL, 'Please enter target table', 0, 0, 0, 1, 1, 1, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(14, 'target_filter', 'input', '$t(target_filter)', NULL, NULL, 'Please enter target filter expression', 0, 3, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(15, 'mapping_columns', 'group', '$t(mapping_columns)', NULL, '[{"field":"src_field","props":{"placeholder":"Please input src field","rows":0,"disabled":false,"size":"small"},"type":"input","title":"src_field"},{"field":"operator","props":{"placeholder":"Please input operator","rows":0,"disabled":false,"size":"small"},"type":"input","title":"operator"},{"field":"target_field","props":{"placeholder":"Please input target field","rows":0,"disabled":false,"size":"small"},"type":"input","title":"target_field"}]', 'please enter mapping columns', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(16, 'statistics_execute_sql', 'textarea', '$t(statistics_execute_sql)', NULL, NULL, 'Please enter statistics execute sql', 0, 3, 0, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(17, 'comparison_name', 'input', '$t(comparison_name)', NULL, NULL, 'Please enter comparison name, the alias in comparison execute sql', 0, 0, 0, 0, 0, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO 
t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(18, 'comparison_execute_sql', 'textarea', '$t(comparison_execute_sql)', NULL, NULL, 'Please enter comparison execute sql', 0, 3, 0, 1, 1, 0, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(19, 'comparison_type', 'select', '$t(comparison_type)', '', NULL, 'Please enter comparison title', 3, 0, 2, 1, 0, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(20, 'writer_connector_type', 'select', '$t(writer_connector_type)', '', '[{"label":"MYSQL","value":"0"},{"label":"POSTGRESQL","value":"1"}]', 'please select writer connector type', 0, 2, 0, 1, 1, 1, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(21, 'writer_datasource_id', 'select', '$t(writer_datasource_id)', '', NULL, 'please select writer datasource id', 1, 2, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(22, 'target_field', 'select', '$t(target_field)', NULL, NULL, 'Please enter column, only single column is supported', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(23, 'field_length', 'input', '$t(field_length)', NULL, NULL, 'Please enter length limit', 0, 3, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(24, 'logic_operator', 'select', '$t(logic_operator)', '=', '[{"label":"=","value":"="},{"label":"<","value":"<"},{"label":"<=","value":"<="},{"label":">","value":">"},{"label":">=","value":">="},{"label":"<>","value":"<>"}]', 'please select logic operator', 0, 0, 3, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(25, 'regexp_pattern', 'input', '$t(regexp_pattern)', NULL, NULL, 'Please enter regexp pattern', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, 
is_validate, create_time, update_time) VALUES(26, 'deadline', 'input', '$t(deadline)', NULL, NULL, 'Please enter deadline', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(27, 'datetime_format', 'input', '$t(datetime_format)', NULL, NULL, 'Please enter datetime format', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(28, 'enum_list', 'input', '$t(enum_list)', NULL, NULL, 'Please enter enumeration', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_dq_rule_input_entry (id, field, "type", title, value, "options", placeholder, option_source_type, value_type, input_type, is_show, can_edit, is_emit, is_validate, create_time, update_time) VALUES(29, 'begin_time', 'input', '$t(begin_time)', NULL, NULL, 'Please enter begin time', 0, 0, 0, 1, 1, 0, 0, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); -- -- Table structure for table t_ds_dq_task_statistics_value -- DROP TABLE IF EXISTS t_ds_dq_task_statistics_value; CREATE TABLE t_ds_dq_task_statistics_value ( id serial NOT NULL, process_definition_id int4 NOT NULL, task_instance_id int4 NULL, rule_id int4 NOT NULL, unique_code varchar NOT NULL, statistics_name varchar NULL, statistics_value float8 NULL, data_time timestamp(0) NULL, create_time timestamp(0) NULL, update_time timestamp(0) NULL, CONSTRAINT t_ds_dq_task_statistics_value_pk PRIMARY KEY (id) ); -- -- Table structure for table t_ds_relation_rule_execute_sql -- DROP TABLE IF EXISTS t_ds_relation_rule_execute_sql; CREATE TABLE t_ds_relation_rule_execute_sql ( id serial NOT NULL, rule_id int4 NULL, execute_sql_id int4 NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_relation_rule_execute_sql_pk PRIMARY KEY (id) ); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(1, 1, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(3, 5, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(2, 3, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(4, 3, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(5, 6, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(6, 6, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(7, 7, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(8, 7, 10, '2021-03-03 11:31:24.000', '2021-03-03 
11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(9, 8, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(10, 8, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(11, 9, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(12, 9, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(13, 10, 15, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(14, 1, 16, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_execute_sql (id, rule_id, execute_sql_id, create_time, update_time) VALUES(15, 5, 17, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); -- -- Table structure for table t_ds_relation_rule_input_entry -- DROP TABLE IF EXISTS t_ds_relation_rule_input_entry; CREATE TABLE t_ds_relation_rule_input_entry ( id serial NOT NULL, rule_id int4 NULL, rule_input_entry_id int4 NULL, values_map text NULL, "index" int4 NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_relation_rule_input_entry_pk PRIMARY KEY (id) ); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(1, 1, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(2, 1, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(3, 1, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(4, 1, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(5, 1, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(6, 1, 6, '{"statistics_name":"null_count.nulls"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(7, 1, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(8, 1, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(9, 1, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, 
rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(10, 1, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(11, 1, 17, '', 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(12, 1, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(13, 2, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(14, 2, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(15, 2, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(16, 2, 6, '{"is_show":"true","can_edit":"true"}', 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(17, 2, 16, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(18, 2, 4, NULL, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(19, 2, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(20, 2, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(21, 2, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(22, 2, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(24, 2, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(25, 3, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(26, 3, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(27, 3, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, 
rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(28, 3, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(29, 3, 11, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(30, 3, 12, NULL, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(31, 3, 13, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(32, 3, 14, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(33, 3, 15, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(34, 3, 7, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(35, 3, 8, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(36, 3, 9, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(37, 3, 10, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(38, 3, 17, '{"comparison_name":"total_count.total"}', 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(39, 3, 19, NULL, 15, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(40, 4, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(41, 4, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(42, 4, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(43, 4, 6, '{"is_show":"true","can_edit":"true"}', 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(44, 4, 16, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO 
t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(45, 4, 11, NULL, 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(46, 4, 12, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(47, 4, 13, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(48, 4, 17, '{"is_show":"true","can_edit":"true"}', 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(49, 4, 18, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(50, 4, 7, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(51, 4, 8, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(52, 4, 9, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(53, 4, 10, NULL, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(62, 3, 6, '{"statistics_name":"miss_count.miss"}', 18, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(63, 5, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(64, 5, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(65, 5, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(66, 5, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(67, 5, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(68, 5, 6, '{"statistics_name":"invalid_length_count.valids"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(69, 5, 24, 
NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(70, 5, 23, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(71, 5, 7, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(72, 5, 8, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(73, 5, 9, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(74, 5, 10, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(75, 5, 17, '', 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(76, 5, 19, NULL, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(79, 6, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(80, 6, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(81, 6, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(82, 6, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(83, 6, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(84, 6, 6, '{"statistics_name":"duplicate_count.duplicates"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(85, 6, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(86, 6, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(87, 6, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(88, 6, 
10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(89, 6, 17, '', 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(90, 6, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(93, 7, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(94, 7, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(95, 7, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(96, 7, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(97, 7, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(98, 7, 6, '{"statistics_name":"regexp_count.regexps"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(99, 7, 25, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(100, 7, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(101, 7, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(102, 7, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(103, 7, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(104, 7, 17, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(105, 7, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(108, 8, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) 
VALUES(109, 8, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(110, 8, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(111, 8, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(112, 8, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(113, 8, 6, '{"statistics_name":"timeliness_count.timeliness"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(114, 8, 26, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(115, 8, 27, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(116, 8, 7, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(117, 8, 8, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(118, 8, 9, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(119, 8, 10, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(120, 8, 17, NULL, 14, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(121, 8, 19, NULL, 15, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(124, 9, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(125, 9, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(126, 9, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(127, 9, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", 
create_time, update_time) VALUES(128, 9, 5, NULL, 5, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(129, 9, 6, '{"statistics_name":"enum_count.enums"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(130, 9, 28, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(131, 9, 7, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(132, 9, 8, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(133, 9, 9, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(134, 9, 10, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(135, 9, 17, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(136, 9, 19, NULL, 13, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(139, 10, 1, NULL, 1, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(140, 10, 2, NULL, 2, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(141, 10, 3, NULL, 3, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(142, 10, 4, NULL, 4, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(143, 10, 6, '{"statistics_name":"table_count.total"}', 6, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(144, 10, 7, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(145, 10, 8, NULL, 8, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(146, 10, 9, NULL, 9, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry 
(id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(147, 10, 10, NULL, 10, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(148, 10, 17, NULL, 11, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(149, 10, 19, NULL, 12, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); INSERT INTO t_ds_relation_rule_input_entry (id, rule_id, rule_input_entry_id, values_map, "index", create_time, update_time) VALUES(150, 8, 29, NULL, 7, '2021-03-03 11:31:24.000', '2021-03-03 11:31:24.000'); -- -- Table structure for table t_ds_environment -- DROP TABLE IF EXISTS t_ds_environment; CREATE TABLE t_ds_environment ( id serial NOT NULL, code bigint NOT NULL, name varchar(100) DEFAULT NULL, config text DEFAULT NULL, description text, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT environment_name_unique UNIQUE (name), CONSTRAINT environment_code_unique UNIQUE (code) ); -- -- Table structure for table t_ds_environment_worker_group_relation -- DROP TABLE IF EXISTS t_ds_environment_worker_group_relation; CREATE TABLE t_ds_environment_worker_group_relation ( id serial NOT NULL, environment_code bigint NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id) , CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group) ); -- -- Table structure for table t_ds_task_group_queue -- DROP TABLE IF EXISTS t_ds_task_group_queue; CREATE TABLE t_ds_task_group_queue ( id serial NOT NULL, task_id int DEFAULT NULL , task_name VARCHAR(100) DEFAULT NULL , group_id int DEFAULT NULL , process_id int DEFAULT NULL , priority int DEFAULT '0' , status int DEFAULT '-1' , force_start int DEFAULT '0' , in_queue int DEFAULT '0' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_task_group -- DROP TABLE IF EXISTS t_ds_task_group; CREATE TABLE t_ds_task_group ( id serial NOT NULL, name varchar(100) DEFAULT NULL , description varchar(200) DEFAULT NULL , group_size int NOT NULL , project_code bigint DEFAULT '0' , use_size int DEFAULT '0' , user_id int DEFAULT NULL , status int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY(id) ); -- ---------------------------- -- Table structure for t_ds_audit_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_audit_log; CREATE TABLE t_ds_audit_log ( id serial NOT NULL, user_id int NOT NULL, resource_type int NOT NULL, operation int NOT NULL, time timestamp DEFAULT NULL , resource_id int NOT NULL, PRIMARY KEY (id) ); -- -- Table structure for table t_ds_k8s -- DROP TABLE IF EXISTS t_ds_k8s; CREATE TABLE t_ds_k8s ( id serial NOT NULL, k8s_name VARCHAR(100) DEFAULT NULL , k8s_config text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_k8s_namespace -- DROP TABLE IF EXISTS t_ds_k8s_namespace; CREATE TABLE t_ds_k8s_namespace ( id serial NOT NULL, limits_memory int DEFAULT NULL , namespace varchar(100) DEFAULT NULL , online_job_num int DEFAULT '0' , owner varchar(100) DEFAULT 
NULL, pod_replicas int DEFAULT NULL, pod_request_cpu NUMERIC(13,4) NULL, pod_request_memory int DEFAULT NULL, tag varchar(100) DEFAULT NULL, limits_cpu NUMERIC(13,4) NULL, k8s varchar(100) DEFAULT NULL, create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT k8s_namespace_unique UNIQUE (namespace,k8s) );
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,499
[Feature][Master] Optimize workflow lineage query speed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Optimize workflow lineage query speed ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8499
https://github.com/apache/dolphinscheduler/pull/8500
64baf6dc3db9f58d32947539a6fa36ac6e1eb200
512f8cb5dde7fb42398e7caa50c0c6665b6ec45f
"2022-02-23T05:37:00Z"
java
"2022-02-23T08:53:36Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.1.0_schema/mysql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ ALTER TABLE `t_ds_task_instance` ADD INDEX `idx_code_version` (`task_code`, `task_definition_version`) USING BTREE; ALTER TABLE `t_ds_task_instance` MODIFY COLUMN `task_params` longtext COMMENT 'job custom parameters' AFTER `app_link`; ALTER TABLE `t_ds_process_task_relation` ADD KEY `idx_code` (`project_code`, `process_definition_code`) USING BTREE; ALTER TABLE `t_ds_process_task_relation_log` ADD KEY `idx_process_code_version` (`process_definition_code`,`process_definition_version`) USING BTREE; ALTER TABLE `t_ds_task_definition_log` ADD INDEX `idx_project_code` (`project_code`) USING BTREE; ALTER TABLE `t_ds_task_definition_log` ADD INDEX `idx_code_version` (`code`,`version`) USING BTREE; alter table t_ds_task_definition_log add `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id' AFTER `resource_ids`; alter table t_ds_task_definition_log add `task_group_priority` int(11) DEFAULT NULL COMMENT 'task group id' AFTER `task_group_id`; alter table t_ds_task_definition add `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id' AFTER `resource_ids`; alter table t_ds_task_definition add `task_group_priority` int(11) DEFAULT '0' COMMENT 'task group id' AFTER `task_group_id`; ALTER TABLE `t_ds_user` ADD COLUMN `time_zone` varchar(32) DEFAULT NULL COMMENT 'time zone'; -- -- Table structure for table `t_ds_dq_comparison_type` -- DROP TABLE IF EXISTS `t_ds_dq_comparison_type`; CREATE TABLE `t_ds_dq_comparison_type` ( `id` int(11) NOT NULL AUTO_INCREMENT, `type` varchar(100) NOT NULL, `execute_sql` text DEFAULT NULL, `output_table` varchar(100) DEFAULT NULL, `name` varchar(100) DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, `is_inner_source` tinyint(1) DEFAULT '0', PRIMARY KEY (`id`) )ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_dq_execute_result -- DROP TABLE IF EXISTS `t_ds_dq_execute_result`; CREATE TABLE `t_ds_dq_execute_result` ( `id` int(11) NOT NULL AUTO_INCREMENT, `process_definition_id` int(11) DEFAULT NULL, `process_instance_id` int(11) DEFAULT NULL, `task_instance_id` int(11) DEFAULT NULL, `rule_type` int(11) DEFAULT NULL, `rule_name` varchar(255) DEFAULT NULL, `statistics_value` double DEFAULT NULL, `comparison_value` double DEFAULT NULL, `check_type` int(11) DEFAULT NULL, `threshold` double DEFAULT NULL, `operator` int(11) DEFAULT NULL, `failure_strategy` int(11) DEFAULT NULL, `state` int(11) DEFAULT NULL, `user_id` int(11) DEFAULT NULL, `comparison_type` int(11) DEFAULT NULL, `error_output_path` text DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_dq_rule -- DROP TABLE IF EXISTS `t_ds_dq_rule`; CREATE TABLE 
`t_ds_dq_rule` ( `id` int(11) NOT NULL AUTO_INCREMENT, `name` varchar(100) DEFAULT NULL, `type` int(11) DEFAULT NULL, `user_id` int(11) DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_dq_rule_execute_sql -- DROP TABLE IF EXISTS `t_ds_dq_rule_execute_sql`; CREATE TABLE `t_ds_dq_rule_execute_sql` ( `id` int(11) NOT NULL AUTO_INCREMENT, `index` int(11) DEFAULT NULL, `sql` text DEFAULT NULL, `table_alias` varchar(255) DEFAULT NULL, `type` int(11) DEFAULT NULL, `is_error_output_sql` tinyint(1) DEFAULT '0', `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_dq_rule_input_entry -- DROP TABLE IF EXISTS `t_ds_dq_rule_input_entry`; CREATE TABLE `t_ds_dq_rule_input_entry` ( `id` int(11) NOT NULL AUTO_INCREMENT, `field` varchar(255) DEFAULT NULL, `type` varchar(255) DEFAULT NULL, `title` varchar(255) DEFAULT NULL, `value` varchar(255) DEFAULT NULL, `options` text DEFAULT NULL, `placeholder` varchar(255) DEFAULT NULL, `option_source_type` int(11) DEFAULT NULL, `value_type` int(11) DEFAULT NULL, `input_type` int(11) DEFAULT NULL, `is_show` tinyint(1) DEFAULT '1', `can_edit` tinyint(1) DEFAULT '1', `is_emit` tinyint(1) DEFAULT '0', `is_validate` tinyint(1) DEFAULT '1', `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_dq_task_statistics_value -- DROP TABLE IF EXISTS `t_ds_dq_task_statistics_value`; CREATE TABLE `t_ds_dq_task_statistics_value` ( `id` int(11) NOT NULL AUTO_INCREMENT, `process_definition_id` int(11) DEFAULT NULL, `task_instance_id` int(11) DEFAULT NULL, `rule_id` int(11) NOT NULL, `unique_code` varchar(255) NULL, `statistics_name` varchar(255) NULL, `statistics_value` double NULL, `data_time` datetime DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_relation_rule_execute_sql -- DROP TABLE IF EXISTS `t_ds_relation_rule_execute_sql`; CREATE TABLE `t_ds_relation_rule_execute_sql` ( `id` int(11) NOT NULL AUTO_INCREMENT, `rule_id` int(11) DEFAULT NULL, `execute_sql_id` int(11) DEFAULT NULL, `create_time` datetime NULL, `update_time` datetime NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table t_ds_relation_rule_input_entry -- DROP TABLE IF EXISTS `t_ds_relation_rule_input_entry`; CREATE TABLE `t_ds_relation_rule_input_entry` ( `id` int(11) NOT NULL AUTO_INCREMENT, `rule_id` int(11) DEFAULT NULL, `rule_input_entry_id` int(11) DEFAULT NULL, `values_map` text DEFAULT NULL, `index` int(11) DEFAULT NULL, `create_time` datetime DEFAULT NULL, `update_time` datetime DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_k8s -- ---------------------------- DROP TABLE IF EXISTS `t_ds_k8s`; CREATE TABLE `t_ds_k8s` ( `id` int(11) NOT NULL AUTO_INCREMENT, `k8s_name` varchar(100) DEFAULT NULL, `k8s_config` text DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8; -- ---------------------------- -- Table structure for t_ds_k8s_namespace -- 
---------------------------- DROP TABLE IF EXISTS `t_ds_k8s_namespace`; CREATE TABLE `t_ds_k8s_namespace` ( `id` int(11) NOT NULL AUTO_INCREMENT, `limits_memory` int(11) DEFAULT NULL, `namespace` varchar(100) DEFAULT NULL, `online_job_num` int(11) DEFAULT NULL, `owner` varchar(100) DEFAULT NULL, `pod_replicas` int(11) DEFAULT NULL, `pod_request_cpu` decimal(14,3) DEFAULT NULL, `pod_request_memory` int(11) DEFAULT NULL, `tag` varchar(100) DEFAULT NULL, `limits_cpu` decimal(14,3) DEFAULT NULL, `k8s` varchar(100) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `k8s_namespace_unique` (`namespace`,`k8s`) ) ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,499
[Feature][Master] Optimize workflow lineage query speed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Optimize workflow lineage query speed ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8499
https://github.com/apache/dolphinscheduler/pull/8500
64baf6dc3db9f58d32947539a6fa36ac6e1eb200
512f8cb5dde7fb42398e7caa50c0c6665b6ec45f
"2022-02-23T05:37:00Z"
java
"2022-02-23T08:53:36Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.1.0_schema/postgresql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ delimiter d// CREATE OR REPLACE FUNCTION public.dolphin_update_metadata( ) RETURNS character varying LANGUAGE 'plpgsql' COST 100 VOLATILE PARALLEL UNSAFE AS $BODY$ DECLARE v_schema varchar; BEGIN ---get schema name v_schema =current_schema(); EXECUTE 'DROP INDEX IF EXISTS "process_task_relation_idx_project_code_process_definition_code"'; EXECUTE 'CREATE INDEX IF NOT EXISTS process_task_relation_idx_project_code_process_definition_code ON ' || quote_ident(v_schema) ||'.t_ds_process_task_relation USING Btree("project_code","process_definition_code")'; EXECUTE 'DROP INDEX IF EXISTS "process_task_relation_log_idx_project_code_process_definition_code"'; EXECUTE 'CREATE INDEX IF NOT EXISTS process_task_relation_log_idx_project_code_process_definition_code ON ' || quote_ident(v_schema) ||'.t_ds_process_task_relation_log USING Btree("project_code","process_definition_code")'; EXECUTE 'DROP INDEX IF EXISTS "idx_task_definition_log_code_version"'; EXECUTE 'CREATE INDEX IF NOT EXISTS idx_task_definition_log_code_version ON ' || quote_ident(v_schema) ||'.t_ds_task_definition_log USING Btree("code","version")'; EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_user ADD COLUMN IF NOT EXISTS "time_zone" varchar(32) DEFAULT NULL'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_dq_comparison_type" ( id serial NOT NULL, "type" varchar NOT NULL, execute_sql varchar NULL, output_table varchar NULL, "name" varchar NULL, create_time timestamp NULL, update_time timestamp NULL, is_inner_source bool NULL, CONSTRAINT t_ds_dq_comparison_type_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_dq_execute_result" ( id serial NOT NULL, process_definition_id int4 NULL, process_instance_id int4 NULL, task_instance_id int4 NULL, rule_type int4 NULL, rule_name varchar(255) DEFAULT NULL, statistics_value float8 NULL, comparison_value float8 NULL, check_type int4 NULL, threshold float8 NULL, "operator" int4 NULL, failure_strategy int4 NULL, state int4 NULL, user_id int4 NULL, create_time timestamp NULL, update_time timestamp NULL, comparison_type int4 NULL, error_output_path text NULL, CONSTRAINT t_ds_dq_execute_result_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_dq_rule" ( id serial NOT NULL, "name" varchar(100) DEFAULT NULL, "type" int4 NULL, user_id int4 NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_dq_rule_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_dq_rule_execute_sql" ( id serial NOT NULL, "index" int4 NULL, "sql" text NULL, table_alias varchar(255) DEFAULT NULL, "type" int4 NULL, create_time timestamp NULL, update_time timestamp NULL, 
is_error_output_sql bool NULL, CONSTRAINT t_ds_dq_rule_execute_sql_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_dq_rule_input_entry" ( id serial NOT NULL, field varchar(255) DEFAULT NULL, "type" varchar(255) DEFAULT NULL, title varchar(255) DEFAULT NULL, value varchar(255) DEFAULT NULL, "options" text DEFAULT NULL, placeholder varchar(255) DEFAULT NULL, option_source_type int4 NULL, value_type int4 NULL, input_type int4 NULL, is_show int2 NULL DEFAULT "1"::smallint, can_edit int2 NULL DEFAULT "1"::smallint, is_emit int2 NULL DEFAULT "0"::smallint, is_validate int2 NULL DEFAULT "0"::smallint, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_dq_rule_input_entry_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_dq_task_statistics_value" ( id serial NOT NULL, process_definition_id int4 NOT NULL, task_instance_id int4 NULL, rule_id int4 NOT NULL, unique_code varchar NOT NULL, statistics_name varchar NULL, statistics_value float8 NULL, data_time timestamp(0) NULL, create_time timestamp(0) NULL, update_time timestamp(0) NULL, CONSTRAINT t_ds_dq_task_statistics_value_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_relation_rule_execute_sql" ( id serial NOT NULL, rule_id int4 NULL, execute_sql_id int4 NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_relation_rule_execute_sql_pk PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS' || quote_ident(v_schema) ||'."t_ds_relation_rule_input_entry" ( id serial NOT NULL, rule_id int4 NULL, rule_input_entry_id int4 NULL, values_map text NULL, "index" int4 NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_relation_rule_input_entry_pk PRIMARY KEY (id) )'; EXECUTE 'DROP INDEX IF EXISTS "idx_task_definition_log_project_code"'; EXECUTE 'CREATE INDEX IF NOT EXISTS idx_task_definition_log_project_code ON ' || quote_ident(v_schema) ||'.t_ds_task_definition_log USING Btree("project_code")'; EXECUTE 'DROP INDEX IF EXISTS "idx_task_instance_code_version"'; EXECUTE 'CREATE INDEX IF NOT EXISTS idx_task_instance_code_version ON' || quote_ident(v_schema) ||'.t_ds_task_instance USING Btree("task_code","task_definition_version")'; EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_k8s" ( id serial NOT NULL, k8s_name VARCHAR(100) DEFAULT NULL , k8s_config text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) )'; EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_k8s_namespace" ( id serial NOT NULL, limits_memory int DEFAULT NULL , namespace varchar(100) DEFAULT NULL , online_job_num int DEFAULT ''0'' , owner varchar(100) DEFAULT NULL, pod_replicas int DEFAULT NULL, pod_request_cpu NUMERIC(13,4) NULL, pod_request_memory int DEFAULT NULL, tag varchar(100) DEFAULT NULL, limits_cpu NUMERIC(13,4) NULL, k8s varchar(100) DEFAULT NULL, create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT k8s_namespace_unique UNIQUE (namespace,k8s) )'; return 'Success!'; exception when others then ---Raise EXCEPTION '(%)',SQLERRM; return SQLERRM; END; $BODY$; select dolphin_update_metadata(); d//
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,110
[Bug] [Worker/Master] Data duplication caused by worker faking death
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. Because the CPU load was too high, the worker lost its connection with ZooKeeper. 2. The master failed over the worker and restarted the tasks, but could not kill the original ones. 3. The same task was therefore run twice and the result data was duplicated. ### What you expected to happen 1. The worker should check resource usage before submitting a task to the execution thread. 2. Could the worker actively reduce CPU load by killing tasks? ### How to reproduce 1. CPU load is too high: ![image](https://user-images.githubusercontent.com/29528966/149973563-b8f5c39a-9930-4ab2-8997-8416eac44430.png) 2. The worker lost its connection with ZooKeeper: ![image](https://user-images.githubusercontent.com/29528966/149975735-5e70fd4a-0b01-4e0b-a7bf-452f3f11eefc.png) ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8110
https://github.com/apache/dolphinscheduler/pull/8490
512f8cb5dde7fb42398e7caa50c0c6665b6ec45f
f2b9796bc20ef40e1737ea3afe9a00503b44ae3d
"2022-01-18T16:27:25Z"
java
"2022-02-23T10:22:42Z"
dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/task/TaskExecutionContextCacheManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.spi.task; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; public class TaskExecutionContextCacheManager { private TaskExecutionContextCacheManager() { throw new IllegalStateException("Utility class"); } /** * taskInstance cache */ private static Map<Integer, TaskRequest> taskRequestContextCache = new ConcurrentHashMap<>(); /** * get taskInstance by taskInstance id * * @param taskInstanceId taskInstanceId * @return taskInstance */ public static TaskRequest getByTaskInstanceId(Integer taskInstanceId) { return taskRequestContextCache.get(taskInstanceId); } /** * cache taskInstance * * @param request request */ public static void cacheTaskExecutionContext(TaskRequest request) { taskRequestContextCache.put(request.getTaskInstanceId(), request); } /** * remove taskInstance by taskInstanceId * * @param taskInstanceId taskInstanceId */ public static void removeByTaskInstanceId(Integer taskInstanceId) { taskRequestContextCache.remove(taskInstanceId); } public static boolean updateTaskExecutionContext(TaskRequest request) { taskRequestContextCache.computeIfPresent(request.getTaskInstanceId(), (k, v) -> request); return taskRequestContextCache.containsKey(request.getTaskInstanceId()); } }
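The file above is a static, ConcurrentHashMap-backed registry keyed by task instance id. Below is a minimal usage sketch; the wrapper class and its method names are hypothetical, and only the TaskExecutionContextCacheManager calls and the TaskRequest getter shown in the file above are taken from the source.

```java
import org.apache.dolphinscheduler.spi.task.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;

// Hypothetical worker-side helper; only the cache-manager calls below come from the file above.
public class TaskContextCacheUsageSketch {

    /** Register the task context before execution so kill/ack processors can find it. */
    public static void onTaskDispatched(TaskRequest request) {
        TaskExecutionContextCacheManager.cacheTaskExecutionContext(request);
    }

    /** Look up and refresh a cached context, e.g. after a master host update. */
    public static boolean onHostUpdate(TaskRequest updatedRequest) {
        TaskRequest cached =
                TaskExecutionContextCacheManager.getByTaskInstanceId(updatedRequest.getTaskInstanceId());
        if (cached == null) {
            return false; // task already finished or was never dispatched to this worker
        }
        // updateTaskExecutionContext only replaces an entry that is still present
        return TaskExecutionContextCacheManager.updateTaskExecutionContext(updatedRequest);
    }

    /** Drop the context once the task has finished (or been killed). */
    public static void onTaskFinished(int taskInstanceId) {
        TaskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
    }
}
```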
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,110
[Bug] [Worker/Master] Data duplication caused by worker faking death
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. Because the CPU load was too high, the worker lost its connection with ZooKeeper. 2. The master failed over the worker and restarted the tasks, but could not kill the original ones. 3. The same task was therefore run twice and the result data was duplicated. ### What you expected to happen 1. The worker should check resource usage before submitting a task to the execution thread (a minimal sketch of such a pre-submit check follows this record). 2. Could the worker actively reduce CPU load by killing tasks? ### How to reproduce 1. CPU load is too high: ![image](https://user-images.githubusercontent.com/29528966/149973563-b8f5c39a-9930-4ab2-8997-8416eac44430.png) 2. The worker lost its connection with ZooKeeper: ![image](https://user-images.githubusercontent.com/29528966/149975735-5e70fd4a-0b01-4e0b-a7bf-452f3f11eefc.png) ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8110
https://github.com/apache/dolphinscheduler/pull/8490
512f8cb5dde7fb42398e7caa50c0c6665b6ec45f
f2b9796bc20ef40e1737ea3afe9a00503b44ae3d
"2022-01-18T16:27:25Z"
java
"2022-02-23T10:22:42Z"
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.config.NettyServerConfig; import org.apache.dolphinscheduler.server.log.LoggerRequestProcessor; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.server.worker.plugin.TaskPluginManager; import org.apache.dolphinscheduler.server.worker.processor.DBTaskAckProcessor; import org.apache.dolphinscheduler.server.worker.processor.DBTaskResponseProcessor; import org.apache.dolphinscheduler.server.worker.processor.HostUpdateProcessor; import org.apache.dolphinscheduler.server.worker.processor.TaskExecuteProcessor; import org.apache.dolphinscheduler.server.worker.processor.TaskKillProcessor; import org.apache.dolphinscheduler.server.worker.registry.WorkerRegistryClient; import org.apache.dolphinscheduler.server.worker.runner.RetryReportTaskStatusThread; import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.util.Set; import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.annotation.ComponentScan; import org.springframework.transaction.annotation.EnableTransactionManagement; @SpringBootApplication @EnableTransactionManagement @ComponentScan("org.apache.dolphinscheduler") public class WorkerServer implements IStoppable { /** * logger */ private static final Logger logger = LoggerFactory.getLogger(WorkerServer.class); /** * netty remote server */ private NettyRemotingServer nettyRemotingServer; /** * worker config */ @Autowired private WorkerConfig workerConfig; /** * spring application context * only use it for initialization */ @Autowired private SpringApplicationContext springApplicationContext; /** * alert model netty remote server */ @Autowired private AlertClientService alertClientService; @Autowired private RetryReportTaskStatusThread retryReportTaskStatusThread; @Autowired private WorkerManagerThread workerManagerThread; /** * worker registry */ @Autowired private WorkerRegistryClient workerRegistryClient; 
@Autowired private TaskPluginManager taskPluginManager; @Autowired private TaskExecuteProcessor taskExecuteProcessor; @Autowired private TaskKillProcessor taskKillProcessor; @Autowired private DBTaskAckProcessor dbTaskAckProcessor; @Autowired private DBTaskResponseProcessor dbTaskResponseProcessor; @Autowired private HostUpdateProcessor hostUpdateProcessor; @Autowired private LoggerRequestProcessor loggerRequestProcessor; /** * worker server startup, not use web service * * @param args arguments */ public static void main(String[] args) { Thread.currentThread().setName(Constants.THREAD_NAME_WORKER_SERVER); SpringApplication.run(WorkerServer.class); } /** * worker server run */ @PostConstruct public void run() { // init remoting server NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(workerConfig.getListenPort()); this.nettyRemotingServer = new NettyRemotingServer(serverConfig); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_REQUEST, taskExecuteProcessor); this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_REQUEST, taskKillProcessor); this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_ACK, dbTaskAckProcessor); this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_RESPONSE, dbTaskResponseProcessor); this.nettyRemotingServer.registerProcessor(CommandType.PROCESS_HOST_UPDATE_REQUEST, hostUpdateProcessor); // logger server this.nettyRemotingServer.registerProcessor(CommandType.GET_LOG_BYTES_REQUEST, loggerRequestProcessor); this.nettyRemotingServer.registerProcessor(CommandType.ROLL_VIEW_LOG_REQUEST, loggerRequestProcessor); this.nettyRemotingServer.registerProcessor(CommandType.VIEW_WHOLE_LOG_REQUEST, loggerRequestProcessor); this.nettyRemotingServer.registerProcessor(CommandType.REMOVE_TAK_LOG_REQUEST, loggerRequestProcessor); this.nettyRemotingServer.start(); // worker registry try { this.workerRegistryClient.registry(); this.workerRegistryClient.setRegistryStoppable(this); Set<String> workerZkPaths = this.workerRegistryClient.getWorkerZkPaths(); this.workerRegistryClient.handleDeadServer(workerZkPaths, NodeType.WORKER, Constants.DELETE_OP); } catch (Exception e) { logger.error(e.getMessage(), e); throw new RuntimeException(e); } // task execute manager this.workerManagerThread.start(); // retry report task status this.retryReportTaskStatusThread.start(); /* * registry hooks, which are called before the process exits */ Runtime.getRuntime().addShutdownHook(new Thread(() -> { if (Stopper.isRunning()) { close("shutdownHook"); } })); } public void close(String cause) { try { // execute only once if (Stopper.isStopped()) { return; } logger.info("worker server is stopping ..., cause : {}", cause); // set stop signal is true Stopper.stop(); try { // thread sleep 3 seconds for thread quitely stop Thread.sleep(3000L); } catch (Exception e) { logger.warn("thread sleep exception", e); } // close this.nettyRemotingServer.close(); this.workerRegistryClient.unRegistry(); this.alertClientService.close(); this.springApplicationContext.close(); } catch (Exception e) { logger.error("worker server stop exception ", e); } } @Override public void stop(String cause) { close(cause); } }
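The report for this issue suggests that the worker should check resource usage before handing a task to the execution thread. The sketch below illustrates that idea with plain JDK APIs only; the class name and thresholds are hypothetical assumptions, not DolphinScheduler's actual implementation.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;

// Hypothetical pre-submit guard illustrating the "check resource before submitting task" idea.
public final class WorkerResourceGuardSketch {

    private static final double MAX_LOAD_PER_CORE = 0.8;                    // assumed threshold
    private static final long MIN_FREE_MEMORY_BYTES = 256L * 1024 * 1024;   // assumed threshold

    private WorkerResourceGuardSketch() {
    }

    /** Returns true when the host looks healthy enough to accept another task. */
    public static boolean canAcceptTask() {
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        int cores = Runtime.getRuntime().availableProcessors();
        double loadAverage = os.getSystemLoadAverage(); // -1 if not available on this platform
        long freeMemory = Runtime.getRuntime().freeMemory();

        boolean cpuOk = loadAverage < 0 || loadAverage < cores * MAX_LOAD_PER_CORE;
        boolean memoryOk = freeMemory > MIN_FREE_MEMORY_BYTES;
        return cpuOk && memoryOk;
    }
}
```

A worker could call canAcceptTask() from its task-execute processing path and delay or reject dispatch when it returns false, so an overloaded host stops accumulating tasks it cannot finish.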
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,431
[Bug] [API] The name of the process definition doesn't display on the page of task definition.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I browsed the task definition list page, I found that the process definition name didn't display. So I checked the response of the requested API and confirmed that this issue is caused by the backend. ![image](https://user-images.githubusercontent.com/4928204/154665579-30b8f1e3-b7f9-4b07-9f34-5773f84eace4.png) ### What you expected to happen I expect the process definition name to display correctly. ### How to reproduce Browse the task definition list page and you will see it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
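One generic way to address this kind of gap is to back-fill the missing workflow names after the paged task query. The sketch below shows that pattern with plain JDK collections on Java 16+; the record types and the lookup function are hypothetical stand-ins, not the project's actual entities, mappers, or fix.

```java
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

// Hypothetical DTOs standing in for the page items and the workflow lookup result.
record TaskRow(long taskCode, long processDefinitionCode, String taskName, String processDefinitionName) {}
record WorkflowName(long code, String name) {}

public class FillWorkflowNameSketch {

    /** Back-fills processDefinitionName on each row from a code -> name lookup. */
    public static List<TaskRow> fillNames(List<TaskRow> rows,
                                          Function<Set<Long>, List<WorkflowName>> lookupByCodes) {
        // Collect the distinct workflow codes referenced by the current page.
        Set<Long> codes = rows.stream().map(TaskRow::processDefinitionCode).collect(Collectors.toSet());
        // One bulk lookup, turned into a code -> name map.
        Map<Long, String> names = lookupByCodes.apply(codes).stream()
                .collect(Collectors.toMap(WorkflowName::code, WorkflowName::name));
        // Rebuild each row with its workflow name filled in (empty string if unknown).
        return rows.stream()
                .map(r -> new TaskRow(r.taskCode(), r.processDefinitionCode(), r.taskName(),
                        names.getOrDefault(r.processDefinitionCode(), "")))
                .collect(Collectors.toList());
    }
}
```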
https://github.com/apache/dolphinscheduler/issues/8431
https://github.com/apache/dolphinscheduler/pull/8509
6fb112d1daf1a07dee15dcc0ca00ed272d2e387b
c590ad43d7826a2c441801d71ee07ba284cfb042
"2022-02-18T10:35:35Z"
java
"2022-02-24T00:50:03Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/TaskDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.TaskDefinitionService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ConditionType; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils.CodeGenerateException; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskMainInfo; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * task definition 
service impl */ @Service public class TaskDefinitionServiceImpl extends BaseServiceImpl implements TaskDefinitionService { private static final Logger logger = LoggerFactory.getLogger(TaskDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessService processService; /** * create task definition * * @param loginUser login user * @param projectCode project code * @param taskDefinitionJson task definition json */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> createTaskDefinition(User loginUser, long projectCode, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return result; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return result; } } int saveTaskResult = processService.saveTaskDefine(loginUser, projectCode, taskDefinitionLogs, Boolean.TRUE); if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } Map<String, Object> resData = new HashMap<>(); resData.put("total", taskDefinitionLogs.size()); resData.put("code", StringUtils.join(taskDefinitionLogs.stream().map(TaskDefinition::getCode).collect(Collectors.toList()), ",")); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, resData); return result; } /** * create single task definition that binds the workflow * * @param loginUser login user * @param projectCode project code * @param processDefinitionCode process definition code * @param taskDefinitionJsonObj task definition json object * @param upstreamCodes upstream task codes, sep comma * @return create result code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> createTaskBindsWorkFlow(User loginUser, long projectCode, long processDefinitionCode, String taskDefinitionJsonObj, String upstreamCodes) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode); if (processDefinition == null || projectCode != processDefinition.getProjectCode()) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, 
processDefinitionCode); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinitionCode); return result; } TaskDefinitionLog taskDefinition = JSONUtils.parseObject(taskDefinitionJsonObj, TaskDefinitionLog.class); if (taskDefinition == null) { logger.error("taskDefinitionJsonObj is not valid json"); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJsonObj); return result; } if (!CheckUtils.checkTaskDefinitionParameters(taskDefinition)) { logger.error("task definition {} parameter invalid", taskDefinition.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinition.getName()); return result; } long taskCode = taskDefinition.getCode(); if (taskCode == 0) { try { taskCode = CodeGenerateUtils.getInstance().genCode(); taskDefinition.setCode(taskCode); } catch (CodeGenerateException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, taskDefinitionJsonObj); return result; } } if (StringUtils.isNotBlank(upstreamCodes)) { Set<Long> upstreamTaskCodes = Arrays.stream(upstreamCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); List<TaskDefinition> upstreamTaskDefinitionList = taskDefinitionMapper.queryByCodeList(upstreamTaskCodes); Set<Long> queryUpStreamTaskCodes = upstreamTaskDefinitionList.stream().map(TaskDefinition::getCode).collect(Collectors.toSet()); // upstreamTaskCodes - queryUpStreamTaskCodes Set<Long> diffCode = upstreamTaskCodes.stream().filter(code -> !queryUpStreamTaskCodes.contains(code)).collect(Collectors.toSet()); if (!diffCode.isEmpty()) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(diffCode, Constants.COMMA)); return result; } List<ProcessTaskRelationLog> processTaskRelationLogList = Lists.newArrayList(); for (TaskDefinition upstreamTask : upstreamTaskDefinitionList) { ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(); processTaskRelationLog.setPreTaskCode(upstreamTask.getCode()); processTaskRelationLog.setPreTaskVersion(upstreamTask.getVersion()); processTaskRelationLog.setPostTaskCode(taskCode); processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST); processTaskRelationLog.setConditionType(ConditionType.NONE); processTaskRelationLog.setConditionParams("{}"); processTaskRelationLogList.add(processTaskRelationLog); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { processTaskRelationLogList.addAll(processTaskRelationList.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList())); } int insertResult = processService.saveTaskRelation(loginUser, projectCode, processDefinition.getCode(), processDefinition.getVersion(), processTaskRelationLogList, Lists.newArrayList(), Boolean.TRUE); if (insertResult != Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR); } } int saveTaskResult = processService.saveTaskDefine(loginUser, projectCode, Lists.newArrayList(taskDefinition), Boolean.TRUE); if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, taskDefinition); return result; } /** * query task definition * * @param 
loginUser login user * @param projectCode project code * @param taskName task name */ @Override public Map<String, Object> queryTaskDefinitionByName(User loginUser, long projectCode, String taskName) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByName(project.getCode(), taskName); if (taskDefinition == null) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskName); } else { result.put(Constants.DATA_LIST, taskDefinition); putMsg(result, Status.SUCCESS); } return result; } /** * delete task definition * Only offline and no downstream dependency can be deleted * * @param loginUser login user * @param projectCode project code * @param taskCode task code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> deleteTaskDefinitionByCode(User loginUser, long projectCode, long taskCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (taskCode == 0) { putMsg(result, Status.DELETE_TASK_DEFINE_BY_CODE_ERROR); return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null || projectCode != taskDefinition.getProjectCode()) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode); return result; } if (processService.isTaskOnline(taskCode) && taskDefinition.getFlag() == Flag.YES) { putMsg(result, Status.TASK_DEFINE_STATE_ONLINE, taskCode); return result; } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryDownstreamByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> postTaskCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getPostTaskCode) .collect(Collectors.toSet()); putMsg(result, Status.TASK_HAS_DOWNSTREAM, StringUtils.join(postTaskCodes, ",")); return result; } int delete = taskDefinitionMapper.deleteByCode(taskCode); if (delete > 0) { List<ProcessTaskRelation> taskRelationList = processTaskRelationMapper.queryUpstreamByCode(projectCode, taskCode); if (!taskRelationList.isEmpty()) { long processDefinitionCode = taskRelationList.get(0).getProcessDefinitionCode(); List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); List<ProcessTaskRelation> relationList = processTaskRelations.stream().filter(r -> r.getPostTaskCode() != taskCode).collect(Collectors.toList()); updateDag(loginUser, result, processDefinitionCode, relationList, Lists.newArrayList()); } else { putMsg(result, Status.SUCCESS); } } else { putMsg(result, Status.DELETE_TASK_DEFINE_BY_CODE_ERROR); throw new ServiceException(Status.DELETE_TASK_DEFINE_BY_CODE_ERROR); } return result; } private void updateDag(User loginUser, Map<String, Object> result, long processDefinitionCode, List<ProcessTaskRelation> processTaskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode); if (processDefinition == null) { throw new ServiceException(Status.PROCESS_DEFINE_NOT_EXIST); } int insertVersion = 
processService.saveProcessDefine(loginUser, processDefinition, Boolean.TRUE, Boolean.TRUE); if (insertVersion <= 0) { throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } List<ProcessTaskRelationLog> relationLogs = processTaskRelationList.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList()); int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, relationLogs, taskDefinitionLogs, Boolean.TRUE); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } } /** * update task definition * * @param loginUser login user * @param projectCode project code * @param taskCode task code * @param taskDefinitionJsonObj task definition json object */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> updateTaskDefinition(User loginUser, long projectCode, long taskCode, String taskDefinitionJsonObj) { Map<String, Object> result = new HashMap<>(); TaskDefinitionLog taskDefinitionToUpdate = updateTask(loginUser, projectCode, taskCode, taskDefinitionJsonObj, result); if (taskDefinitionToUpdate == null) { return result; } List<ProcessTaskRelation> taskRelationList = processTaskRelationMapper.queryUpstreamByCode(projectCode, taskCode); if (!taskRelationList.isEmpty()) { long processDefinitionCode = taskRelationList.get(0).getProcessDefinitionCode(); List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); updateDag(loginUser, result, processDefinitionCode, processTaskRelations, Lists.newArrayList(taskDefinitionToUpdate)); } result.put(Constants.DATA_LIST, taskCode); putMsg(result, Status.SUCCESS); return result; } private TaskDefinitionLog updateTask(User loginUser, long projectCode, long taskCode, String taskDefinitionJsonObj, Map<String, Object> result) { Project project = projectMapper.queryByCode(projectCode); //check user access for project result.putAll(projectService.checkProjectAndAuth(loginUser, project, projectCode)); if (result.get(Constants.STATUS) != Status.SUCCESS) { return null; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode); return null; } if (processService.isTaskOnline(taskCode) && taskDefinition.getFlag() == Flag.YES) { putMsg(result, Status.NOT_SUPPORT_UPDATE_TASK_DEFINITION); return null; } TaskDefinitionLog taskDefinitionToUpdate = JSONUtils.parseObject(taskDefinitionJsonObj, TaskDefinitionLog.class); if (taskDefinition.equals(taskDefinitionToUpdate)) { return null; } if (taskDefinitionToUpdate == null) { logger.error("taskDefinitionJson is not valid json"); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJsonObj); return null; } if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionToUpdate)) { logger.error("task definition {} parameter invalid", taskDefinitionToUpdate.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionToUpdate.getName()); return null; } Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskCode); if (version == null || version == 0) { putMsg(result, Status.DATA_IS_NOT_VALID, taskCode); return null; } Date now = new Date(); 
taskDefinitionToUpdate.setCode(taskCode); taskDefinitionToUpdate.setId(taskDefinition.getId()); taskDefinitionToUpdate.setProjectCode(projectCode); taskDefinitionToUpdate.setUserId(taskDefinition.getUserId()); taskDefinitionToUpdate.setVersion(++version); taskDefinitionToUpdate.setTaskType(taskDefinitionToUpdate.getTaskType().toUpperCase()); taskDefinitionToUpdate.setResourceIds(processService.getResourceIds(taskDefinitionToUpdate)); taskDefinitionToUpdate.setUpdateTime(now); int update = taskDefinitionMapper.updateById(taskDefinitionToUpdate); taskDefinitionToUpdate.setOperator(loginUser.getId()); taskDefinitionToUpdate.setOperateTime(now); taskDefinitionToUpdate.setCreateTime(now); int insert = taskDefinitionLogMapper.insert(taskDefinitionToUpdate); if ((update & insert) != 1) { putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR); } return taskDefinitionToUpdate; } /** * update task definition and upstream * * @param loginUser login user * @param projectCode project code * @param taskCode task definition code * @param taskDefinitionJsonObj task definition json object * @param upstreamCodes upstream task codes, sep comma * @return update result code */ @Override public Map<String, Object> updateTaskWithUpstream(User loginUser, long projectCode, long taskCode, String taskDefinitionJsonObj, String upstreamCodes) { Map<String, Object> result = new HashMap<>(); TaskDefinitionLog taskDefinitionToUpdate = updateTask(loginUser, projectCode, taskCode, taskDefinitionJsonObj, result); if (result.get(Constants.STATUS) != Status.SUCCESS && taskDefinitionToUpdate == null) { return result; } List<ProcessTaskRelation> upstreamTaskRelations = processTaskRelationMapper.queryUpstreamByCode(projectCode, taskCode); Set<Long> upstreamCodeSet = upstreamTaskRelations.stream().map(ProcessTaskRelation::getPreTaskCode).collect(Collectors.toSet()); Set<Long> upstreamTaskCodes = Collections.emptySet(); if (StringUtils.isNotEmpty(upstreamCodes)) { upstreamTaskCodes = Arrays.stream(upstreamCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); } if (CollectionUtils.isEqualCollection(upstreamCodeSet, upstreamTaskCodes) && taskDefinitionToUpdate == null) { putMsg(result, Status.SUCCESS); return result; } else { if (taskDefinitionToUpdate == null) { taskDefinitionToUpdate = JSONUtils.parseObject(taskDefinitionJsonObj, TaskDefinitionLog.class); } } Map<Long, TaskDefinition> queryUpStreamTaskCodeMap; if (!upstreamTaskCodes.isEmpty()) { List<TaskDefinition> upstreamTaskDefinitionList = taskDefinitionMapper.queryByCodeList(upstreamTaskCodes); queryUpStreamTaskCodeMap = upstreamTaskDefinitionList.stream().collect(Collectors.toMap(TaskDefinition::getCode, taskDefinition -> taskDefinition)); // upstreamTaskCodes - queryUpStreamTaskCodeMap.keySet upstreamTaskCodes.removeAll(queryUpStreamTaskCodeMap.keySet()); if (!upstreamTaskCodes.isEmpty()) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(upstreamTaskCodes, Constants.COMMA)); return result; } } else { queryUpStreamTaskCodeMap = new HashMap<>(); } if (!upstreamTaskRelations.isEmpty()) { ProcessTaskRelation taskRelation = upstreamTaskRelations.get(0); List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(projectCode, taskRelation.getProcessDefinitionCode()); List<ProcessTaskRelation> processTaskRelationList = Lists.newArrayList(processTaskRelations); List<ProcessTaskRelation> relationList = Lists.newArrayList(); for (ProcessTaskRelation 
processTaskRelation : processTaskRelationList) { if (processTaskRelation.getPostTaskCode() == taskCode) { if (queryUpStreamTaskCodeMap.containsKey(processTaskRelation.getPreTaskCode()) && processTaskRelation.getPreTaskCode() != 0L) { queryUpStreamTaskCodeMap.remove(processTaskRelation.getPreTaskCode()); } else { processTaskRelation.setPreTaskCode(0L); processTaskRelation.setPreTaskVersion(0); relationList.add(processTaskRelation); } } } processTaskRelationList.removeAll(relationList); for (Map.Entry<Long, TaskDefinition> queryUpStreamTask : queryUpStreamTaskCodeMap.entrySet()) { taskRelation.setPreTaskCode(queryUpStreamTask.getKey()); taskRelation.setPreTaskVersion(queryUpStreamTask.getValue().getVersion()); processTaskRelationList.add(taskRelation); } if (queryUpStreamTaskCodeMap.isEmpty() && !processTaskRelationList.isEmpty()) { processTaskRelationList.add(processTaskRelationList.get(0)); } updateDag(loginUser, result, taskRelation.getProcessDefinitionCode(), processTaskRelations, Lists.newArrayList(taskDefinitionToUpdate)); } result.put(Constants.DATA_LIST, taskCode); putMsg(result, Status.SUCCESS); return result; } /** * switch task definition * * @param loginUser login user * @param projectCode project code * @param taskCode task code * @param version the version user want to switch */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> switchVersion(User loginUser, long projectCode, long taskCode, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (processService.isTaskOnline(taskCode)) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE); return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null || projectCode != taskDefinition.getProjectCode()) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode); return result; } TaskDefinitionLog taskDefinitionUpdate = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, version); taskDefinitionUpdate.setUserId(loginUser.getId()); taskDefinitionUpdate.setUpdateTime(new Date()); taskDefinitionUpdate.setId(taskDefinition.getId()); int switchVersion = taskDefinitionMapper.updateById(taskDefinitionUpdate); if (switchVersion > 0) { List<ProcessTaskRelation> taskRelationList = processTaskRelationMapper.queryUpstreamByCode(projectCode, taskCode); if (!taskRelationList.isEmpty()) { long processDefinitionCode = taskRelationList.get(0).getProcessDefinitionCode(); List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); updateDag(loginUser, result, processDefinitionCode, processTaskRelations, Lists.newArrayList(taskDefinitionUpdate)); } else { putMsg(result, Status.SUCCESS); } } else { putMsg(result, Status.SWITCH_TASK_DEFINITION_VERSION_ERROR); } return result; } @Override public Result queryTaskDefinitionVersions(User loginUser, long projectCode, long taskCode, int pageNo, int pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); // check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return 
result; } PageInfo<TaskDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<TaskDefinitionLog> page = new Page<>(pageNo, pageSize); IPage<TaskDefinitionLog> taskDefinitionVersionsPaging = taskDefinitionLogMapper.queryTaskDefinitionVersionsPaging(page, taskCode, projectCode); List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionVersionsPaging.getRecords(); pageInfo.setTotalList(taskDefinitionLogs); pageInfo.setTotal((int) taskDefinitionVersionsPaging.getTotal()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } @Override public Map<String, Object> deleteByCodeAndVersion(User loginUser, long projectCode, long taskCode, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode); } else { if (taskDefinition.getVersion() == version) { putMsg(result, Status.MAIN_TABLE_USING_VERSION); return result; } int delete = taskDefinitionLogMapper.deleteByCodeAndVersion(taskCode, version); if (delete > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.DELETE_TASK_DEFINITION_VERSION_ERROR); } } return result; } @Override public Map<String, Object> queryTaskDefinitionDetail(User loginUser, long projectCode, long taskCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null || projectCode != taskDefinition.getProjectCode()) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode); } else { result.put(Constants.DATA_LIST, taskDefinition); putMsg(result, Status.SUCCESS); } return result; } @Override public Result queryTaskDefinitionListPaging(User loginUser, long projectCode, String searchWorkflowName, String searchTaskName, TaskType taskType, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<TaskMainInfo> page = new Page<>(pageNo, pageSize); IPage<TaskMainInfo> taskMainInfoIPage = taskDefinitionMapper.queryDefineListPaging(page, projectCode, searchWorkflowName, searchTaskName, taskType == null ? 
StringUtils.EMPTY : taskType.getDesc()); List<TaskMainInfo> records = taskMainInfoIPage.getRecords(); if (!records.isEmpty()) { Map<Long, TaskMainInfo> taskMainInfoMap = new HashMap<>(); for (TaskMainInfo info : records) { taskMainInfoMap.compute(info.getTaskCode(), (k, v) -> { if (v == null) { Map<Long, String> upstreamTaskMap = new HashMap<>(); if (info.getUpstreamTaskCode() != 0) { upstreamTaskMap.put(info.getUpstreamTaskCode(), info.getUpstreamTaskName()); info.setUpstreamTaskCode(0L); info.setUpstreamTaskName(StringUtils.EMPTY); } info.setUpstreamTaskMap(upstreamTaskMap); v = info; } if (info.getUpstreamTaskCode() != 0) { v.getUpstreamTaskMap().put(info.getUpstreamTaskCode(), info.getUpstreamTaskName()); } return v; }); } taskMainInfoIPage.setRecords(Lists.newArrayList(taskMainInfoMap.values())); taskMainInfoIPage.setTotal(taskMainInfoMap.values().size()); } PageInfo<TaskMainInfo> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) taskMainInfoIPage.getTotal()); pageInfo.setTotalList(taskMainInfoIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } @Override public Map<String, Object> genTaskCodeList(Integer genNum) { Map<String, Object> result = new HashMap<>(); if (genNum == null || genNum < 1 || genNum > 100) { logger.error("the genNum must be great than 1 and less than 100"); putMsg(result, Status.DATA_IS_NOT_VALID, genNum); return result; } List<Long> taskCodes = new ArrayList<>(); try { for (int i = 0; i < genNum; i++) { taskCodes.add(CodeGenerateUtils.getInstance().genCode()); } } catch (CodeGenerateException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code"); } putMsg(result, Status.SUCCESS); // return processDefinitionCode result.put(Constants.DATA_LIST, taskCodes); return result; } /** * release task definition * * @param loginUser login user * @param projectCode project code * @param code task definition code * @param releaseState releaseState * @return update result code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> releaseTaskDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) result.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { return result; } if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(code); if (taskDefinition == null || projectCode != taskDefinition.getProjectCode()) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, code); return result; } TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, taskDefinition.getVersion()); if (taskDefinitionLog == null) { putMsg(result, Status.TASK_DEFINE_NOT_EXIST, code); return result; } switch (releaseState) { case OFFLINE: taskDefinition.setFlag(Flag.NO); taskDefinitionLog.setFlag(Flag.NO); break; case ONLINE: String resourceIds = taskDefinition.getResourceIds(); if (StringUtils.isNotBlank(resourceIds)) { Integer[] resourceIdArray = Arrays.stream(resourceIds.split(",")).map(Integer::parseInt).toArray(Integer[]::new); PermissionCheck<Integer> permissionCheck = new 
PermissionCheck(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger); try { permissionCheck.checkPermission(); } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION); return result; } } taskDefinition.setFlag(Flag.YES); taskDefinitionLog.setFlag(Flag.YES); break; default: putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } int update = taskDefinitionMapper.updateById(taskDefinition); int updateLog = taskDefinitionLogMapper.updateById(taskDefinitionLog); if ((update == 0 && updateLog == 1) || (update == 1 && updateLog == 0)) { putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR); } putMsg(result, Status.SUCCESS); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/README.md
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # pydolphinscheduler [![PyPi Version](https://img.shields.io/pypi/v/apache-dolphinscheduler.svg?style=flat-square&logo=PyPi)](https://pypi.org/project/apache-dolphinscheduler/) [![PyPi Python Versions](https://img.shields.io/pypi/pyversions/apache-dolphinscheduler.svg?style=flat-square&logo=python)](https://pypi.org/project/apache-dolphinscheduler/) [![PyPi License](https://img.shields.io/pypi/l/apache-dolphinscheduler.svg?style=flat-square)](https://pypi.org/project/apache-dolphinscheduler/) [![PyPi Status](https://img.shields.io/pypi/status/apache-dolphinscheduler.svg?style=flat-square)](https://pypi.org/project/apache-dolphinscheduler/) [![PyPi Downloads](https://img.shields.io/pypi/dm/apache-dolphinscheduler?style=flat-square)](https://pypi.org/project/apache-dolphinscheduler/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?style=flat-square)](https://github.com/psf/black) [![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat-square&labelColor=ef8336)](https://pycqa.github.io/isort) [![GitHub Build](https://github.com/apache/dolphinscheduler/actions/workflows/py-ci.yml/badge.svg?branch=dev)](https://github.com/apache/dolphinscheduler/actions?query=workflow%3A%22Python+API%22) **PyDolphinScheduler** is the Python API for Apache DolphinScheduler, which allows you to define your workflow in Python code, aka workflow-as-code. ## Quick Start ### Installation ```shell # Install $ pip install apache-dolphinscheduler # Check the installation; it succeeds if you see the version output, here we use 0.1.0 as an example $ python -c "import pydolphinscheduler; print(pydolphinscheduler.__version__)" 0.1.0 ``` Here we show you how to install and run a simple example of pydolphinscheduler. ### Start Server And Run Example Before you run an example, you have to start the backend server. You could follow the [development setup](https://dolphinscheduler.apache.org/en-us/development/development-environment-setup.html) section "DolphinScheduler Standalone Quick Start" to set up the developer environment. You have to start both the backend and frontend server in this step, which means you can view the DolphinScheduler UI in your browser at the URL http://localhost:12345/dolphinscheduler After the backend server has started, all requests from `pydolphinscheduler` are sent to the backend server. For now we can run a simple example by: <!-- TODO Add examples directory to dist package later.
--> ```shell # Please make sure your terminal could curl https://raw.githubusercontent.com/apache/dolphinscheduler/dev/dolphinscheduler-python/pydolphinscheduler/examples/tutorial.py -o ./tutorial.py python ./tutorial.py ``` > **_NOTICE:_** Since Apache DolphinScheduler's tenant is required while running a command, you might need to change > the tenant value in `example/tutorial.py`. For now the value is `tenant_exists`; please change it to a username that exists > in your environment. After the command executes, you could see a new project with a single process definition named *tutorial* in the [UI-project list](https://dolphinscheduler.apache.org/en-us/docs/latest/user_doc/guide/project/project-list.html). ## Develop Up to now, we have finished the quick start by running an example of pydolphinscheduler. If you want to inspect or join pydolphinscheduler development, you could take a look at [develop](./DEVELOP.md) ## Release If you are interested in how to release **PyDolphinScheduler**, you could go and see [release](./RELEASE.md) ## What's more For more detailed information, please go and see the **PyDolphinScheduler** [document](https://dolphinscheduler.apache.org/python/index.html)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/docs/source/cli.rst
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/docs/source/conf.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = "pydolphinscheduler" copyright = "2022, apache" author = "apache" # The full version, including alpha/beta/rc tags release = "0.0.1" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ # Measures durations of Sphinx processing "sphinx.ext.duration", # Semi-automatic make docstrings to document "sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.autosectionlabel", "sphinx_rtd_theme", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] autodoc_default_options = { "members": True, "show-inheritance": True, "private-members": True, "undoc-members": True, "member-order": "groupwise", } autosectionlabel_prefix_document = True # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"]
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/docs/source/index.rst
.. Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at .. http://www.apache.org/licenses/LICENSE-2.0 .. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. PyDolphinScheduler ================== **PyDolphinScheduler** is the Python API for `Apache DolphinScheduler <https://dolphinscheduler.apache.org>`_, which allows you to define your workflow in Python code, aka workflow-as-code. You can go and find out how to :ref:`install <start:getting started>` the project. Or if you want to see a simple example, go and see :doc:`tutorial` for more detail. .. toctree:: :maxdepth: 2 start tutorial concept tasks/index api Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/setup.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The script for setting up pydolphinscheduler.""" import sys from os.path import dirname, join from setuptools import find_packages, setup if sys.version_info[0] < 3: raise Exception( "pydolphinscheduler does not support Python 2. Please upgrade to Python 3." ) version = "0.1.0" # Start package required prod = [ "py4j~=0.10", ] doc = [ "sphinx>=4.3", "sphinx_rtd_theme>=1.0", ] test = [ "pytest>=6.2", "freezegun>=1.1", "coverage>=6.1", ] style = [ "flake8>=4.0", "flake8-docstrings>=1.6", "flake8-black>=0.2", "isort>=5.10", ] dev = style + test + doc all_dep = prod + dev # End package required def read(*names, **kwargs): """Read file content from given file path.""" return open( join(dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8") ).read() setup( name="apache-dolphinscheduler", version=version, license="Apache License 2.0", description="Apache DolphinScheduler Python API", long_description=read("README.md"), # Make sure pypi is expecting markdown long_description_content_type="text/markdown", author="Apache Software Foundation", author_email="dev@dolphinscheduler.apache.org", url="https://dolphinscheduler.apache.org/", python_requires=">=3.6", keywords=[ "dolphinscheduler", "workflow", "scheduler", "taskflow", ], project_urls={ "Homepage": "https://dolphinscheduler.apache.org", "Documentation": "https://dolphinscheduler.apache.org/python/index.html", "Source": "https://github.com/apache/dolphinscheduler/dolphinscheduler-python/pydolphinscheduler", "Issue Tracker": "https://github.com/apache/dolphinscheduler/issues", "Discussion": "https://github.com/apache/dolphinscheduler/discussions", "Twitter": "https://twitter.com/dolphinschedule", }, packages=find_packages(where="src"), package_dir={"": "src"}, include_package_data=True, package_data={ "examples": ["examples.tutorial.py"], }, platforms=["any"], classifiers=[ # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers "Development Status :: 3 - Alpha", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: Unix", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: User Interfaces", ], install_requires=prod, extras_require={ "all": all_dep, "dev": dev, "style": style, "test": test, "doc": doc, }, )
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/cli/__init__.py
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/cli/commands.py
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/tests/cli/__init__.py
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/tests/cli/test_version.py
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,518
[Feature][python] Add basic components for CLI
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add basic configure for CLI, which includes: * Third part CLI package * Basics usage for CLI * Document for CLI * Verify work after installation * Test on it ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8518
https://github.com/apache/dolphinscheduler/pull/8516
52ed895338a9dcb473fc16c6b8b61b59a19eb1f8
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
"2022-02-24T07:09:07Z"
java
"2022-02-24T08:05:55Z"
dolphinscheduler-python/pydolphinscheduler/tests/testing/cli.py
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,311
[Bug] [Worker Server] shell task error java.nio.file.NoSuchFileException
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [INFO] 2022-02-09 04:15:03.388 TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.shell.ShellTask:[138] - task execute path : /data/dolphinscheduler/exec/process/853772144123904/853777148452864_2/308426/645680 [ERROR] 2022-02-09 04:15:03.389 TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.shell.ShellTask:[103] - shell task error java.nio.file.NoSuchFileException: /data/dolphinscheduler/exec/process/853772144123904/853777148452864_2/308426/645680/308426_645680_node.sh at sun.nio.fs.UnixException.translateToIOException(UnixException.java:86) at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:102) at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:107) at sun.nio.fs.UnixFileSystemProvider.newByteChannel(UnixFileSystemProvider.java:214) at java.nio.file.Files.newByteChannel(Files.java:361) at java.nio.file.Files.createFile(Files.java:632) at org.apache.dolphinscheduler.plugin.task.shell.ShellTask.buildCommand(ShellTask.java:146) at org.apache.dolphinscheduler.plugin.task.shell.ShellTask.handle(ShellTask.java:96) at org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread.run(TaskExecuteThread.java:193) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ``` ### What you expected to happen The shell task creates all script files successfully. ### How to reproduce The bug appears intermittently; it is suspected that too many files are created at the same time (a hedged sketch of one possible guard follows this record's file content). ### Anything else _No response_ ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8311
https://github.com/apache/dolphinscheduler/pull/8526
3025f67d370e02ebae075a4ef68e1d7f0461b2d6
dd1397aaed9b498357c8e6f7fb2311ed61660630
"2022-02-09T01:58:48Z"
java
"2022-02-24T09:36:54Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-shell/src/main/java/org/apache/dolphinscheduler/plugin/task/shell/ShellTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.shell; import static org.apache.dolphinscheduler.spi.task.TaskConstants.EXIT_CODE_FAILURE; import static org.apache.dolphinscheduler.spi.task.TaskConstants.RWXR_XR_X; import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor; import org.apache.dolphinscheduler.plugin.task.api.ShellCommandExecutor; import org.apache.dolphinscheduler.plugin.task.api.TaskResponse; import org.apache.dolphinscheduler.plugin.task.util.OSUtils; import org.apache.dolphinscheduler.spi.task.AbstractParameters; import org.apache.dolphinscheduler.spi.task.Property; import org.apache.dolphinscheduler.spi.task.paramparser.ParamUtils; import org.apache.dolphinscheduler.spi.task.paramparser.ParameterUtils; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import org.apache.commons.collections4.MapUtils; import java.io.File; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.nio.file.attribute.FileAttribute; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.util.HashMap; import java.util.Map; import java.util.Set; /** * shell task */ public class ShellTask extends AbstractTaskExecutor { /** * shell parameters */ private ShellParameters shellParameters; /** * shell command executor */ private ShellCommandExecutor shellCommandExecutor; /** * taskExecutionContext */ private TaskRequest taskExecutionContext; /** * constructor * * @param taskExecutionContext taskExecutionContext */ public ShellTask(TaskRequest taskExecutionContext) { super(taskExecutionContext); this.taskExecutionContext = taskExecutionContext; this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle, taskExecutionContext, logger); } @Override public void init() { logger.info("shell task params {}", taskExecutionContext.getTaskParams()); shellParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), ShellParameters.class); if (!shellParameters.checkParameters()) { throw new RuntimeException("shell task params is not valid"); } } @Override public void handle() throws Exception { try { // construct process String command = buildCommand(); TaskResponse commandExecuteResult = shellCommandExecutor.run(command); setExitStatusCode(commandExecuteResult.getExitStatusCode()); setAppIds(commandExecuteResult.getAppIds()); setProcessId(commandExecuteResult.getProcessId()); shellParameters.dealOutParam(shellCommandExecutor.getVarPool()); } catch (Exception e) { logger.error("shell task error", e); setExitStatusCode(EXIT_CODE_FAILURE); throw e; } } @Override public void cancelApplication(boolean cancelApplication) throws Exception 
{ // cancel process shellCommandExecutor.cancelApplication(); } /** * create command * * @return file name * @throws Exception exception */ private String buildCommand() throws Exception { // generate scripts String fileName = String.format("%s/%s_node.%s", taskExecutionContext.getExecutePath(), taskExecutionContext.getTaskAppId(), OSUtils.isWindows() ? "bat" : "sh"); Path path = new File(fileName).toPath(); if (Files.exists(path)) { return fileName; } String script = shellParameters.getRawScript().replaceAll("\\r\\n", "\n"); script = parseScript(script); shellParameters.setRawScript(script); logger.info("raw script : {}", shellParameters.getRawScript()); logger.info("task execute path : {}", taskExecutionContext.getExecutePath()); Set<PosixFilePermission> perms = PosixFilePermissions.fromString(RWXR_XR_X); FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms); if (OSUtils.isWindows()) { Files.createFile(path); } else { Files.createFile(path, attr); } Files.write(path, shellParameters.getRawScript().getBytes(), StandardOpenOption.APPEND); return fileName; } @Override public AbstractParameters getParameters() { return shellParameters; } private String parseScript(String script) { // combining local and global parameters Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext,getParameters()); if (MapUtils.isEmpty(paramsMap)) { paramsMap = new HashMap<>(); } if (MapUtils.isNotEmpty(taskExecutionContext.getParamsMap())) { paramsMap.putAll(taskExecutionContext.getParamsMap()); } return ParameterUtils.convertParameterPlaceholders(script, ParamUtils.convert(paramsMap)); } }
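The stack trace in the issue body above points at `Files.createFile` inside `ShellTask.buildCommand`. Below is a minimal sketch of one possible guard, assuming the failure comes from a missing execute path or from two worker threads racing to create the same script file; the class and method here are hypothetical illustrations, not the fix actually merged in the linked pull request.

```java
import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class ScriptFileSketch {

    /**
     * Create the task script at the given path, tolerating a missing parent directory
     * and a concurrent create by another worker thread (both are assumptions about the
     * root cause, not confirmed by the report).
     */
    public static Path writeScript(String fileName, String script, boolean posix) throws IOException {
        Path path = Paths.get(fileName);
        if (Files.exists(path)) {
            return path;
        }
        // Guard against the execute path not existing yet.
        if (path.getParent() != null) {
            Files.createDirectories(path.getParent());
        }
        try {
            if (posix) {
                Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwxr-xr-x");
                FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
                Files.createFile(path, attr);
            } else {
                Files.createFile(path);
            }
        } catch (FileAlreadyExistsException e) {
            // Another thread created the script first; reuse it instead of failing the task.
            return path;
        }
        Files.write(path, script.getBytes(), StandardOpenOption.APPEND);
        return path;
    }
}
```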
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,892
[Bug] [api] When a non-admin user creates a udf function, resources other than .jar will be displayed in the UDF resource
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. When a non-admin user has been authorized some resources, the UDF resource list displays non-`.jar` resources when creating a UDF function ![image](https://user-images.githubusercontent.com/56599784/148630186-cf3de53b-e696-44f3-bd93-a4c8bfeb624e.png) 2. The UDF resource list displays correctly when the UDF function is created by the admin user ### What you expected to happen Non-`.jar` resources should not be displayed in the UDF resource list ### How to reproduce step 1: Under the administrator user, authorize some resources to user a step 2: Log in as user a and create or modify a UDF function; the UDF resource list displays non-`.jar` resources ### Anything else Discovered by looking at the code: in `org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl#queryAuthoredResourceList`, when querying UDF resources the `t_ds_relation_udfs_user` table is not used to filter by type; instead the `t_ds_relation_resources_user` table is queried directly (a minimal illustration of the expected `.jar` filtering follows this record's file content) ![image](https://user-images.githubusercontent.com/56599784/148630485-991ebe4d-b818-4526-90b0-156a3b4664fe.png) ### Version 2.0.1 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7892
https://github.com/apache/dolphinscheduler/pull/8458
b92f6d876d41c7e419572ac9ced3d0aad895d293
1ffb5d6e8d6512ee0be958069e8bbd1c105b2989
"2022-01-08T03:49:51Z"
java
"2022-02-24T10:17:25Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ResourcesServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.ALIAS; import static org.apache.dolphinscheduler.common.Constants.CONTENT; import static org.apache.dolphinscheduler.common.Constants.JAR; import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent; import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter; import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor; import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ResourcesService; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.RegexUtils; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ProgramType; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.dolphinscheduler.common.utils.FileUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.ResourcesUser; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils; import org.apache.commons.beanutils.BeanMap; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.regex.Matcher; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.dao.DuplicateKeyException; import 
org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.io.Files; /** * resources service impl */ @Service public class ResourcesServiceImpl extends BaseServiceImpl implements ResourcesService { private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceImpl.class); @Autowired private ResourceMapper resourcesMapper; @Autowired private UdfFuncMapper udfFunctionMapper; @Autowired private TenantMapper tenantMapper; @Autowired private UserMapper userMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; /** * create directory * * @param loginUser login user * @param name alias * @param description description * @param type type * @param pid parent id * @param currentDir current directory * @return create directory result */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> createDirectory(User loginUser, String name, String description, ResourceType type, int pid, String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); result = verifyResource(loginUser, type, fullName, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } if (checkResourceExists(fullName, type.ordinal())) { logger.error("resource directory {} has exist, can't recreate", fullName); putMsg(result, Status.RESOURCE_EXIST); return result; } Date now = new Date(); Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now); try { resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!"class".equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); } catch (DuplicateKeyException e) { logger.error("resource directory {} has exist, can't recreate", fullName); putMsg(result, Status.RESOURCE_EXIST); return result; } catch (Exception e) { logger.error("resource already exists, can't recreate ", e); throw new ServiceException("resource already exists, can't recreate"); } //create directory in hdfs createDirectory(loginUser,fullName,type,result); return result; } /** * create resource * * @param loginUser login user * @param name alias * @param desc description * @param file file * @param type type * @param pid parent id * @param currentDir current directory * @return create result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> createResource(User loginUser, String name, String desc, ResourceType type, MultipartFile file, int pid, String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } result = verifyPid(loginUser, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } result = verifyFile(name, type, file); 
if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // check resource name exists String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); if (checkResourceExists(fullName, type.ordinal())) { logger.error("resource {} has exist, can't recreate", RegexUtils.escapeNRT(name)); putMsg(result, Status.RESOURCE_EXIST); return result; } Date now = new Date(); Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now); try { resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!"class".equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); } catch (Exception e) { logger.error("resource already exists, can't recreate ", e); throw new ServiceException("resource already exists, can't recreate"); } // fail upload if (!upload(loginUser, fullName, file, type)) { logger.error("upload resource: {} file: {} failed.", RegexUtils.escapeNRT(name), RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename())); } return result; } /** * check resource is exists * * @param fullName fullName * @param type type * @return true if resource exists */ private boolean checkResourceExists(String fullName, int type) { Boolean existResource = resourcesMapper.existResource(fullName, type); return existResource == Boolean.TRUE; } /** * update resource * @param loginUser login user * @param resourceId resource id * @param name name * @param desc description * @param type resource type * @param file resource file * @return update result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> updateResource(User loginUser, int resourceId, String name, String desc, ResourceType type, MultipartFile file) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, resource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) { putMsg(result, Status.SUCCESS); return result; } //check resource already exists String originFullName = resource.getFullName(); String originResourceName = resource.getAlias(); String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/") + 1),name); if (!originResourceName.equals(name) && checkResourceExists(fullName, type.ordinal())) { logger.error("resource {} already exists, can't recreate", name); putMsg(result, Status.RESOURCE_EXIST); return result; } result = verifyFile(name, type, file); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // query tenant by user id String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } // verify whether the resource exists in storage // get the path of origin file in storage String 
originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName); try { if (!HadoopUtils.getInstance().exists(originHdfsFileName)) { logger.error("{} not exist", originHdfsFileName); putMsg(result,Status.RESOURCE_NOT_EXIST); return result; } } catch (IOException e) { logger.error(e.getMessage(),e); throw new ServiceException(Status.HDFS_OPERATION_ERROR); } if (!resource.isDirectory()) { //get the origin file suffix String originSuffix = Files.getFileExtension(originFullName); String suffix = Files.getFileExtension(fullName); boolean suffixIsChanged = false; if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) { suffixIsChanged = true; } if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) { suffixIsChanged = true; } //verify whether suffix is changed if (suffixIsChanged) { //need verify whether this resource is authorized to other users Map<String, Object> columnMap = new HashMap<>(); columnMap.put("resources_id", resourceId); List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap); if (CollectionUtils.isNotEmpty(resourcesUsers)) { List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList()); List<User> users = userMapper.selectBatchIds(userIds); String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString(); logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames); putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames); return result; } } } // updateResource data Date now = new Date(); resource.setAlias(name); resource.setFileName(name); resource.setFullName(fullName); resource.setDescription(desc); resource.setUpdateTime(now); if (file != null) { resource.setSize(file.getSize()); } try { resourcesMapper.updateById(resource); if (resource.isDirectory()) { List<Integer> childrenResource = listAllChildren(resource,false); if (CollectionUtils.isNotEmpty(childrenResource)) { String matcherFullName = Matcher.quoteReplacement(fullName); List<Resource> childResourceList; Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]); List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray); childResourceList = resourceList.stream().map(t -> { t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName)); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); resourcesMapper.batchUpdateResource(childResourceList); if (ResourceType.UDF.equals(resource.getType())) { List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray); if (CollectionUtils.isNotEmpty(udfFuncs)) { udfFuncs = udfFuncs.stream().map(t -> { t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName)); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); udfFunctionMapper.batchUpdateUdfFunc(udfFuncs); } } } } else if (ResourceType.UDF.equals(resource.getType())) { List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId}); if (CollectionUtils.isNotEmpty(udfFuncs)) { udfFuncs = udfFuncs.stream().map(t -> { t.setResourceName(fullName); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); udfFunctionMapper.batchUpdateUdfFunc(udfFuncs); } } putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if 
(!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); } catch (Exception e) { logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e); throw new ServiceException(Status.UPDATE_RESOURCE_ERROR); } // if name unchanged, return directly without moving on HDFS if (originResourceName.equals(name) && file == null) { return result; } if (file != null) { // fail upload if (!upload(loginUser, fullName, file, type)) { logger.error("upload resource: {} file: {} failed.", name, RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename())); } if (!fullName.equals(originFullName)) { try { HadoopUtils.getInstance().delete(originHdfsFileName,false); } catch (IOException e) { logger.error(e.getMessage(),e); throw new ServiceException(String.format("delete resource: %s failed.", originFullName)); } } return result; } // get the path of dest file in hdfs String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName); try { logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName); HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true); } catch (Exception e) { logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e); putMsg(result,Status.HDFS_COPY_FAIL); throw new ServiceException(Status.HDFS_COPY_FAIL); } return result; } private Result<Object> verifyFile(String name, ResourceType type, MultipartFile file) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (file != null) { // file is empty if (file.isEmpty()) { logger.error("file is empty: {}", RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.RESOURCE_FILE_IS_EMPTY); return result; } // file suffix String fileSuffix = Files.getFileExtension(file.getOriginalFilename()); String nameSuffix = Files.getFileExtension(name); // determine file suffix if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) { // rename file suffix and original suffix must be consistent logger.error("rename file suffix and original suffix must be consistent: {}", RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE); return result; } //If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) { logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg()); putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR); return result; } if (file.getSize() > Constants.MAX_FILE_SIZE) { logger.error("file size is too large: {}", RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT); return result; } } return result; } /** * query resources list paging * * @param loginUser login user * @param type resource type * @param searchVal search value * @param pageNo page number * @param pageSize page size * @return resource list page */ @Override public Result queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) { Result result = new Result(); Page<Resource> page = new Page<>(pageNo, pageSize); int userId = loginUser.getId(); if (isAdmin(loginUser)) { userId = 0; } if (directoryId != 
-1) { Resource directory = resourcesMapper.selectById(directoryId); if (directory == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } } List<Integer> resourcesIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 0); IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page, userId, directoryId, type.ordinal(), searchVal,resourcesIds); PageInfo<Resource> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int)resourceIPage.getTotal()); pageInfo.setTotalList(resourceIPage.getRecords()); result.setData(pageInfo); putMsg(result,Status.SUCCESS); return result; } /** * create directory * @param loginUser login user * @param fullName full name * @param type resource type * @param result Result */ private void createDirectory(User loginUser,String fullName,ResourceType type,Result<Object> result) { String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode); try { if (!HadoopUtils.getInstance().exists(resourceRootPath)) { createTenantDirIfNotExists(tenantCode); } if (!HadoopUtils.getInstance().mkdir(directoryName)) { logger.error("create resource directory {} of hdfs failed",directoryName); putMsg(result,Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("create resource directory: %s failed.", directoryName)); } } catch (Exception e) { logger.error("create resource directory {} of hdfs failed",directoryName); putMsg(result,Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("create resource directory: %s failed.", directoryName)); } } /** * upload file to hdfs * * @param loginUser login user * @param fullName full name * @param file file */ private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) { // save to local String fileSuffix = Files.getFileExtension(file.getOriginalFilename()); String nameSuffix = Files.getFileExtension(fullName); // determine file suffix if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) { return false; } // query tenant String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); // random file name String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString()); // save file to hdfs, and delete original file String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode); try { // if tenant dir not exists if (!HadoopUtils.getInstance().exists(resourcePath)) { createTenantDirIfNotExists(tenantCode); } org.apache.dolphinscheduler.api.utils.FileUtils.copyInputStreamToFile(file, localFilename); HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true); } catch (Exception e) { FileUtils.deleteFile(localFilename); logger.error(e.getMessage(), e); return false; } return true; } /** * query resource list * * @param loginUser login user * @param type resource type * @return resource list */ @Override public Map<String, Object> queryResourceList(User loginUser, ResourceType type) { Map<String, Object> result = new HashMap<>(); List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type); Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList); result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return 
result; } /** * query resource list by program type * * @param loginUser login user * @param type resource type * @return resource list */ @Override public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) { Map<String, Object> result = new HashMap<>(); List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type); String suffix = ".jar"; if (programType != null) { switch (programType) { case JAVA: case SCALA: break; case PYTHON: suffix = ".py"; break; default: } } List<Resource> resources = new ResourceFilter(suffix, new ArrayList<>(allResourceList)).filter(); Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources); result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return result; } /** * delete resource * * @param loginUser login user * @param resourceId resource id * @return delete result code * @throws IOException exception */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> delete(User loginUser, int resourceId) throws IOException { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // get resource by id Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, resource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } // get all resource id of process definitions those is released List<Map<String, Object>> list = processDefinitionMapper.listResources(); Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set<Integer> resourceIdSet = resourceProcessMap.keySet(); // get all children of the resource List<Integer> allChildren = listAllChildren(resource,true); Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]); //if resource type is UDF,need check whether it is bound by UDF function if (resource.getType() == (ResourceType.UDF)) { List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray); if (CollectionUtils.isNotEmpty(udfFuncs)) { logger.error("can't be deleted,because it is bound by UDF functions:{}", udfFuncs); putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName()); return result; } } if (resourceIdSet.contains(resource.getPid())) { logger.error("can't be deleted,because it is used of process definition"); putMsg(result, Status.RESOURCE_IS_USED); return result; } resourceIdSet.retainAll(allChildren); if (CollectionUtils.isNotEmpty(resourceIdSet)) { logger.error("can't be deleted,because it is used of process definition"); for (Integer resId : resourceIdSet) { logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId)); } putMsg(result, Status.RESOURCE_IS_USED); return result; } // get hdfs file by type String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName()); //delete data in database resourcesMapper.deleteIds(needDeleteResourceIdArray); resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray); //delete file on hdfs HadoopUtils.getInstance().delete(hdfsFilename, true); putMsg(result, Status.SUCCESS); return result; } /** * verify 
resource by name and type * @param loginUser login user * @param fullName resource full name * @param type resource type * @return true if the resource name not exists, otherwise return false */ @Override public Result<Object> verifyResourceName(String fullName, ResourceType type, User loginUser) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (checkResourceExists(fullName, type.ordinal())) { logger.error("resource type:{} name:{} has exist, can't create again.", type, RegexUtils.escapeNRT(fullName)); putMsg(result, Status.RESOURCE_EXIST); } else { // query tenant Tenant tenant = tenantMapper.queryById(loginUser.getTenantId()); if (tenant != null) { String tenantCode = tenant.getTenantCode(); try { String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); if (HadoopUtils.getInstance().exists(hdfsFilename)) { logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, RegexUtils.escapeNRT(fullName), hdfsFilename); putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename); } } catch (Exception e) { logger.error(e.getMessage(),e); putMsg(result,Status.HDFS_OPERATION_ERROR); } } else { putMsg(result,Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST); } } return result; } /** * verify resource by full name or pid and type * @param fullName resource full name * @param id resource id * @param type resource type * @return true if the resource full name or pid not exists, otherwise return false */ @Override public Result<Object> queryResource(String fullName, Integer id, ResourceType type) { Result<Object> result = new Result<>(); if (StringUtils.isBlank(fullName) && id == null) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR); return result; } if (StringUtils.isNotBlank(fullName)) { List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } putMsg(result, Status.SUCCESS); result.setData(resourceList.get(0)); } else { Resource resource = resourcesMapper.selectById(id); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Resource parentResource = resourcesMapper.selectById(resource.getPid()); if (parentResource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } putMsg(result, Status.SUCCESS); result.setData(parentResource); } return result; } /** * view resource file online * * @param resourceId resource id * @param skipLineNum skip line number * @param limit limit * @return resource content */ @Override public Result<Object> readResource(int resourceId, int skipLineNum, int limit) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // get resource by id Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } //check preview or not by file suffix String nameSuffix = Files.getFileExtension(resource.getAlias()); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String tenantCode = getTenantCode(resource.getUserId(),result); if 
(StringUtils.isEmpty(tenantCode)) { return result; } // hdfs path String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName()); logger.info("resource hdfs path is {}", hdfsFileName); try { if (HadoopUtils.getInstance().exists(hdfsFileName)) { List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit); putMsg(result, Status.SUCCESS); Map<String, Object> map = new HashMap<>(); map.put(ALIAS, resource.getAlias()); map.put(CONTENT, String.join("\n", content)); result.setData(map); } else { logger.error("read file {} not exist in hdfs", hdfsFileName); putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName); } } catch (Exception e) { logger.error("Resource {} read failed", hdfsFileName, e); putMsg(result, Status.HDFS_OPERATION_ERROR); } return result; } /** * create resource file online * * @param loginUser login user * @param type resource type * @param fileName file name * @param fileSuffix file suffix * @param desc description * @param content content * @param pid pid * @param currentDir current directory * @return create result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } //check file suffix String nameSuffix = fileSuffix.trim(); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support create", nameSuffix); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String name = fileName.trim() + "." + nameSuffix; String fullName = currentDir.equals("/") ? 
String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); result = verifyResource(loginUser, type, fullName, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // save data Date now = new Date(); Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now); resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); result = uploadContentToHdfs(fullName, tenantCode, content); if (!result.getCode().equals(Status.SUCCESS.getCode())) { throw new ServiceException(result.getMsg()); } return result; } private Result<Object> checkResourceUploadStartupState() { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); // if resource upload startup if (!PropertyUtils.getResUploadStartupState()) { logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); putMsg(result, Status.HDFS_NOT_STARTUP); return result; } return result; } private Result<Object> verifyResource(User loginUser, ResourceType type, String fullName, int pid) { Result<Object> result = verifyResourceName(fullName, type, loginUser); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } return verifyPid(loginUser, pid); } private Result<Object> verifyPid(User loginUser, int pid) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (pid != -1) { Resource parentResource = resourcesMapper.selectById(pid); if (parentResource == null) { putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, parentResource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } } return result; } /** * updateProcessInstance resource * * @param resourceId resource id * @param content content * @return update result cod */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> updateResourceContent(int resourceId, String content) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("read file not exist, resource id {}", resourceId); putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } //check can edit by file suffix String nameSuffix = Files.getFileExtension(resource.getAlias()); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } resource.setSize(content.getBytes().length); resource.setUpdateTime(new Date()); resourcesMapper.updateById(resource); result = uploadContentToHdfs(resource.getFullName(), tenantCode, 
content); if (!result.getCode().equals(Status.SUCCESS.getCode())) { throw new ServiceException(result.getMsg()); } return result; } /** * @param resourceName resource name * @param tenantCode tenant code * @param content content * @return result */ private Result<Object> uploadContentToHdfs(String resourceName, String tenantCode, String content) { Result<Object> result = new Result<>(); String localFilename = ""; String hdfsFileName = ""; try { localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString()); if (!FileUtils.writeContent2File(content, localFilename)) { // write file fail logger.error("file {} fail, content is {}", localFilename, RegexUtils.escapeNRT(content)); putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } // get resource file hdfs path hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName); String resourcePath = HadoopUtils.getHdfsResDir(tenantCode); logger.info("resource hdfs path is {}, resource dir is {}", hdfsFileName, resourcePath); HadoopUtils hadoopUtils = HadoopUtils.getInstance(); if (!hadoopUtils.exists(resourcePath)) { // create if tenant dir not exists createTenantDirIfNotExists(tenantCode); } if (hadoopUtils.exists(hdfsFileName)) { hadoopUtils.delete(hdfsFileName, false); } hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true); } catch (Exception e) { logger.error(e.getMessage(), e); result.setCode(Status.HDFS_OPERATION_ERROR.getCode()); result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName)); return result; } putMsg(result, Status.SUCCESS); return result; } /** * download file * * @param resourceId resource id * @return resource content * @throws IOException exception */ @Override public org.springframework.core.io.Resource downloadResource(int resourceId) throws IOException { // if resource upload startup if (!PropertyUtils.getResUploadStartupState()) { logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); throw new ServiceException("hdfs not startup"); } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("download file not exist, resource id {}", resourceId); return null; } if (resource.isDirectory()) { logger.error("resource id {} is directory,can't download it", resourceId); throw new ServiceException("can't download directory"); } int userId = resource.getUserId(); User user = userMapper.selectById(userId); if (user == null) { logger.error("user id {} not exists", userId); throw new ServiceException(String.format("resource owner id %d not exist",userId)); } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (tenant == null) { logger.error("tenant id {} not exists", user.getTenantId()); throw new ServiceException(String.format("The tenant id %d of resource owner not exist",user.getTenantId())); } String tenantCode = tenant.getTenantCode(); String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName()); String localFileName = FileUtils.getDownloadFilename(resource.getAlias()); logger.info("resource hdfs path is {}, download local filename is {}", hdfsFileName, localFileName); HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true); return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName); } /** * list all file * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> authorizeResourceTree(User 
loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); List<Resource> resourceList; if (isAdmin(loginUser)) { // admin gets all resources except userId resourceList = resourcesMapper.queryResourceExceptUserId(userId); } else { // non-admins users get their own resources resourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), -1); } List<ResourceComponent> list; if (CollectionUtils.isNotEmpty(resourceList)) { Visitor visitor = new ResourceTreeVisitor(resourceList); list = visitor.visit().getChildren(); } else { list = new ArrayList<>(0); } result.put(Constants.DATA_LIST, list); putMsg(result, Status.SUCCESS); return result; } /** * unauthorized file * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); List<Resource> resourceList; if (isAdmin(loginUser)) { // admin gets all resources except userId resourceList = resourcesMapper.queryResourceExceptUserId(userId); } else { // non-admins users get their own resources resourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), -1); } List<Resource> list; if (resourceList != null && !resourceList.isEmpty()) { Set<Resource> resourceSet = new HashSet<>(resourceList); List<Resource> authedResourceList = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM); getAuthorizedResourceList(resourceSet, authedResourceList); list = new ArrayList<>(resourceSet); } else { list = new ArrayList<>(0); } Visitor visitor = new ResourceTreeVisitor(list); result.put(Constants.DATA_LIST, visitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return result; } /** * unauthorized udf function * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); List<UdfFunc> udfFuncList; if (isAdmin(loginUser)) { // admin gets all udfs except userId udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId); } else { // non-admins users get their own udfs udfFuncList = udfFunctionMapper.selectByMap(Collections.singletonMap("user_id", loginUser.getId())); } List<UdfFunc> resultList = new ArrayList<>(); Set<UdfFunc> udfFuncSet; if (CollectionUtils.isNotEmpty(udfFuncList)) { udfFuncSet = new HashSet<>(udfFuncList); List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId); getAuthorizedResourceList(udfFuncSet, authedUDFFuncList); resultList = new ArrayList<>(udfFuncSet); } result.put(Constants.DATA_LIST, resultList); putMsg(result, Status.SUCCESS); return result; } /** * authorized udf function * * @param loginUser login user * @param userId user id * @return authorized result code */ @Override public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId); result.put(Constants.DATA_LIST, udfFuncs); putMsg(result, Status.SUCCESS); return result; } /** * authorized file * * @param loginUser login user * @param userId user id * @return authorized result */ @Override public Map<String, Object> authorizedFile(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); List<Resource> authedResources = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM); Visitor visitor = new 
ResourceTreeVisitor(authedResources); String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS); logger.info(visit); String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS); logger.info(jsonTreeStr); result.put(Constants.DATA_LIST, visitor.visit().getChildren()); putMsg(result,Status.SUCCESS); return result; } /** * get authorized resource list * * @param resourceSet resource set * @param authedResourceList authorized resource list */ private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) { Set<?> authedResourceSet; if (CollectionUtils.isNotEmpty(authedResourceList)) { authedResourceSet = new HashSet<>(authedResourceList); resourceSet.removeAll(authedResourceSet); } } /** * get tenantCode by UserId * * @param userId user id * @param result return result * @return tenant code */ private String getTenantCode(int userId,Result<Object> result) { User user = userMapper.selectById(userId); if (user == null) { logger.error("user {} not exists", userId); putMsg(result, Status.USER_NOT_EXIST,userId); return null; } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (tenant == null) { logger.error("tenant not exists"); putMsg(result, Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST); return null; } return tenant.getTenantCode(); } /** * list all children id * @param resource resource * @param containSelf whether add self to children list * @return all children id */ List<Integer> listAllChildren(Resource resource,boolean containSelf) { List<Integer> childList = new ArrayList<>(); if (resource.getId() != -1 && containSelf) { childList.add(resource.getId()); } if (resource.isDirectory()) { listAllChildren(resource.getId(),childList); } return childList; } /** * list all children id * @param resourceId resource id * @param childList child list */ void listAllChildren(int resourceId,List<Integer> childList) { List<Integer> children = resourcesMapper.listChildren(resourceId); for (int childId : children) { childList.add(childId); listAllChildren(childId, childList); } } /** * query authored resource list (own and authorized) * @param loginUser login user * @param type ResourceType * @return all authored resource list */ private List<Resource> queryAuthoredResourceList(User loginUser, ResourceType type) { List<Resource> relationResources; int userId = loginUser.getId(); if (isAdmin(loginUser)) { userId = 0; relationResources = new ArrayList<>(); } else { // query resource relation relationResources = queryResourceList(userId, 0); } List<Resource> ownResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal()); ownResourceList.addAll(relationResources); return ownResourceList; } /** * query resource list by userId and perm * @param userId userId * @param perm perm * @return resource list */ private List<Resource> queryResourceList(Integer userId, int perm) { List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, perm); return CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourcesMapper.queryResourceListById(resIds); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,892
[Bug] [api] When a non-admin user creates a udf function, resources other than .jar will be displayed in the UDF resource
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. When a non-admin user has been authorized other resources, the UDF resource list displays non-`.jar` resources when that user creates a UDF function ![image](https://user-images.githubusercontent.com/56599784/148630186-cf3de53b-e696-44f3-bd93-a4c8bfeb624e.png) 2. The UDF resource list displays correctly when the UDF function is created by an admin user ### What you expected to happen Non-`.jar` resources should not be displayed in the UDF resource list ### How to reproduce step1: As the administrator, authorize some resources to user a step2: Log in as user a and create or modify a UDF function; the UDF resource list displays non-`.jar` resources ### Anything else Discovered by looking at the code: in `org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl#queryAuthoredResourceList`, the query for UDF resources is not filtered by type via the `t_ds_relation_udfs_user` table; instead the `t_ds_relation_resources_user` table is queried directly ![image](https://user-images.githubusercontent.com/56599784/148630485-991ebe4d-b818-4526-90b0-156a3b4664fe.png) ### Version 2.0.1 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
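A different, also hypothetical, way to address the same problem would be to push the type condition down into the relation query itself rather than filtering in the service layer. The mapper method below is illustrative only — `queryResourcesIdListByUserIdAndPermAndType` does not exist in the codebase, and its name and signature are assumptions — but it shows the shape such a type-aware query could take; the corresponding MyBatis SQL would join `t_ds_relation_resources_user` with `t_ds_resources` and add a `type = #{type}` condition.

```java
import java.util.List;

import org.apache.ibatis.annotations.Param;

// Excerpt sketch of ResourceUserMapper; the existing methods of the real interface are omitted.
public interface ResourceUserMapper {

    /**
     * Hypothetical type-aware variant of queryResourcesIdListByUserIdAndPerm:
     * query the ids of resources authorized to a user with a given permission,
     * keeping only resources of the given type (e.g. ResourceType.UDF.ordinal()).
     */
    List<Integer> queryResourcesIdListByUserIdAndPermAndType(@Param("userId") int userId,
                                                             @Param("perm") int perm,
                                                             @Param("type") int type);
}
```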
https://github.com/apache/dolphinscheduler/issues/7892
https://github.com/apache/dolphinscheduler/pull/8458
b92f6d876d41c7e419572ac9ced3d0aad895d293
1ffb5d6e8d6512ee0be958069e8bbd1c105b2989
"2022-01-08T03:49:51Z"
java
"2022-02-24T10:17:25Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.FileUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.commons.collections.CollectionUtils; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.mock.web.MockMultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.io.Files; /** * resources service test */ @RunWith(PowerMockRunner.class) @PowerMockIgnore({"sun.security.*", "javax.net.*"}) @PrepareForTest({HadoopUtils.class, PropertyUtils.class, FileUtils.class, org.apache.dolphinscheduler.api.utils.FileUtils.class, Files.class}) public class ResourcesServiceTest { private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceTest.class); @InjectMocks private ResourcesServiceImpl resourcesService; @Mock private ResourceMapper resourcesMapper; @Mock private TenantMapper tenantMapper; @Mock private HadoopUtils hadoopUtils; @Mock private UserMapper userMapper; @Mock private UdfFuncMapper 
udfFunctionMapper; @Mock private ProcessDefinitionMapper processDefinitionMapper; @Mock private ResourceUserMapper resourceUserMapper; @Before public void setUp() { PowerMockito.mockStatic(HadoopUtils.class); PowerMockito.mockStatic(FileUtils.class); PowerMockito.mockStatic(Files.class); PowerMockito.mockStatic(org.apache.dolphinscheduler.api.utils.FileUtils.class); try { // new HadoopUtils PowerMockito.whenNew(HadoopUtils.class).withNoArguments().thenReturn(hadoopUtils); } catch (Exception e) { e.printStackTrace(); } PowerMockito.when(HadoopUtils.getInstance()).thenReturn(hadoopUtils); PowerMockito.mockStatic(PropertyUtils.class); } @Test public void testCreateResource() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); User user = new User(); //HDFS_NOT_STARTUP Result result = resourcesService.createResource(user, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null, -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //RESOURCE_FILE_IS_EMPTY MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf", "".getBytes()); PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); result = resourcesService.createResource(user, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, mockMultipartFile, -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(), result.getMsg()); //RESOURCE_SUFFIX_FORBID_CHANGE mockMultipartFile = new MockMultipartFile("test.pdf", "test.pdf", "pdf", "test".getBytes()); PowerMockito.when(Files.getFileExtension("test.pdf")).thenReturn("pdf"); PowerMockito.when(Files.getFileExtension("ResourcesServiceTest.jar")).thenReturn("jar"); result = resourcesService.createResource(user, "ResourcesServiceTest.jar", "ResourcesServiceTest", ResourceType.FILE, mockMultipartFile, -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(), result.getMsg()); //UDF_RESOURCE_SUFFIX_NOT_JAR mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf", "ResourcesServiceTest.pdf", "pdf", "test".getBytes()); PowerMockito.when(Files.getFileExtension("ResourcesServiceTest.pdf")).thenReturn("pdf"); result = resourcesService.createResource(user, "ResourcesServiceTest.pdf", "ResourcesServiceTest", ResourceType.UDF, mockMultipartFile, -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(), result.getMsg()); } @Test public void testCreateDirecotry() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); User user = new User(); //HDFS_NOT_STARTUP Result result = resourcesService.createDirectory(user, "directoryTest", "directory test", ResourceType.FILE, -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //PARENT_RESOURCE_NOT_EXIST user.setId(1); user.setTenantId(1); Mockito.when(userMapper.selectById(1)).thenReturn(getUser()); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null); result = resourcesService.createDirectory(user, "directoryTest", "directory test", ResourceType.FILE, 1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); //RESOURCE_EXIST 
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); Mockito.when(resourcesMapper.existResource("/directoryTest", 0)).thenReturn(true); result = resourcesService.createDirectory(user, "directoryTest", "directory test", ResourceType.FILE, -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg()); } @Test public void testUpdateResource() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); User user = new User(); //HDFS_NOT_STARTUP Result result = resourcesService.updateResource(user, 1, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //RESOURCE_NOT_EXIST Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource()); PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); result = resourcesService.updateResource(user, 0, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); //USER_NO_OPERATION_PERM result = resourcesService.updateResource(user, 1, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(), result.getMsg()); //RESOURCE_NOT_EXIST user.setId(1); Mockito.when(userMapper.selectById(1)).thenReturn(getUser()); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(), Mockito.anyString())).thenReturn("test1"); try { Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(false); } catch (IOException e) { logger.error(e.getMessage(), e); } result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF, null); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); //SUCCESS user.setId(1); Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser()); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); try { Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(true); } catch (IOException e) { logger.error(e.getMessage(), e); } result = resourcesService.updateResource(user, 1, "ResourcesServiceTest.jar", "ResourcesServiceTest", ResourceType.FILE, null); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); //RESOURCE_EXIST Mockito.when(resourcesMapper.existResource("/ResourcesServiceTest1.jar", 0)).thenReturn(true); result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.FILE, null); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg()); //USER_NOT_EXIST Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(null); result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF, null); logger.info(result.toString()); Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode()); //TENANT_NOT_EXIST Mockito.when(userMapper.selectById(1)).thenReturn(getUser()); Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null); result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF, null); 
logger.info(result.toString()); Assert.assertEquals(Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST.getMsg(), result.getMsg()); //SUCCESS Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test"); try { PowerMockito.when(HadoopUtils.getInstance().copy(Mockito.anyString(), Mockito.anyString(), true, true)).thenReturn(true); } catch (Exception e) { logger.error(e.getMessage(), e); } result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest1.jar", ResourceType.UDF, null); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); } @Test public void testQueryResourceListPaging() { User loginUser = new User(); loginUser.setUserType(UserType.ADMIN_USER); IPage<Resource> resourcePage = new Page<>(1, 10); resourcePage.setTotal(1); resourcePage.setRecords(getResourceList()); Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class), Mockito.eq(0), Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"), Mockito.any())).thenReturn(resourcePage); Result result = resourcesService.queryResourceListPaging(loginUser, -1, ResourceType.FILE, "test", 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getCode(), (int) result.getCode()); PageInfo pageInfo = (PageInfo) result.getData(); Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getTotalList())); } @Test public void testQueryResourceList() { User loginUser = new User(); loginUser.setId(0); loginUser.setUserType(UserType.ADMIN_USER); Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0)).thenReturn(getResourceList()); Map<String, Object> result = resourcesService.queryResourceList(loginUser, ResourceType.FILE); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<Resource> resourceList = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resourceList)); } @Test public void testDelete() { User loginUser = new User(); loginUser.setId(0); PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); try { // HDFS_NOT_STARTUP Result result = resourcesService.delete(loginUser, 1); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //RESOURCE_NOT_EXIST PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource()); result = resourcesService.delete(loginUser, 2); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); // USER_NO_OPERATION_PERM result = resourcesService.delete(loginUser, 2); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); //TENANT_NOT_EXIST loginUser.setUserType(UserType.ADMIN_USER); loginUser.setTenantId(2); Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(loginUser); result = resourcesService.delete(loginUser, 1); logger.info(result.toString()); Assert.assertEquals(Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST.getMsg(), result.getMsg()); //SUCCESS loginUser.setTenantId(1); Mockito.when(hadoopUtils.delete(Mockito.anyString(), Mockito.anyBoolean())).thenReturn(true); Mockito.when(processDefinitionMapper.listResources()).thenReturn(getResources()); 
Mockito.when(resourcesMapper.deleteIds(Mockito.any())).thenReturn(1); Mockito.when(resourceUserMapper.deleteResourceUserArray(Mockito.anyInt(), Mockito.any())).thenReturn(1); result = resourcesService.delete(loginUser, 1); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); } catch (Exception e) { logger.error("delete error", e); Assert.assertTrue(false); } } @Test public void testVerifyResourceName() { User user = new User(); user.setId(1); Mockito.when(resourcesMapper.existResource("/ResourcesServiceTest.jar", 0)).thenReturn(true); Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar", ResourceType.FILE, user); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg()); //TENANT_NOT_EXIST Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); String unExistFullName = "/test.jar"; try { Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false); } catch (IOException e) { logger.error("hadoop error", e); } result = resourcesService.verifyResourceName("/test.jar", ResourceType.FILE, user); logger.info(result.toString()); Assert.assertEquals(Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST.getMsg(), result.getMsg()); //RESOURCE_FILE_EXIST user.setTenantId(1); try { Mockito.when(hadoopUtils.exists("test")).thenReturn(true); } catch (IOException e) { logger.error("hadoop error", e); } PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test"); result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar", ResourceType.FILE, user); logger.info(result.toString()); Assert.assertTrue(Status.RESOURCE_EXIST.getCode() == result.getCode()); //SUCCESS result = resourcesService.verifyResourceName("test2", ResourceType.FILE, user); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); } @Test public void testReadResource() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); //HDFS_NOT_STARTUP Result result = resourcesService.readResource(1, 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //RESOURCE_NOT_EXIST Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource()); PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); result = resourcesService.readResource(2, 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); //RESOURCE_SUFFIX_NOT_SUPPORT_VIEW PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class"); PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); result = resourcesService.readResource(1, 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(), result.getMsg()); //USER_NOT_EXIST PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar"); PowerMockito.when(Files.getFileExtension("ResourcesServiceTest.jar")).thenReturn("jar"); result = resourcesService.readResource(1, 1, 10); logger.info(result.toString()); Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode()); //TENANT_NOT_EXIST Mockito.when(userMapper.selectById(1)).thenReturn(getUser()); result = resourcesService.readResource(1, 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST.getMsg(), result.getMsg()); //RESOURCE_FILE_NOT_EXIST 
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); try { Mockito.when(hadoopUtils.exists(Mockito.anyString())).thenReturn(false); } catch (IOException e) { logger.error("hadoop error", e); } result = resourcesService.readResource(1, 1, 10); logger.info(result.toString()); Assert.assertTrue(Status.RESOURCE_FILE_NOT_EXIST.getCode() == result.getCode()); //SUCCESS try { Mockito.when(hadoopUtils.exists(null)).thenReturn(true); Mockito.when(hadoopUtils.catFile(null, 1, 10)).thenReturn(getContent()); } catch (IOException e) { logger.error("hadoop error", e); } result = resourcesService.readResource(1, 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); } @Test public void testOnlineCreateResource() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); PowerMockito.when(HadoopUtils.getHdfsResDir("hdfsdDir")).thenReturn("hdfsDir"); PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir"); User user = getUser(); //HDFS_NOT_STARTUP Result result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //RESOURCE_SUFFIX_NOT_SUPPORT_VIEW PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class"); result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(), result.getMsg()); //RuntimeException try { PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar"); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/"); } catch (RuntimeException ex) { logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage()); } //SUCCESS Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test"); PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true); result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/"); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); } @Test public void testUpdateResourceContent() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); // HDFS_NOT_STARTUP Result result = resourcesService.updateResourceContent(1, "content"); logger.info(result.toString()); Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg()); //RESOURCE_NOT_EXIST PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource()); result = resourcesService.updateResourceContent(2, "content"); logger.info(result.toString()); Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg()); //RESOURCE_SUFFIX_NOT_SUPPORT_VIEW PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class"); result = resourcesService.updateResourceContent(1, "content"); logger.info(result.toString()); 
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(), result.getMsg()); //USER_NOT_EXIST PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar"); PowerMockito.when(Files.getFileExtension("ResourcesServiceTest.jar")).thenReturn("jar"); result = resourcesService.updateResourceContent(1, "content"); logger.info(result.toString()); Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode()); //TENANT_NOT_EXIST Mockito.when(userMapper.selectById(1)).thenReturn(getUser()); result = resourcesService.updateResourceContent(1, "content"); logger.info(result.toString()); Assert.assertTrue(Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST.getCode() == result.getCode()); //SUCCESS Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test"); PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true); result = resourcesService.updateResourceContent(1, "content"); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); } @Test public void testDownloadResource() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); Mockito.when(userMapper.selectById(1)).thenReturn(getUser()); org.springframework.core.io.Resource resourceMock = Mockito.mock(org.springframework.core.io.Resource.class); try { //resource null org.springframework.core.io.Resource resource = resourcesService.downloadResource(1); Assert.assertNull(resource); Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource()); PowerMockito.when(org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(Mockito.any())).thenReturn(resourceMock); resource = resourcesService.downloadResource(1); Assert.assertNotNull(resource); } catch (Exception e) { logger.error("DownloadResource error", e); Assert.assertTrue(false); } } @Test public void testAuthorizeResourceTree() { User user = getUser(); user.setId(1); user.setUserType(UserType.ADMIN_USER); int userId = 3; // test admin user List<Integer> resIds = new ArrayList<>(); resIds.add(1); Mockito.when(resourcesMapper.queryResourceExceptUserId(userId)).thenReturn(getResourceList()); Map<String, Object> result = resourcesService.authorizeResourceTree(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resources)); // test non-admin user user.setId(2); user.setUserType(UserType.GENERAL_USER); Mockito.when(resourcesMapper.queryResourceListAuthored(user.getId(), -1)).thenReturn(getResourceList()); result = resourcesService.authorizeResourceTree(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); resources = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resources)); } @Test public void testUnauthorizedFile() { User user = getUser(); user.setId(1); user.setUserType(UserType.ADMIN_USER); int userId = 3; // test admin user List<Integer> resIds = new ArrayList<>(); resIds.add(1); Mockito.when(resourcesMapper.queryResourceExceptUserId(userId)).thenReturn(getResourceList()); Mockito.when(resourceUserMapper.queryResourcesIdListByUserIdAndPerm(Mockito.anyInt(), Mockito.anyInt())).thenReturn(resIds); 
Mockito.when(resourcesMapper.queryResourceListById(Mockito.any())).thenReturn(getSingleResourceList()); Map<String, Object> result = resourcesService.unauthorizedFile(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resources)); // test non-admin user user.setId(2); user.setUserType(UserType.GENERAL_USER); Mockito.when(resourcesMapper.queryResourceListAuthored(user.getId(), -1)).thenReturn(getResourceList()); result = resourcesService.unauthorizedFile(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); resources = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resources)); } @Test public void testUnauthorizedUDFFunction() { User user = getUser(); user.setId(1); user.setUserType(UserType.ADMIN_USER); int userId = 3; // test admin user Mockito.when(udfFunctionMapper.queryUdfFuncExceptUserId(userId)).thenReturn(getUdfFuncList()); Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(userId)).thenReturn(getSingleUdfFuncList()); Map<String, Object> result = resourcesService.unauthorizedUDFFunction(user, userId); logger.info(result.toString()); List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs)); // test non-admin user user.setId(2); user.setUserType(UserType.GENERAL_USER); Mockito.when(udfFunctionMapper.selectByMap(Collections.singletonMap("user_id", user.getId()))).thenReturn(getUdfFuncList()); result = resourcesService.unauthorizedUDFFunction(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs)); } @Test public void testAuthorizedUDFFunction() { User user = getUser(); user.setId(1); user.setUserType(UserType.ADMIN_USER); int userId = 3; // test admin user Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(userId)).thenReturn(getUdfFuncList()); Map<String, Object> result = resourcesService.authorizedUDFFunction(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs)); // test non-admin user user.setUserType(UserType.GENERAL_USER); user.setId(2); Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(userId)).thenReturn(getUdfFuncList()); result = resourcesService.authorizedUDFFunction(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs)); } @Test public void testAuthorizedFile() { User user = getUser(); user.setId(1); user.setUserType(UserType.ADMIN_USER); int userId = 3; // test admin user List<Integer> resIds = new ArrayList<>(); resIds.add(1); Mockito.when(resourceUserMapper.queryResourcesIdListByUserIdAndPerm(Mockito.anyInt(), Mockito.anyInt())).thenReturn(resIds); Mockito.when(resourcesMapper.queryResourceListById(Mockito.any())).thenReturn(getResourceList()); Map<String, Object> result = resourcesService.authorizedFile(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, 
result.get(Constants.STATUS)); List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resources)); // test non-admin user user.setId(2); user.setUserType(UserType.GENERAL_USER); Mockito.when(resourceUserMapper.queryResourcesIdListByUserIdAndPerm(Mockito.anyInt(), Mockito.anyInt())).thenReturn(resIds); Mockito.when(resourcesMapper.queryResourceListById(Mockito.any())).thenReturn(getResourceList()); result = resourcesService.authorizedFile(user, userId); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); resources = (List<Resource>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(resources)); } @Test public void testCatFile() { PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false); //SUCCESS try { Mockito.when(hadoopUtils.exists(null)).thenReturn(true); Mockito.when(hadoopUtils.catFile(null, 1, 10)).thenReturn(getContent()); List<String> list = hadoopUtils.catFile(null, 1, 10); Assert.assertNotNull(list); } catch (IOException e) { logger.error("hadoop error", e); } } private List<Resource> getResourceList() { List<Resource> resources = new ArrayList<>(); resources.add(getResource(1)); resources.add(getResource(2)); resources.add(getResource(3)); return resources; } private List<Resource> getSingleResourceList() { return Collections.singletonList(getResource(1)); } private Tenant getTenant() { Tenant tenant = new Tenant(); tenant.setTenantCode("123"); return tenant; } private Resource getResource() { Resource resource = new Resource(); resource.setPid(-1); resource.setUserId(1); resource.setDescription("ResourcesServiceTest.jar"); resource.setAlias("ResourcesServiceTest.jar"); resource.setFullName("/ResourcesServiceTest.jar"); resource.setType(ResourceType.FILE); return resource; } private Resource getResource(int resourceId) { Resource resource = new Resource(); resource.setId(resourceId); resource.setPid(-1); resource.setUserId(1); resource.setDescription("ResourcesServiceTest.jar"); resource.setAlias("ResourcesServiceTest.jar"); resource.setFullName("/ResourcesServiceTest.jar"); resource.setType(ResourceType.FILE); return resource; } private Resource getUdfResource() { Resource resource = new Resource(); resource.setUserId(1); resource.setDescription("udfTest"); resource.setAlias("udfTest.jar"); resource.setFullName("/udfTest.jar"); resource.setType(ResourceType.UDF); return resource; } private UdfFunc getUdfFunc() { UdfFunc udfFunc = new UdfFunc(); udfFunc.setId(1); return udfFunc; } private UdfFunc getUdfFunc(int udfId) { UdfFunc udfFunc = new UdfFunc(); udfFunc.setId(udfId); return udfFunc; } private List<UdfFunc> getUdfFuncList() { List<UdfFunc> udfFuncs = new ArrayList<>(); udfFuncs.add(getUdfFunc(1)); udfFuncs.add(getUdfFunc(2)); udfFuncs.add(getUdfFunc(3)); return udfFuncs; } private List<UdfFunc> getSingleUdfFuncList() { return Collections.singletonList(getUdfFunc(3)); } private User getUser() { User user = new User(); user.setId(1); user.setTenantId(1); return user; } private List<String> getContent() { List<String> contentList = new ArrayList<>(); contentList.add("test"); return contentList; } private List<Map<String, Object>> getResources() { List<Map<String, Object>> resources = new ArrayList<>(); Map<String, Object> resource = new HashMap<>(); resource.put("id", 1); resource.put("resource_ids", "1"); resources.add(resource); return resources; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,513
[Upgrade] Upgrade mybatis-plus to 3.4.3
### Discussed in https://github.com/apache/dolphinscheduler/discussions/8162 Originally posted by **YiuTerran**, January 24, 2022: Mybatis-plus v3.2.0 depends on mybatis 3.5.2, a version with a known high-severity security vulnerability. See [CVE-2020-26945](https://nvd.nist.gov/vuln/detail/CVE-2020-26945)
https://github.com/apache/dolphinscheduler/issues/8513
https://github.com/apache/dolphinscheduler/pull/8515
33b50bab7232434a7ac5787b20f4e08db69a7d95
ca6d148a1b48195a359a3ab29d449229a87dd05a
"2022-02-24T03:07:16Z"
java
"2022-02-25T00:31:12Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SpringConnectionFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.dao.datasource;

import org.apache.ibatis.mapping.DatabaseIdProvider;
import org.apache.ibatis.mapping.VendorDatabaseIdProvider;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.type.JdbcType;

import java.util.Properties;

import javax.sql.DataSource;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.core.io.support.ResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.core.MybatisConfiguration;
import com.baomidou.mybatisplus.core.config.GlobalConfig;
import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;

@Configuration
public class SpringConnectionFactory {

    @Bean
    public PaginationInterceptor paginationInterceptor() {
        return new PaginationInterceptor();
    }

    @Bean
    public DataSourceTransactionManager transactionManager(DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean
    public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {
        MybatisConfiguration configuration = new MybatisConfiguration();
        configuration.setMapUnderscoreToCamelCase(true);
        configuration.setCacheEnabled(false);
        configuration.setCallSettersOnNulls(true);
        configuration.setJdbcTypeForNull(JdbcType.NULL);
        configuration.addInterceptor(paginationInterceptor());
        configuration.setGlobalConfig(new GlobalConfig().setBanner(false));

        MybatisSqlSessionFactoryBean sqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
        sqlSessionFactoryBean.setConfiguration(configuration);
        sqlSessionFactoryBean.setDataSource(dataSource);

        GlobalConfig.DbConfig dbConfig = new GlobalConfig.DbConfig();
        dbConfig.setIdType(IdType.AUTO);
        GlobalConfig globalConfig = new GlobalConfig().setBanner(false);
        globalConfig.setDbConfig(dbConfig);
        sqlSessionFactoryBean.setGlobalConfig(globalConfig);
        sqlSessionFactoryBean.setTypeAliasesPackage("org.apache.dolphinscheduler.dao.entity");
        ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
        sqlSessionFactoryBean.setMapperLocations(resolver.getResources("org/apache/dolphinscheduler/dao/mapper/*Mapper.xml"));
        sqlSessionFactoryBean.setTypeEnumsPackage("org.apache.dolphinscheduler.*.enums");
        sqlSessionFactoryBean.setDatabaseIdProvider(databaseIdProvider());
        return sqlSessionFactoryBean.getObject();
    }

    @Bean
    public DatabaseIdProvider databaseIdProvider() {
        DatabaseIdProvider databaseIdProvider = new VendorDatabaseIdProvider();
        Properties properties = new Properties();
        properties.setProperty("MySQL", "mysql");
        properties.setProperty("PostgreSQL", "pg");
        properties.setProperty("h2", "h2");
        databaseIdProvider.setProperties(properties);
        return databaseIdProvider;
    }
}
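Note: the configuration above still registers the 3.2.x-era `PaginationInterceptor`, which is deprecated in the mybatis-plus 3.4.x line. The linked PR (#8515) is not reproduced in this record, so the following is only a minimal sketch of how the pagination bean might look after moving to mybatis-plus 3.4.3, using the `MybatisPlusInterceptor` / `PaginationInnerInterceptor` API introduced in 3.4.x; the class name and the `DbType.MYSQL` choice are illustrative assumptions, not taken from the actual fix.

```java
// Hypothetical sketch (not the actual PR change): pagination setup for mybatis-plus 3.4.3.
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;

@Configuration
public class PaginationConfigSketch {

    @Bean
    public MybatisPlusInterceptor mybatisPlusInterceptor() {
        // MybatisPlusInterceptor replaces the deprecated PaginationInterceptor in 3.4.x;
        // each concern (pagination, optimistic locking, ...) is added as an inner interceptor.
        MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
        interceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.MYSQL));
        return interceptor;
    }
}
```

Since `MybatisPlusInterceptor` implements the plain MyBatis `Interceptor` interface, it could presumably be registered on the `MybatisConfiguration` the same way the old bean is above (via `configuration.addInterceptor(...)`), but the exact wiring chosen by the fix should be checked against PR #8515.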
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,513
[Upgrade] Upgrade mybatis-plus to 3.4.3
### Discussed in https://github.com/apache/dolphinscheduler/discussions/8162 Originally posted by **YiuTerran**, January 24, 2022: Mybatis-plus v3.2.0 depends on mybatis 3.5.2, a version with a known high-severity security vulnerability. See [CVE-2020-26945](https://nvd.nist.gov/vuln/detail/CVE-2020-26945)
https://github.com/apache/dolphinscheduler/issues/8513
https://github.com/apache/dolphinscheduler/pull/8515
33b50bab7232434a7ac5787b20f4e08db69a7d95
ca6d148a1b48195a359a3ab29d449229a87dd05a
"2022-02-24T03:07:16Z"
java
"2022-02-25T00:31:12Z"
dolphinscheduler-dist/release-docs/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================================================= Apache DolphinScheduler Subcomponents: The Apache DolphinScheduler project contains subcomponents with separate copyright notices and license terms. 
Your use of the source code for the these subcomponents is subject to the terms and conditions of the following licenses. ======================================================================== Apache 2.0 licenses ======================================================================== The following components are provided under the Apache License. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. apacheds-i18n 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-i18n/2.0.0-M15, Apache 2.0 apacheds-kerberos-codec 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-kerberos-codec/2.0.0-M15, Apache 2.0 tomcat-embed-el 9.0.54: https://mvnrepository.com/artifact/org.apache.tomcat.embed/tomcat-embed-el/9.0.54, Apache 2.0 api-asn1-api 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-asn1-api/1.0.0-M20, Apache 2.0 api-util 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-util/1.0.0-M20, Apache 2.0 audience-annotations 0.5.0: https://mvnrepository.com/artifact/org.apache.yetus/audience-annotations/0.5.0, Apache 2.0 avro 1.7.4: https://github.com/apache/avro, Apache 2.0 aws-sdk-java 1.7.4: https://mvnrepository.com/artifact/com.amazonaws/aws-java-sdk/1.7.4, Apache 2.0 bonecp 0.8.0.RELEASE: https://github.com/wwadge/bonecp, Apache 2.0 byte-buddy 1.9.16: https://mvnrepository.com/artifact/net.bytebuddy/byte-buddy/1.9.16, Apache 2.0 caffeine 2.9.2: https://mvnrepository.com/artifact/com.github.ben-manes.caffeine/caffeine/2.9.2, Apache 2.0 classmate 1.5.1: https://mvnrepository.com/artifact/com.fasterxml/classmate/1.5.1, Apache 2.0 clickhouse-jdbc 0.1.52: https://mvnrepository.com/artifact/ru.yandex.clickhouse/clickhouse-jdbc/0.1.52, Apache 2.0 commons-beanutils 1.9.4 https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils/1.9.4, Apache 2.0 commons-cli 1.2: https://mvnrepository.com/artifact/commons-cli/commons-cli/1.2, Apache 2.0 commons-codec 1.11: https://mvnrepository.com/artifact/commons-codec/commons-codec/1.11, Apache 2.0 commons-collections 3.2.2: https://mvnrepository.com/artifact/commons-collections/commons-collections/3.2.2, Apache 2.0 commons-collections4 4.1: https://mvnrepository.com/artifact/org.apache.commons/commons-collections4/4.1, Apache 2.0 commons-compress 1.19: https://mvnrepository.com/artifact/org.apache.commons/commons-compress/1.19, Apache 2.0 commons-configuration 1.10: https://mvnrepository.com/artifact/commons-configuration/commons-configuration/1.10, Apache 2.0 commons-daemon 1.0.13 https://mvnrepository.com/artifact/commons-daemon/commons-daemon/1.0.13, Apache 2.0 commons-dbcp 1.4: https://github.com/apache/commons-dbcp, Apache 2.0 commons-email 1.5: https://github.com/apache/commons-email, Apache 2.0 commons-httpclient 3.0.1: https://mvnrepository.com/artifact/commons-httpclient/commons-httpclient/3.0.1, Apache 2.0 commons-io 2.4: https://github.com/apache/commons-io, Apache 2.0 commons-lang 2.6: https://github.com/apache/commons-lang, Apache 2.0 commons-logging 1.1.1: https://github.com/apache/commons-logging, Apache 2.0 commons-math3 3.1.1: https://mvnrepository.com/artifact/org.apache.commons/commons-math3/3.1.1, Apache 2.0 commons-net 3.1: https://github.com/apache/commons-net, Apache 2.0 commons-pool 1.6: https://github.com/apache/commons-pool, Apache 2.0 cron-utils 9.1.3: https://mvnrepository.com/artifact/com.cronutils/cron-utils/9.1.3, Apache 2.0 commons-lang3 3.12.0: 
https://mvnrepository.com/artifact/org.apache.commons/commons-lang3/3.12.0, Apache 2.0 curator-client 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-client/4.3.0, Apache 2.0 curator-framework 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-framework/4.3.0, Apache 2.0 curator-recipes 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-recipes/4.3.0, Apache 2.0 curator-test 2.12.0: https://mvnrepository.com/artifact/org.apache.curator/curator-test/2.12.0, Apache 2.0 datanucleus-api-jdo 4.2.1: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-api-jdo/4.2.1, Apache 2.0 datanucleus-core 4.1.6: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-core/4.1.6, Apache 2.0 datanucleus-rdbms 4.1.7: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-rdbms/4.1.7, Apache 2.0 derby 10.14.2.0: https://github.com/apache/derby, Apache 2.0 druid 1.1.14: https://mvnrepository.com/artifact/com.alibaba/druid/1.1.14, Apache 2.0 error_prone_annotations 2.1.3 https://mvnrepository.com/artifact/com.google.errorprone/error_prone_annotations/2.1.3, Apache 2.0 gson 2.8.8: https://github.com/google/gson, Apache 2.0 guava 24.1-jre: https://mvnrepository.com/artifact/com.google.guava/guava/24.1-jre, Apache 2.0 guava-retrying 2.0.0: https://mvnrepository.com/artifact/com.github.rholder/guava-retrying/2.0.0, Apache 2.0 guice 3.0: https://mvnrepository.com/artifact/com.google.inject/guice/3.0, Apache 2.0 guice-servlet 3.0: https://mvnrepository.com/artifact/com.google.inject.extensions/guice-servlet/3.0, Apache 2.0 hadoop-annotations 2.7.3:https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-annotations/2.7.3, Apache 2.0 hadoop-auth 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-auth/2.7.3, Apache 2.0 hadoop-aws 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws/2.7.3, Apache 2.0 hadoop-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client/2.7.3, Apache 2.0 hadoop-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common/2.7.3, Apache 2.0 hadoop-hdfs 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs/2.7.3, Apache 2.0 hadoop-mapreduce-client-app 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-app/2.7.3, Apache 2.0 hadoop-mapreduce-client-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-common/2.7.3, Apache 2.0 hadoop-mapreduce-client-core 2.7.3: https://mvnrepository.com/artifact/io.hops/hadoop-mapreduce-client-core/2.7.3, Apache 2.0 hadoop-mapreduce-client-jobclient 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-jobclient/2.7.3, Apache 2.0 hadoop-mapreduce-client-shuffle 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-shuffle/2.7.3, Apache 2.0 hadoop-yarn-api 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-api/2.7.3, Apache 2.0 hadoop-yarn-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-client/2.7.3, Apache 2.0 hadoop-yarn-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-common/2.7.3, Apache 2.0 hadoop-yarn-server-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-server-common/2.7.3, Apache 2.0 HikariCP 4.0.3: https://mvnrepository.com/artifact/com.zaxxer/HikariCP/4.0.3, Apache 2.0 hive-common 2.1.0: 
https://mvnrepository.com/artifact/org.apache.hive/hive-common/2.1.0, Apache 2.0 hive-jdbc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-jdbc/2.1.0, Apache 2.0 hive-metastore 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-metastore/2.1.0, Apache 2.0 hive-orc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-orc/2.1.0, Apache 2.0 hive-serde 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-serde/2.1.0, Apache 2.0 hive-service 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service/2.1.0, Apache 2.0 hive-service-rpc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service-rpc/2.1.0, Apache 2.0 hive-storage-api 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-storage-api/2.1.0, Apache 2.0 htrace-core 3.1.0-incubating: https://mvnrepository.com/artifact/org.apache.htrace/htrace-core/3.1.0-incubating, Apache 2.0 httpclient 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient/4.4.1, Apache 2.0 httpcore 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpcore/4.4.1, Apache 2.0 httpmime 4.5.13: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpmime/4.5.13, Apache 2.0 jackson-annotations 2.10.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-annotations/2.10.5, Apache 2.0 jackson-core 2.10.5: https://github.com/FasterXML/jackson-core, Apache 2.0 jackson-core-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-core-asl/1.9.13, Apache 2.0 jackson-databind 2.10.5: https://github.com/FasterXML/jackson-databind, Apache 2.0 jackson-datatype-jdk8 2.12.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jdk8/2.12.5, Apache 2.0 jackson-datatype-jsr310 2.12.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jsr310/2.12.5, Apache 2.0 jackson-jaxrs 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-jaxrs/1.9.13, Apache 2.0 and LGPL 2.1 jackson-mapper-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-mapper-asl/1.9.13, Apache 2.0 jackson-module-parameter-names 2.12.5: https://mvnrepository.com/artifact/com.fasterxml.jackson.module/jackson-module-parameter-names/2.12.5, Apache 2.0 jackson-xc 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-xc/1.9.13, Apache 2.0 and LGPL 2.1 javax.inject 1: https://mvnrepository.com/artifact/javax.inject/javax.inject/1, Apache 2.0 javax.jdo-3.2.0-m3: https://mvnrepository.com/artifact/org.datanucleus/javax.jdo/3.2.0-m3, Apache 2.0 java-xmlbuilder 0.4 : https://mvnrepository.com/artifact/com.jamesmurty.utils/java-xmlbuilder/0.4, Apache 2.0 jdo-api 3.0.1: https://mvnrepository.com/artifact/javax.jdo/jdo-api/3.0.1, Apache 2.0 jets3t 0.9.0: https://mvnrepository.com/artifact/net.java.dev.jets3t/jets3t/0.9.0, Apache 2.0 jettison 1.1: https://github.com/jettison-json/jettison, Apache 2.0 jetty 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty/6.1.26, Apache 2.0 and EPL 1.0 jetty-continuation 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-continuation/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-http 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-http/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-io 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-io/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-security 9.4.44.v20210927: 
https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-security/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-server 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-server/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-servlet 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlet/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-servlets 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlets/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-util 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty-util/6.1.26, Apache 2.0 and EPL 1.0 jetty-util 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-util/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-util-ajax 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-util-ajax/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-webapp 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-webapp/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jetty-xml 9.4.44.v20210927: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-xml/9.4.44.v20210927, Apache 2.0 and EPL 1.0 jna 5.10.0: https://mvnrepository.com/artifact/net.java.dev.jna/jna/5.10.0, Apache 2.0 and LGPL 2.1 jna-platform 5.10.0: https://mvnrepository.com/artifact/net.java.dev.jna/jna-platform/5.10.0, Apache 2.0 and LGPL 2.1 joda-time 2.10.13: https://github.com/JodaOrg/joda-time, Apache 2.0 jpam 1.1: https://mvnrepository.com/artifact/net.sf.jpam/jpam/1.1, Apache 2.0 jsqlparser 2.1: https://github.com/JSQLParser/JSqlParser, Apache 2.0 or LGPL 2.1 jsr305 3.0.0: https://mvnrepository.com/artifact/com.google.code.findbugs/jsr305, Apache 2.0 j2objc-annotations 1.1 https://mvnrepository.com/artifact/com.google.j2objc/j2objc-annotations/1.1, Apache 2.0 libfb303 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libfb303/0.9.3, Apache 2.0 libthrift 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libthrift/0.9.3, Apache 2.0 log4j-api 2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api/2.11.2, Apache 2.0 log4j-core-2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/2.11.2, Apache 2.0 log4j 1.2.17: https://mvnrepository.com/artifact/log4j/log4j/1.2.17, Apache 2.0 log4j-1.2-api 2.14.1: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-1.2-api/2.14.1, Apache 2.0 lz4 1.3.0: https://mvnrepository.com/artifact/net.jpountz.lz4/lz4/1.3.0, Apache 2.0 mapstruct 1.2.0.Final: https://github.com/mapstruct/mapstruct, Apache 2.0 mybatis 3.5.2 https://mvnrepository.com/artifact/org.mybatis/mybatis/3.5.2, Apache 2.0 mybatis-plus 3.2.0: https://github.com/baomidou/mybatis-plus, Apache 2.0 mybatis-plus-annotation 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-annotation/3.2.0, Apache 2.0 mybatis-plus-boot-starter 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-boot-starter/3.2.0, Apache 2.0 mybatis-plus-core 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-core/3.2.0, Apache 2.0 mybatis-plus-extension 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-extension/3.2.0, Apache 2.0 mybatis-spring 2.0.2: https://mvnrepository.com/artifact/org.mybatis/mybatis-spring/2.0.2, Apache 2.0 netty 3.6.2.Final: https://github.com/netty/netty, Apache 2.0 netty 4.1.53.Final: https://github.com/netty/netty/blob/netty-4.1.53.Final/LICENSE.txt, Apache 2.0 opencsv 2.3: 
https://mvnrepository.com/artifact/net.sf.opencsv/opencsv/2.3, Apache 2.0 parquet-hadoop-bundle 1.8.1: https://mvnrepository.com/artifact/org.apache.parquet/parquet-hadoop-bundle/1.8.1, Apache 2.0 poi 4.1.2: https://mvnrepository.com/artifact/org.apache.poi/poi/4.1.2, Apache 2.0 poi-ooxml 4.1.2: https://mvnrepository.com/artifact/org.apache.poi/poi-ooxml/4.1.2, Apache 2.0 poi-ooxml-schemas-4.1.2: https://mvnrepository.com/artifact/org.apache.poi/poi-ooxml-schemas/4.1.2, Apache 2.0 quartz 2.3.2: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz/2.3.2, Apache 2.0 quartz-jobs 2.3.2: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz-jobs/2.3.2, Apache 2.0 snakeyaml 1.28: https://mvnrepository.com/artifact/org.yaml/snakeyaml/1.28, Apache 2.0 snappy 0.2: https://mvnrepository.com/artifact/org.iq80.snappy/snappy/0.2, Apache 2.0 snappy-java 1.0.4.1: https://github.com/xerial/snappy-java, Apache 2.0 SparseBitSet 1.2: https://mvnrepository.com/artifact/com.zaxxer/SparseBitSet, Apache 2.0 spring-aop 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-aop/5.3.12, Apache 2.0 spring-beans 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-beans/5.3.12, Apache 2.0 spring-boot 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot/2.5.6, Apache 2.0 spring-boot-actuator 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-actuator/2.5.6, Apache 2.0 spring-boot-actuator-autoconfigure 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-actuator-autoconfigure/2.5.6, Apache 2.0 spring-boot-configuration-processor 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-configuration-processor/2.5.6, Apache 2.0 spring-boot-autoconfigure 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-autoconfigure/2.5.6, Apache 2.0 spring-boot-starter 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter/2.5.6, Apache 2.0 spring-boot-starter-actuator 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-actuator/2.5.6, Apache 2.0 spring-boot-starter-aop 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-aop/2.5.6, Apache 2.0 spring-boot-starter-jdbc 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jdbc/2.5.6, Apache 2.0 spring-boot-starter-jetty 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jetty/2.5.6, Apache 2.0 spring-boot-starter-json 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-json/2.5.6, Apache 2.0 spring-boot-starter-logging 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-logging/2.5.6, Apache 2.0 spring-boot-starter-quartz 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-quartz/2.5.6, Apache 2.0 spring-boot-starter-web 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-web/2.5.6, Apache 2.0 spring-boot-starter-cache 2.5.6: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-cache/2.5.6, Apache 2.0 spring-context 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-context/5.3.12, Apache 2.0 spring-context-support 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-context-support/5.3.12, Apache 2.0 spring-core 5.3.12: 
https://mvnrepository.com/artifact/org.springframework/spring-core, Apache 2.0 spring-expression 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-expression, Apache 2.0 springfox-core 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-core, Apache 2.0 springfox-schema 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-schema, Apache 2.0 springfox-spi 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spi, Apache 2.0 springfox-spring-web 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spring-web/2.9.2, Apache 2.0 springfox-swagger2 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger2/2.9.2, Apache 2.0 springfox-swagger-common 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-common/2.9.2, Apache 2.0 springfox-swagger-ui 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-ui/2.9.2, Apache 2.0 spring-jcl 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-jcl/5.3.12, Apache 2.0 spring-jdbc 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-jdbc/5.3.12, Apache 2.0 spring-plugin-core 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-core/1.2.0.RELEASE, Apache 2.0 spring-plugin-metadata 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-metadata/1.2.0.RELEASE, Apache 2.0 spring-tx 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-tx/5.3.12, Apache 2.0 spring-web 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-web/5.3.12, Apache 2.0 spring-webmvc 5.3.12: https://mvnrepository.com/artifact/org.springframework/spring-webmvc/5.3.12, Apache 2.0 swagger-annotations 1.5.20: https://mvnrepository.com/artifact/io.swagger/swagger-annotations/1.5.20, Apache 2.0 swagger-bootstrap-ui 1.9.3: https://mvnrepository.com/artifact/com.github.xiaoymin/swagger-bootstrap-ui/1.9.3, Apache 2.0 swagger-models 1.5.24: https://mvnrepository.com/artifact/io.swagger/swagger-models/1.5.24, Apache 2.0 tephra-api 0.6.0: https://mvnrepository.com/artifact/co.cask.tephra/tephra-api/0.6.0, Apache 2.0 tomcat-embed-el 9.0.54: https://mvnrepository.com/artifact/org.apache.tomcat.embed/tomcat-embed-el/9.0.54, Apache 2.0 xercesImpl 2.9.1: https://mvnrepository.com/artifact/xerces/xercesImpl/2.9.1, Apache 2.0 xmlbeans 3.1.0: https://mvnrepository.com/artifact/org.apache.xmlbeans/xmlbeans/3.1.0, Apache 2.0 xml-apis 1.3.04: https://mvnrepository.com/artifact/xml-apis/xml-apis/1.3.04, Apache 2.0 and W3C zookeeper 3.4.14: https://mvnrepository.com/artifact/org.apache.zookeeper/zookeeper/3.4.14, Apache 2.0 presto-jdbc 0.238.1 https://mvnrepository.com/artifact/com.facebook.presto/presto-jdbc/0.238.1 protostuff-core 1.7.2: https://github.com/protostuff/protostuff/protostuff-core Apache-2.0 protostuff-runtime 1.7.2: https://github.com/protostuff/protostuff/protostuff-core Apache-2.0 protostuff-api 1.7.2: https://github.com/protostuff/protostuff/protostuff-api Apache-2.0 protostuff-collectionschema 1.7.2: https://github.com/protostuff/protostuff/protostuff-collectionschema Apache-2.0 prometheus client_java(simpleclient) 0.12.0: https://github.com/prometheus/client_java, Apache 2.0 snowflake snowflake-2010: https://github.com/twitter-archive/snowflake/tree/snowflake-2010, Apache 2.0 kubernetes-client 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-client/5.8.0, Apache 2.0 kubernetes-model-admissionregistration 5.8.0: 
https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-admissionregistration/5.8.0, Apache 2.0 kubernetes-model-apiextensions 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-apiextensions/5.8.0, Apache 2.0 kubernetes-model-apps 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-apps/5.8.0, Apache 2.0 kubernetes-model-autoscaling 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-autoscaling/5.8.0, Apache 2.0 kubernetes-model-batch 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-autoscaling/5.8.0, Apache 2.0 kubernetes-model-certificates 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-certificates/5.8.0, Apache 2.0 kubernetes-model-common 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-common/5.8.0, Apache 2.0 kubernetes-model-coordination 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-coordination/5.8.0, Apache 2.0 kubernetes-model-core 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-core/5.8.0, Apache 2.0 kubernetes-model-discovery 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-discovery/5.8.0, Apache 2.0 kubernetes-model-events 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-events/5.8.0, Apache 2.0 kubernetes-model-extensions 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-extensions/5.8.0, Apache 2.0 kubernetes-model-flowcontrol 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-flowcontrol/5.8.0, Apache 2.0 kubernetes-model-metrics 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-metrics/5.8.0, Apache 2.0 kubernetes-model-networking 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-networking/5.8.0, Apache 2.0 kubernetes-model-node 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-node/5.8.0, Apache 2.0 kubernetes-model-policy 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-policy/5.8.0, Apache 2.0 kubernetes-model-rbac 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-rbac/5.8.0, Apache 2.0 kubernetes-model-scheduling 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-scheduling/5.8.0, Apache 2.0 kubernetes-model-storageclass 5.8.0: https://mvnrepository.com/artifact/io.fabric8/kubernetes-model-storageclass/5.8.0, Apache 2.0 zjsonpatch 0.3.0 https://mvnrepository.com/artifact/io.fabric8/zjsonpatch/0.3.0, Apache 2.0 generex 1.0.2 https://mvnrepository.com/artifact/com.github.mifmif/generex/1.0.2, Apache 2.0 jackson-dataformat-yaml 2.12.5 https://mvnrepository.com/artifact/com.fasterxml.jackson.dataformat/jackson-dataformat-yaml/2.12.5, Apache 2.0 logging-interceptor 3.14.9 https://mvnrepository.com/artifact/com.squareup.okhttp3/logging-interceptor/3.14.9, Apache 2.0 okhttp 3.14.3 https://mvnrepository.com/artifact/com.squareup.okhttp3/okhttp/3.14.3, Apache 2.0 okio 1.17.2 https://mvnrepository.com/artifact/com.squareup.okio/okio/1.17.2, Apache 2.0 hibernate-validator 6.2.2.Final https://mvnrepository.com/artifact/org.hibernate.validator/hibernate-validator/6.2.2.Final, Apache 2.0 jakarta.validation-api 2.0.2 https://mvnrepository.com/artifact/jakarta.validation/jakarta.validation-api/2.0.2, Apache 2.0 jboss-logging:jar 3.4.2.Final https://mvnrepository.com/artifact/org.jboss.logging/jboss-logging/3.4.2.Final, Apache 2.0 ======================================================================== BSD licenses 
======================================================================== The following components are provided under a BSD license. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. asm 3.1: https://github.com/jdf/javalin/tree/master/lib/asm-3.1, BSD curvesapi 1.06: https://mvnrepository.com/artifact/com.github.virtuald/curvesapi/1.06, BSD 3-clause javolution 5.5.1: https://mvnrepository.com/artifact/javolution/javolution/5.5.1, BSD jline 0.9.94: https://github.com/jline/jline3, BSD jsch 0.1.42: https://mvnrepository.com/artifact/com.jcraft/jsch/0.1.42, BSD leveldbjni-all 1.8: https://github.com/fusesource/leveldbjni, BSD-3-Clause postgresql 42.2.5: https://mvnrepository.com/artifact/org.postgresql/postgresql/42.2.5, BSD 2-clause protobuf-java 2.5.0: https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java/2.5.0, BSD 2-clause paranamer 2.3: https://mvnrepository.com/artifact/com.thoughtworks.paranamer/paranamer/2.3, BSD threetenbp 1.3.6: https://mvnrepository.com/artifact/org.threeten/threetenbp/1.3.6, BSD 3-clause xmlenc 0.52: https://mvnrepository.com/artifact/xmlenc/xmlenc/0.52, BSD py4j 0.10.9: https://mvnrepository.com/artifact/net.sf.py4j/py4j/0.10.9, BSD 2-clause LatencyUtils 2.0.3: https://github.com/LatencyUtils/LatencyUtils, BSD-2-Clause janino 3.1.6: https://mvnrepository.com/artifact/org.codehaus.janino/janino/3.1.6, BSD 3-clause commons-compiler 3.1.6: https://mvnrepository.com/artifact/org.codehaus.janino/janino/3.1.6, BSD 3-clause automaton 1.11-8 https://mvnrepository.com/artifact/dk.brics.automaton/automaton/1.11-8, BSD 2-clause ======================================================================== CDDL licenses ======================================================================== The following components are provided under the CDDL License. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. 
activation 1.1: https://mvnrepository.com/artifact/javax.activation/activation/1.1 CDDL 1.0 javax.activation-api 1.2.0: https://mvnrepository.com/artifact/javax.activation/javax.activation-api/1.2.0, CDDL and LGPL 2.0 javax.annotation-api 1.3.2: https://mvnrepository.com/artifact/javax.annotation/javax.annotation-api/1.3.2, CDDL + GPLv2 javax.mail 1.6.2: https://mvnrepository.com/artifact/com.sun.mail/javax.mail/1.6.2, CDDL/GPLv2 javax.servlet-api 3.1.0: https://mvnrepository.com/artifact/javax.servlet/javax.servlet-api/3.1.0, CDDL + GPLv2 jaxb-api 2.3.1: https://mvnrepository.com/artifact/javax.xml.bind/jaxb-api/2.3.1, CDDL 1.1 jaxb-impl 2.2.3-1: https://mvnrepository.com/artifact/com.sun.xml.bind/jaxb-impl/2.2.3-1, CDDL and GPL 1.1 jersey-client 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-client/1.9, CDDL 1.1 and GPL 1.1 jersey-core 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-core/1.9, CDDL 1.1 and GPL 1.1 jersey-guice 1.9: https://mvnrepository.com/artifact/com.sun.jersey.contribs/jersey-guice/1.9, CDDL 1.1 and GPL 1.1 jersey-json 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-json/1.9, CDDL 1.1 and GPL 1.1 jersey-server 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-server/1.9, CDDL 1.1 and GPL 1.1 jta 1.1: https://mvnrepository.com/artifact/javax.transaction/jta/1.1, CDDL 1.0 transaction-api 1.1: https://mvnrepository.com/artifact/javax.transaction/transaction-api/1.1, CDDL 1.0 javax.el 3.0.0: https://mvnrepository.com/artifact/org.glassfish/javax.el/3.0.0, CDDL and GPL and GPL 2.0 ======================================================================== EPL licenses ======================================================================== The following components are provided under the EPL License. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. aspectjweaver 1.9.7:https://mvnrepository.com/artifact/org.aspectj/aspectjweaver/1.9.7, EPL 1.0 logback-classic 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-classic/1.2.3, EPL 1.0 and LGPL 2.1 logback-core 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-core/1.2.3, EPL 1.0 and LGPL 2.1 h2-1.4.200 https://github.com/h2database/h2database/blob/master/LICENSE.txt, MPL 2.0 or EPL 1.0 ======================================================================== MIT licenses ======================================================================== The following components are provided under a MIT 2.0 license. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. 
jul-to-slf4j 1.7.32: https://mvnrepository.com/artifact/org.slf4j/jul-to-slf4j/1.7.32, MIT mssql-jdbc 6.1.0.jre8: https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc/6.1.0.jre8, MIT slf4j-api 1.7.5: https://mvnrepository.com/artifact/org.slf4j/slf4j-api/1.7.5, MIT animal-sniffer-annotations 1.14 https://mvnrepository.com/artifact/org.codehaus.mojo/animal-sniffer-annotations/1.14, MIT checker-compat-qual 2.0.0 https://mvnrepository.com/artifact/org.checkerframework/checker-compat-qual/2.0.0, MIT + GPLv2 checker-qual 3.10.0 https://mvnrepository.com/artifact/org.checkerframework/checker-qual/3.10.0, MIT + GPLv2 Java-WebSocket 1.5.1: https://github.com/TooTallNate/Java-WebSocket MIT oshi-core 6.1.1: https://mvnrepository.com/artifact/com.github.oshi/oshi-core/6.1.1, MIT ======================================================================== MPL 1.1 licenses ======================================================================== The following components are provided under a MPL 1.1 license. See project link for details. The text of each license is also included at licenses/LICENSE-[project].txt. jamon-runtime 2.3.1: https://mvnrepository.com/artifact/org.jamon/jamon-runtime/2.3.1, MPL-1.1 javassist 3.27.0-GA: https://github.com/jboss-javassist/javassist, MPL-1.1 ======================================================================== Public Domain licenses ======================================================================== aopalliance 1.0: https://mvnrepository.com/artifact/aopalliance/aopalliance/1.0, Public Domain ======================================== WTFPL License ======================================== reflections 0.9.12: https://github.com/ronmamo/reflections WTFPL ======================================== CC0-1.0 licenses ======================================== HdrHistogram 2.1.12: https://github.com/HdrHistogram/HdrHistogram , CC0-1.0 and BSD 2-Clause ======================================================================== UI related licenses ======================================================================== The following components are used in UI.See project link for details. The text of each license is also included at licenses/ui-licenses/LICENSE-[project].txt. 
======================================== MIT licenses ======================================== @form-create/element-ui 1.0.18: https://github.com/xaboy/form-create MIT axios 0.16.2: https://github.com/axios/axios MIT bootstrap 3.3.7: https://github.com/twbs/bootstrap MIT canvg 1.5.1: https://github.com/canvg/canvg MIT clipboard 2.0.1: https://github.com/zenorocha/clipboard.js MIT codemirror 5.43.0: https://github.com/codemirror/CodeMirror MIT dayjs 1.7.8: https://github.com/iamkun/dayjs MIT element-ui 2.13.2: https://github.com/ElemeFE/element MIT html2canvas 0.5.0-beta4: https://github.com/niklasvh/html2canvas MIT jquery 3.3.1: https://github.com/jquery/jquery MIT jquery-ui 1.12.1: https://github.com/jquery/jquery-ui MIT js-cookie 2.2.1: https://github.com/js-cookie/js-cookie MIT jsplumb 2.8.6: https://github.com/jsplumb/jsplumb MIT and GPLv2 lodash 4.17.11: https://github.com/lodash/lodash MIT moment-timezone 0.5.33: https://github.com/moment/moment-timezone MIT vue-treeselect 0.4.0: https://github.com/riophae/vue-treeselect MIT vue 2.5.17: https://github.com/vuejs/vue MIT vue-router 2.7.0: https://github.com/vuejs/vue-router MIT vuex 3.0.0: https://github.com/vuejs/vuex MIT vuex-router-sync 4.1.2: https://github.com/vuejs/vuex-router-sync MIT dagre 0.8.5: https://github.com/dagrejs/dagre MIT ======================================== Apache 2.0 licenses ======================================== echarts 4.1.0: https://github.com/apache/incubator-echarts Apache-2.0 remixicon 2.5.0 https://github.com/Remix-Design/remixicon Apache-2.0 ======================================== BSD licenses ======================================== d3 3.5.17: https://github.com/d3/d3 BSD-3-Clause
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,513
[Upgrade] Upgrade mybatis-plus to 3.4.3
### Discussed in https://github.com/apache/dolphinscheduler/discussions/8162 <div type='discussions-op-text'> <sup>Originally posted by **YiuTerran** January 24, 2022</sup> Mybatis-plus v3.2.0 uses mybatis 3.5.2. There are high-severity security problems in this version. See [CVE-2020-26945](https://nvd.nist.gov/vuln/detail/CVE-2020-26945)</div>
https://github.com/apache/dolphinscheduler/issues/8513
https://github.com/apache/dolphinscheduler/pull/8515
33b50bab7232434a7ac5787b20f4e08db69a7d95
ca6d148a1b48195a359a3ab29d449229a87dd05a
"2022-02-24T03:07:16Z"
java
"2022-02-25T00:31:12Z"
pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler</artifactId> <version>2.0.4-SNAPSHOT</version> <packaging>pom</packaging> <name>${project.artifactId}</name> <url>https://dolphinscheduler.apache.org</url> <description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. </description> <scm> <connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection> <developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection> <url>https://github.com/apache/dolphinscheduler</url> <tag>HEAD</tag> </scm> <mailingLists> <mailingList> <name>DolphinScheduler Developer List</name> <post>dev@dolphinscheduler.apache.org</post> <subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe> <unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe> </mailingList> </mailingLists> <parent> <groupId>org.apache</groupId> <artifactId>apache</artifactId> <version>25</version> </parent> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <curator.version>4.3.0</curator.version> <zookeeper.version>3.4.14</zookeeper.version> <spring.version>5.3.12</spring.version> <spring.boot.version>2.5.6</spring.boot.version> <java.version>1.8</java.version> <logback.version>1.2.3</logback.version> <hadoop.version>2.7.3</hadoop.version> <quartz.version>2.3.2</quartz.version> <jackson.version>2.10.5</jackson.version> <mybatis-plus.version>3.2.0</mybatis-plus.version> <mybatis.spring.version>2.0.1</mybatis.spring.version> <cron.utils.version>9.1.3</cron.utils.version> <druid.version>1.2.4</druid.version> <h2.version>1.4.200</h2.version> <commons.codec.version>1.11</commons.codec.version> <commons.logging.version>1.1.1</commons.logging.version> <httpclient.version>4.4.1</httpclient.version> <httpcore.version>4.4.1</httpcore.version> <junit.version>4.12</junit.version> <mysql.connector.version>8.0.16</mysql.connector.version> <slf4j.api.version>1.7.5</slf4j.api.version> <slf4j.log4j12.version>1.7.5</slf4j.log4j12.version> <commons.collections.version>3.2.2</commons.collections.version> <commons.httpclient>3.0.1</commons.httpclient> <commons.beanutils.version>1.9.4</commons.beanutils.version> 
<commons.configuration.version>1.10</commons.configuration.version> <commons.lang.version>2.6</commons.lang.version> <commons.email.version>1.5</commons.email.version> <poi.version>4.1.2</poi.version> <javax.servlet.api.version>3.1.0</javax.servlet.api.version> <commons.collections4.version>4.1</commons.collections4.version> <guava.version>24.1-jre</guava.version> <postgresql.version>42.2.5</postgresql.version> <hive.jdbc.version>2.1.0</hive.jdbc.version> <commons.io.version>2.4</commons.io.version> <oshi.core.version>6.1.1</oshi.core.version> <clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version> <mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version> <presto.jdbc.version>0.238.1</presto.jdbc.version> <spotbugs.version>3.1.12</spotbugs.version> <checkstyle.version>3.1.2</checkstyle.version> <curator.test>2.12.0</curator.test> <frontend-maven-plugin.version>1.6</frontend-maven-plugin.version> <maven-compiler-plugin.version>3.3</maven-compiler-plugin.version> <maven-assembly-plugin.version>3.3.0</maven-assembly-plugin.version> <maven-release-plugin.version>2.5.3</maven-release-plugin.version> <maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version> <maven-source-plugin.version>2.4</maven-source-plugin.version> <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version> <maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version> <rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version> <jacoco.version>0.8.7</jacoco.version> <jcip.version>1.0</jcip.version> <maven.deploy.skip>false</maven.deploy.skip> <cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version> <servlet-api.version>2.5</servlet-api.version> <swagger.version>1.9.3</swagger.version> <springfox.version>2.9.2</springfox.version> <swagger-models.version>1.5.24</swagger-models.version> <guava-retry.version>2.0.0</guava-retry.version> <protostuff.version>1.7.2</protostuff.version> <reflections.version>0.9.12</reflections.version> <byte-buddy.version>1.9.16</byte-buddy.version> <java-websocket.version>1.5.1</java-websocket.version> <py4j.version>0.10.9</py4j.version> <auto-service.version>1.0.1</auto-service.version> <jacoco.skip>false</jacoco.skip> <netty.version>4.1.53.Final</netty.version> <maven-jar-plugin.version>3.2.0</maven-jar-plugin.version> <powermock.version>2.0.9</powermock.version> <jsr305.version>3.0.0</jsr305.version> <commons-compress.version>1.19</commons-compress.version> <commons-math3.version>3.1.1</commons-math3.version> <error_prone_annotations.version>2.5.1</error_prone_annotations.version> <exec-maven-plugin.version>3.0.0</exec-maven-plugin.version> <janino.version>3.1.6</janino.version> <kubernetes.version>5.8.0</kubernetes.version> <hibernate.validator.version>6.2.2.Final</hibernate.validator.version> <docker.hub>apache</docker.hub> <docker.repo>${project.name}</docker.repo> <docker.tag>${project.version}</docker.tag> </properties> <dependencyManagement> <dependencies> <dependency> <groupId>io.netty</groupId> <artifactId>netty-bom</artifactId> <version>${netty.version}</version> <scope>import</scope> <type>pom</type> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>${spring.boot.version}</version> <type>pom</type> <scope>import</scope> </dependency> <dependency> <groupId>io.netty</groupId> <artifactId>netty-all</artifactId> <version>${netty.version}</version> </dependency> <dependency> <groupId>org.java-websocket</groupId> <artifactId>Java-WebSocket</artifactId> 
<version>${java-websocket.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-boot-starter</artifactId> <version>${mybatis-plus.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus</artifactId> <version>${mybatis-plus.version}</version> </dependency> <!-- quartz--> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz-jobs</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>com.cronutils</groupId> <artifactId>cron-utils</artifactId> <version>${cron.utils.version}</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>druid</artifactId> <version>${druid.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-core</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-context</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-beans</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-tx</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-jdbc</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-test</artifactId> <version>${spring.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-master</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-worker</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-log-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-standalone-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-common</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-dao</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-remote</artifactId> <version>${project.version}</version> 
</dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-service</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-meter</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-spi</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-data-quality</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-python</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-dingtalk</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-email</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-feishu</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-http</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-script</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-slack</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-wechat</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-pagerduty</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-webexteams</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-telegram</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-zookeeper</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-all</artifactId> <version>${project.version}</version> </dependency> <dependency> 
<groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-clickhouse</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-db2</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-hive</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-mysql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-oracle</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-postgresql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-sqlserver</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-datax</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-flink</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-http</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-mr</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-pigeon</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-procedure</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-python</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-shell</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-spark</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-sql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-sqoop</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-seatunnel</artifactId> <version>${project.version}</version> </dependency> <dependency> 
<groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-ui</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-tools</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-dataquality</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-framework</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> <version>${zookeeper.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> <exclusion> <artifactId>netty</artifactId> <groupId>io.netty</groupId> </exclusion> <exclusion> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-annotations</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-client</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>log4j-1.2-api</groupId> <artifactId>org.apache.logging.log4j</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-recipes</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-test</artifactId> <version>${curator.test}</version> </dependency> <dependency> <groupId>commons-codec</groupId> <artifactId>commons-codec</artifactId> <version>${commons.codec.version}</version> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>${commons.logging.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>${httpclient.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpcore</artifactId> <version>${httpcore.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-databind</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> </dependency> <!--protostuff--> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-core</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-runtime</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>net.bytebuddy</groupId> <artifactId>byte-buddy</artifactId> <version>${byte-buddy.version}</version> </dependency> <dependency> <groupId>org.reflections</groupId> <artifactId>reflections</artifactId> <version>${reflections.version}</version> </dependency> <dependency> <groupId>junit</groupId> 
<artifactId>junit</artifactId> <version>${junit.version}</version> </dependency> <dependency> <groupId>mysql</groupId> <artifactId>mysql-connector-java</artifactId> <version>${mysql.connector.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>com.h2database</groupId> <artifactId>h2</artifactId> <version>${h2.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>${slf4j.api.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>${slf4j.log4j12.version}</version> </dependency> <dependency> <groupId>commons-collections</groupId> <artifactId>commons-collections</artifactId> <version>${commons.collections.version}</version> </dependency> <dependency> <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>${commons.httpclient}</version> </dependency> <dependency> <groupId>commons-beanutils</groupId> <artifactId>commons-beanutils</artifactId> <version>${commons.beanutils.version}</version> </dependency> <dependency> <groupId>commons-configuration</groupId> <artifactId>commons-configuration</artifactId> <version>${commons.configuration.version}</version> </dependency> <dependency> <groupId>commons-lang</groupId> <artifactId>commons-lang</artifactId> <version>${commons.lang.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-email</artifactId> <version>${commons.email.version}</version> </dependency> <!--excel poi--> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi</artifactId> <version>${poi.version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml</artifactId> <version>${poi.version}</version> </dependency> <!-- hadoop --> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> <exclusion> <artifactId>slf4j-log4j12</artifactId> <groupId>org.slf4j</groupId> </exclusion> <exclusion> <artifactId>com.sun.jersey</artifactId> <groupId>jersey-json</groupId> </exclusion> <exclusion> <groupId>junit</groupId> <artifactId>junit</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-common</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-aws</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-collections4</artifactId> <version>${commons.collections4.version}</version> </dependency> <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> <version>${guava.version}</version> </dependency> <dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> <version>${postgresql.version}</version> </dependency> 
<dependency> <groupId>org.apache.hive</groupId> <artifactId>hive-jdbc</artifactId> <version>${hive.jdbc.version}</version> </dependency> <dependency> <groupId>commons-io</groupId> <artifactId>commons-io</artifactId> <version>${commons.io.version}</version> </dependency> <dependency> <groupId>com.github.oshi</groupId> <artifactId>oshi-core</artifactId> <version>${oshi.core.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-simple</artifactId> </exclusion> <exclusion> <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-api</artifactId> </exclusion> <exclusion> <groupId>org.hamcrest</groupId> <artifactId>hamcrest</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>ru.yandex.clickhouse</groupId> <artifactId>clickhouse-jdbc</artifactId> <version>${clickhouse.jdbc.version}</version> </dependency> <dependency> <groupId>com.microsoft.sqlserver</groupId> <artifactId>mssql-jdbc</artifactId> <version>${mssql.jdbc.version}</version> </dependency> <dependency> <groupId>com.facebook.presto</groupId> <artifactId>presto-jdbc</artifactId> <version>${presto.jdbc.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> <version>${servlet-api.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>javax.servlet-api</artifactId> <version>${javax.servlet.api.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger-ui</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.swagger</groupId> <artifactId>swagger-models</artifactId> <version>${swagger-models.version}</version> </dependency> <dependency> <groupId>com.github.xiaoymin</groupId> <artifactId>swagger-bootstrap-ui</artifactId> <version>${swagger.version}</version> </dependency> <dependency> <groupId>com.github.rholder</groupId> <artifactId>guava-retrying</artifactId> <version>${guava-retry.version}</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm</artifactId> <version>6.2.1</version> </dependency> <dependency> <groupId>javax.activation</groupId> <artifactId>activation</artifactId> <version>1.1</version> </dependency> <dependency> <groupId>com.sun.mail</groupId> <artifactId>javax.mail</artifactId> <version>1.6.2</version> </dependency> <dependency> <groupId>net.sf.py4j</groupId> <artifactId>py4j</artifactId> <version>${py4j.version}</version> </dependency> <dependency> <groupId>org.codehaus.janino</groupId> <artifactId>janino</artifactId> <version>${janino.version}</version> </dependency> <dependency> <groupId>com.google.code.findbugs</groupId> <artifactId>jsr305</artifactId> <version>${jsr305.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-compress</artifactId> <version>${commons-compress.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-math3</artifactId> <version>${commons-math3.version}</version> </dependency> <dependency> <groupId>com.google.errorprone</groupId> <artifactId>error_prone_annotations</artifactId> <version>${error_prone_annotations.version}</version> </dependency> <dependency> <groupId>io.fabric8</groupId> <artifactId>kubernetes-client</artifactId> <version>${kubernetes.version}</version> </dependency> 
<!-- https://mvnrepository.com/artifact/org.hibernate.validator/hibernate-validator --> <dependency> <groupId>org.hibernate.validator</groupId> <artifactId>hibernate-validator</artifactId> <version>${hibernate.validator.version}</version> </dependency> </dependencies> </dependencyManagement> <build> <pluginManagement> <plugins> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>rpm-maven-plugin</artifactId> <version>${rpm-maven-plugion.version}</version> <inherited>false</inherited> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>${java.version}</source> <target>${java.version}</target> <testSource>${java.version}</testSource> <testTarget>${java.version}</testTarget> </configuration> <version>${maven-compiler-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <tagNameFormat>@{project.version}</tagNameFormat> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> <version>${maven-assembly-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <configuration> <source>8</source> <failOnError>false</failOnError> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-dependency-plugin</artifactId> <version>${maven-dependency-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-jar-plugin</artifactId> <version>${maven-jar-plugin.version}</version> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>exec-maven-plugin</artifactId> <version>${exec-maven-plugin.version}</version> <executions> <execution> <id>docker-build</id> <phase>package</phase> <goals> <goal>exec</goal> </goals> <configuration> <environmentVariables> <DOCKER_BUILDKIT>1</DOCKER_BUILDKIT> </environmentVariables> <executable>docker</executable> <workingDirectory>${project.basedir}</workingDirectory> <arguments> <argument>build</argument> <argument>--no-cache</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:${docker.tag}</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:latest</argument> <argument>${project.basedir}</argument> <argument>--file=src/main/docker/Dockerfile</argument> </arguments> </configuration> </execution> <execution> <id>docker-push</id> <phase>deploy</phase> <goals> <goal>exec</goal> </goals> <configuration> <environmentVariables> <DOCKER_BUILDKIT>1</DOCKER_BUILDKIT> </environmentVariables> <executable>docker</executable> <workingDirectory>${project.basedir}</workingDirectory> <arguments> <argument>buildx</argument> <argument>build</argument> <argument>--no-cache</argument> <argument>--push</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:${docker.tag}</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:latest</argument> <argument>${project.basedir}</argument> <argument>--file=src/main/docker/Dockerfile</argument> </arguments> </configuration> </execution> </executions> </plugin> </plugins> </pluginManagement> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> 
<executions> <execution> <id>attach-javadocs</id> <goals> <goal>jar</goal> </goals> </execution> </executions> <configuration> <aggregate>true</aggregate> <charset>${project.build.sourceEncoding}</charset> <encoding>${project.build.sourceEncoding}</encoding> <docencoding>${project.build.sourceEncoding}</docencoding> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <autoVersionSubmodules>true</autoVersionSubmodules> <tagNameFormat>@{project.version}</tagNameFormat> <tagBase>${project.version}</tagBase> </configuration> <dependencies> <dependency> <groupId>org.apache.maven.scm</groupId> <artifactId>maven-scm-provider-jgit</artifactId> <version>1.9.5</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>${maven-compiler-plugin.version}</version> <configuration> <source>${java.version}</source> <target>${java.version}</target> <encoding>${project.build.sourceEncoding}</encoding> <skip>false</skip><!--not skip compile test classes--> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>${maven-surefire-plugin.version}</version> <dependencies> <dependency> <groupId>org.apache.maven.surefire</groupId> <artifactId>surefire-junit4</artifactId> <version>${maven-surefire-plugin.version}</version> </dependency> </dependencies> <configuration> <systemPropertyVariables> <jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile> </systemPropertyVariables> </configuration> </plugin> <!-- jenkins plugin jacoco report--> <plugin> <groupId>org.jacoco</groupId> <artifactId>jacoco-maven-plugin</artifactId> <version>${jacoco.version}</version> <configuration> <skip>${jacoco.skip}</skip> <dataFile>${project.build.directory}/jacoco.exec</dataFile> </configuration> <executions> <execution> <id>default-instrument</id> <goals> <goal>instrument</goal> </goals> </execution> <execution> <id>default-restore-instrumented-classes</id> <goals> <goal>restore-instrumented-classes</goal> </goals> <configuration> <excludes>com/github/dreamhead/moco/*</excludes> </configuration> </execution> <execution> <id>default-report</id> <goals> <goal>report</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-maven-plugin</artifactId> <version>${spotbugs.version}</version> <configuration> <xmlOutput>true</xmlOutput> <threshold>medium</threshold> <effort>default</effort> <excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile> <failOnError>true</failOnError> </configuration> <dependencies> <dependency> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs</artifactId> <version>4.0.0-beta4</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-checkstyle-plugin</artifactId> <version>${checkstyle.version}</version> <dependencies> <dependency> <groupId>com.puppycrawl.tools</groupId> <artifactId>checkstyle</artifactId> <version>8.45</version> </dependency> </dependencies> <configuration> <consoleOutput>true</consoleOutput> <encoding>UTF-8</encoding> <configLocation>style/checkstyle.xml</configLocation> <failOnViolation>true</failOnViolation> <violationSeverity>warning</violationSeverity> 
<includeTestSourceDirectory>true</includeTestSourceDirectory> <sourceDirectories> <sourceDirectory>${project.build.sourceDirectory}</sourceDirectory> </sourceDirectories> <excludes>**\/generated-sources\/</excludes> </configuration> <executions> <execution> <phase>compile</phase> <goals> <goal>check</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>${cobertura-maven-plugin.version}</version> <configuration> <check> </check> <aggregate>true</aggregate> <outputDirectory>./target/cobertura</outputDirectory> <encoding>${project.build.sourceEncoding}</encoding> <quiet>true</quiet> <format>xml</format> <instrumentation> <ignoreTrivial>true</ignoreTrivial> </instrumentation> </configuration> </plugin> <plugin> <artifactId>maven-source-plugin</artifactId> <version>${maven-source-plugin.version}</version> <executions> <execution> <id>attach-sources</id> <goals> <goal>jar</goal> </goals> </execution> </executions> </plugin> </plugins> </build> <dependencies> <!-- NOTE: only development / test phase dependencies (scope = test / provided) that won't be packaged into final jar can be declared here. For example: annotation processors, test dependencies that are used by most of the submodules. --> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <scope>test</scope> </dependency> <dependency> <groupId>org.jacoco</groupId> <artifactId>org.jacoco.agent</artifactId> <version>${jacoco.version}</version> <classifier>runtime</classifier> <scope>test</scope> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-configuration-processor</artifactId> <optional>true</optional> </dependency> <dependency> <groupId>com.google.auto.service</groupId> <artifactId>auto-service</artifactId> <version>${auto-service.version}</version> <scope>provided</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-api-mockito2</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-module-junit4</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-core</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> </dependencies> <modules> <module>dolphinscheduler-alert</module> <module>dolphinscheduler-spi</module> <module>dolphinscheduler-registry</module> <module>dolphinscheduler-task-plugin</module> <module>dolphinscheduler-ui</module> <module>dolphinscheduler-server</module> <module>dolphinscheduler-common</module> <module>dolphinscheduler-api</module> <module>dolphinscheduler-dao</module> <module>dolphinscheduler-dist</module> <module>dolphinscheduler-remote</module> <module>dolphinscheduler-service</module> <module>dolphinscheduler-microbench</module> <module>dolphinscheduler-data-quality</module> <module>dolphinscheduler-standalone-server</module> <module>dolphinscheduler-datasource-plugin</module> <module>dolphinscheduler-python</module> <module>dolphinscheduler-meter</module> <module>dolphinscheduler-master</module> <module>dolphinscheduler-worker</module> <module>dolphinscheduler-log-server</module> <module>dolphinscheduler-tools</module> <module>dolphinscheduler-ui-next</module> </modules> </project>
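For context on the record above: the pom.xml shown is the pre-fix state and still pins `<mybatis-plus.version>3.2.0</mybatis-plus.version>`. Below is a minimal sketch of the property bump the issue title asks for; the verified change lives in PR #8515 and may touch more than this one property (for instance the known-dependencies list in the next record), so treat this as an illustration rather than the actual diff.

```xml
<!-- Sketch only: bump the managed MyBatis-Plus version in the root pom.xml <properties>. -->
<!-- 3.4.3 is the target named in the issue title; per the issue, the MyBatis pulled in by -->
<!-- 3.2.0 (mybatis 3.5.2) is affected by CVE-2020-26945. -->
<mybatis-plus.version>3.4.3</mybatis-plus.version>
```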
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,513
[Upgrade] Upgrade mybatis-plus to 3.4.3
### Discussed in https://github.com/apache/dolphinscheduler/discussions/8162 <div type='discussions-op-text'> <sup>Originally posted by **YiuTerran** January 24, 2022</sup> Mybatis-plus v3.2.0 uses mybatis 3.5.2. There are high-severity security problems in this version. See [CVE-2020-26945](https://nvd.nist.gov/vuln/detail/CVE-2020-26945)</div>
https://github.com/apache/dolphinscheduler/issues/8513
https://github.com/apache/dolphinscheduler/pull/8515
33b50bab7232434a7ac5787b20f4e08db69a7d95
ca6d148a1b48195a359a3ab29d449229a87dd05a
"2022-02-24T03:07:16Z"
java
"2022-02-25T00:31:12Z"
tools/dependencies/known-dependencies.txt
HdrHistogram-2.1.12.jar HikariCP-4.0.3.jar LatencyUtils-2.0.3.jar activation-1.1.jar animal-sniffer-annotations-1.14.jar aopalliance-1.0.jar apacheds-i18n-2.0.0-M15.jar apacheds-kerberos-codec-2.0.0-M15.jar api-asn1-api-1.0.0-M20.jar api-util-1.0.0-M20.jar asm-3.1.jar asm-6.2.1.jar aspectjweaver-1.9.7.jar audience-annotations-0.5.0.jar avro-1.7.4.jar aws-java-sdk-1.7.4.jar bonecp-0.8.0.RELEASE.jar byte-buddy-1.9.16.jar caffeine-2.9.2.jar checker-compat-qual-2.0.0.jar checker-qual-3.10.0.jar classmate-1.5.1.jar clickhouse-jdbc-0.1.52.jar commons-email-1.5.jar commons-cli-1.2.jar commons-codec-1.11.jar commons-collections-3.2.2.jar commons-collections4-4.1.jar commons-compiler-3.1.6.jar commons-compress-1.19.jar commons-configuration-1.10.jar commons-daemon-1.0.13.jar commons-beanutils-1.9.4.jar commons-dbcp-1.4.jar commons-httpclient-3.0.1.jar commons-io-2.4.jar commons-lang-2.6.jar commons-logging-1.1.1.jar commons-math3-3.1.1.jar commons-net-3.1.jar commons-pool-1.6.jar cron-utils-9.1.3.jar error_prone_annotations-2.5.1.jar janino-3.1.6.jar javax.el-3.0.0.jar commons-lang3-3.12.0.jar curator-client-4.3.0.jar curator-framework-4.3.0.jar curator-recipes-4.3.0.jar curator-test-2.12.0.jar curvesapi-1.06.jar datanucleus-api-jdo-4.2.1.jar datanucleus-core-4.1.6.jar datanucleus-rdbms-4.1.7.jar derby-10.14.2.0.jar druid-1.2.4.jar gson-2.8.8.jar guava-24.1-jre.jar guava-retrying-2.0.0.jar guice-3.0.jar guice-servlet-3.0.jar h2-1.4.200.jar hadoop-annotations-2.7.3.jar hadoop-auth-2.7.3.jar hadoop-aws-2.7.3.jar hadoop-client-2.7.3.jar hadoop-common-2.7.3.jar hadoop-hdfs-2.7.3.jar hadoop-mapreduce-client-app-2.7.3.jar hadoop-mapreduce-client-common-2.7.3.jar hadoop-mapreduce-client-core-2.7.3.jar hadoop-mapreduce-client-jobclient-2.7.3.jar hadoop-mapreduce-client-shuffle-2.7.3.jar hadoop-yarn-api-2.7.3.jar hadoop-yarn-client-2.7.3.jar hadoop-yarn-common-2.7.3.jar hadoop-yarn-server-common-2.7.3.jar hibernate-validator-6.2.2.Final.jar hive-common-2.1.0.jar hive-jdbc-2.1.0.jar hive-metastore-2.1.0.jar hive-orc-2.1.0.jar hive-serde-2.1.0.jar hive-service-2.1.0.jar hive-service-rpc-2.1.0.jar hive-storage-api-2.1.0.jar htrace-core-3.1.0-incubating.jar httpclient-4.4.1.jar httpcore-4.4.1.jar httpmime-4.5.13.jar j2objc-annotations-1.1.jar jackson-annotations-2.10.5.jar jackson-core-2.10.5.jar jackson-core-asl-1.9.13.jar jackson-databind-2.10.5.jar jackson-datatype-jdk8-2.12.5.jar jackson-datatype-jsr310-2.12.5.jar jackson-jaxrs-1.9.13.jar jackson-mapper-asl-1.9.13.jar jackson-module-parameter-names-2.12.5.jar jackson-xc-1.9.13.jar jakarta.annotation-api-1.3.5.jar jakarta.servlet-api-4.0.4.jar jakarta.validation-api-2.0.2.jar jakarta.websocket-api-1.1.2.jar jamon-runtime-2.3.1.jar java-xmlbuilder-0.4.jar javassist-3.27.0-GA.jar javax.annotation-api-1.3.2.jar javax.activation-api-1.2.0.jar javax.inject-1.jar javax.jdo-3.2.0-m3.jar javax.mail-1.6.2.jar javolution-5.5.1.jar jaxb-api-2.3.1.jar jaxb-impl-2.2.3-1.jar jboss-logging-3.4.2.Final.jar jdo-api-3.0.1.jar jersey-client-1.9.jar jersey-core-1.9.jar jersey-guice-1.9.jar jersey-json-1.9.jar jersey-server-1.9.jar jets3t-0.9.0.jar jettison-1.1.jar jetty-6.1.26.jar jetty-continuation-9.4.44.v20210927.jar jetty-http-9.4.44.v20210927.jar jetty-io-9.4.44.v20210927.jar jetty-security-9.4.44.v20210927.jar jetty-server-9.4.44.v20210927.jar jetty-servlet-9.4.44.v20210927.jar jetty-servlets-9.4.44.v20210927.jar jetty-util-6.1.26.jar jetty-util-9.4.44.v20210927.jar jetty-util-ajax-9.4.44.v20210927.jar jetty-webapp-9.4.44.v20210927.jar jetty-xml-9.4.44.v20210927.jar 
jline-0.9.94.jar jna-5.10.0.jar jna-platform-5.10.0.jar joda-time-2.10.13.jar jpam-1.1.jar jsch-0.1.42.jar jsp-api-2.1.jar jsqlparser-2.1.jar jsr305-3.0.0.jar jta-1.1.jar jul-to-slf4j-1.7.32.jar leveldbjni-all-1.8.jar libfb303-0.9.3.jar libthrift-0.9.3.jar log4j-1.2-api-2.14.1.jar log4j-1.2.17.jar logback-classic-1.2.3.jar logback-core-1.2.3.jar lz4-1.3.0.jar mapstruct-1.2.0.Final.jar micrometer-core-1.7.5.jar micrometer-registry-prometheus-1.7.5.jar mssql-jdbc-6.1.0.jre8.jar mybatis-3.5.2.jar mybatis-plus-3.2.0.jar mybatis-plus-annotation-3.2.0.jar mybatis-plus-boot-starter-3.2.0.jar mybatis-plus-core-3.2.0.jar mybatis-plus-extension-3.2.0.jar mybatis-spring-2.0.2.jar netty-3.6.2.Final.jar netty-all-4.1.53.Final.jar opencsv-2.3.jar oshi-core-6.1.1.jar paranamer-2.3.jar parquet-hadoop-bundle-1.8.1.jar poi-4.1.2.jar poi-ooxml-4.1.2.jar poi-ooxml-schemas-4.1.2.jar postgresql-42.2.5.jar presto-jdbc-0.238.1.jar protobuf-java-2.5.0.jar protostuff-core-1.7.2.jar protostuff-runtime-1.7.2.jar protostuff-api-1.7.2.jar protostuff-collectionschema-1.7.2.jar py4j-0.10.9.jar quartz-2.3.2.jar quartz-jobs-2.3.2.jar reflections-0.9.12.jar simpleclient-0.10.0.jar simpleclient_common-0.10.0.jar slf4j-api-1.7.5.jar snakeyaml-1.28.jar snappy-0.2.jar snappy-java-1.0.4.1.jar SparseBitSet-1.2.jar spring-aop-5.3.12.jar spring-beans-5.3.12.jar spring-boot-2.5.6.jar spring-boot-actuator-2.5.6.jar spring-boot-actuator-autoconfigure-2.5.6.jar spring-boot-autoconfigure-2.5.6.jar spring-boot-configuration-processor-2.5.6.jar spring-boot-starter-2.5.6.jar spring-boot-starter-actuator-2.5.6.jar spring-boot-starter-aop-2.5.6.jar spring-boot-starter-jdbc-2.5.6.jar spring-boot-starter-jetty-2.5.6.jar spring-boot-starter-json-2.5.6.jar spring-boot-starter-logging-2.5.6.jar spring-boot-starter-quartz-2.5.6.jar spring-boot-starter-web-2.5.6.jar spring-boot-starter-cache-2.5.6.jar spring-context-5.3.12.jar spring-context-support-5.3.12.jar spring-core-5.3.12.jar spring-expression-5.3.12.jar spring-jcl-5.3.12.jar spring-jdbc-5.3.12.jar spring-plugin-core-1.2.0.RELEASE.jar spring-plugin-metadata-1.2.0.RELEASE.jar spring-tx-5.3.12.jar spring-web-5.3.12.jar spring-webmvc-5.3.12.jar springfox-core-2.9.2.jar springfox-schema-2.9.2.jar springfox-spi-2.9.2.jar springfox-spring-web-2.9.2.jar springfox-swagger-common-2.9.2.jar springfox-swagger-ui-2.9.2.jar springfox-swagger2-2.9.2.jar swagger-annotations-1.5.20.jar swagger-bootstrap-ui-1.9.3.jar swagger-models-1.5.24.jar tomcat-embed-el-9.0.54.jar tephra-api-0.6.0.jar transaction-api-1.1.jar xercesImpl-2.9.1.jar xml-apis-1.3.04.jar xmlbeans-3.1.0.jar xmlenc-0.52.jar zookeeper-3.4.14.jar Java-WebSocket-1.5.1.jar kubernetes-client-5.8.0.jar kubernetes-model-admissionregistration-5.8.0.jar kubernetes-model-apiextensions-5.8.0.jar kubernetes-model-apps-5.8.0.jar kubernetes-model-autoscaling-5.8.0.jar kubernetes-model-batch-5.8.0.jar kubernetes-model-certificates-5.8.0.jar kubernetes-model-common-5.8.0.jar kubernetes-model-coordination-5.8.0.jar kubernetes-model-core-5.8.0.jar kubernetes-model-discovery-5.8.0.jar kubernetes-model-events-5.8.0.jar kubernetes-model-extensions-5.8.0.jar kubernetes-model-flowcontrol-5.8.0.jar kubernetes-model-metrics-5.8.0.jar kubernetes-model-networking-5.8.0.jar kubernetes-model-node-5.8.0.jar kubernetes-model-policy-5.8.0.jar kubernetes-model-rbac-5.8.0.jar kubernetes-model-scheduling-5.8.0.jar kubernetes-model-storageclass-5.8.0.jar zjsonpatch-0.3.0.jar automaton-1.11-8.jar generex-1.0.2.jar jackson-dataformat-yaml-2.12.5.jar logging-interceptor-3.14.9.jar 
okhttp-3.14.9.jar okio-1.17.2.jar
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,394
[Bug] [Worker] date+1 still happens when the data is supplemented
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened I saw in the 2.0.2 release notes: [Feature-7451][Worker] remove date+1 when the data is supplemented. But I am using 2.0.3, and when I run COMPLEMENT DATA the dates are still shifted by one day (date+1): **run the process (it has one shell task that just prints the date)** <img width="716" alt="image" src="https://user-images.githubusercontent.com/48642743/154092379-98e96c05-e325-49c9-9841-647ff16d325e.png"> <img width="617" alt="image" src="https://user-images.githubusercontent.com/48642743/154092060-b68dee0d-3313-4daf-b149-3c321ef8136f.png"> **result** ``` [INFO] 2022-02-15 23:26:41.602 TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.shell.ShellTask:[66] - -> welcome to use bigdata scheduling system... 20220201 20220202 20220202000000 [INFO] 2022-02-15 23:26:41.602 TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.shell.ShellTask:[60] - FINALIZE_SESSION ...... [INFO] 2022-02-15 23:26:42.833 TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.shell.ShellTask:[66] - -> welcome to use bigdata scheduling system... 20220202 20220203 20220203000000 [INFO] 2022-02-15 23:26:42.835 TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.shell.ShellTask:[60] - FINALIZE_SESSION ``` ### What you expected to happen I think it should print something like this: ``` 20220131 20220101 20220201000000 ``` and ``` 20220201 20220202 20220202000000 ``` ### How to reproduce Follow the steps shown in the pictures above. ### Anything else I saw the change in [#7452](https://github.com/apache/dolphinscheduler/pull/7452/commits/1c2371f57be0e4b317e44c1f95d7ff4a0758d4a6), but I think the problem is not in the WorkerServer; I think it is in the MasterServer, in `BusinessTimeUtils.getBusinessTime(CommandType commandType, Date runTime)` <img width="888" alt="image" src="https://user-images.githubusercontent.com/48642743/154096291-72dd0cf4-62e2-456c-98d5-a47750b3b420.png"> ### Version 2.0.3 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8394
https://github.com/apache/dolphinscheduler/pull/8532
ca6d148a1b48195a359a3ab29d449229a87dd05a
e174d7a40ef8326d698701e97fe347fd730f1277
"2022-02-15T15:42:21Z"
java
"2022-02-25T02:27:08Z"
dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/task/paramparser/BusinessTimeUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.spi.task.paramparser; import static org.apache.dolphinscheduler.spi.task.TaskConstants.PARAMETER_BUSINESS_DATE; import static org.apache.dolphinscheduler.spi.task.TaskConstants.PARAMETER_CURRENT_DATE; import static org.apache.dolphinscheduler.spi.task.TaskConstants.PARAMETER_DATETIME; import static org.apache.dolphinscheduler.spi.task.TaskConstants.PARAMETER_FORMAT_DATE; import static org.apache.dolphinscheduler.spi.task.TaskConstants.PARAMETER_FORMAT_TIME; import static org.apache.dolphinscheduler.spi.utils.DateUtils.addDays; import static org.apache.dolphinscheduler.spi.utils.DateUtils.format; import org.apache.dolphinscheduler.spi.enums.CommandType; import java.util.Date; import java.util.HashMap; import java.util.Map; /** * business time utils */ public class BusinessTimeUtils { private BusinessTimeUtils() { throw new IllegalStateException("BusinessTimeUtils class"); } /** * get business time in parameters by different command types * * @param commandType command type * @param runTime run time or schedule time * @return business time */ public static Map<String, String> getBusinessTime(CommandType commandType, Date runTime) { Date businessDate = runTime; switch (commandType) { case COMPLEMENT_DATA: break; case START_PROCESS: case START_CURRENT_TASK_PROCESS: case RECOVER_TOLERANCE_FAULT_PROCESS: case RECOVER_SUSPENDED_PROCESS: case START_FAILURE_TASK_PROCESS: case REPEAT_RUNNING: case SCHEDULER: default: businessDate = addDays(new Date(), -1); if (runTime != null) { /** * If there is a scheduled time, take the scheduling time. Recovery from failed nodes, suspension of recovery, re-run for scheduling */ businessDate = addDays(runTime, -1); } break; } Date businessCurrentDate = addDays(businessDate, 1); Map<String, String> result = new HashMap<>(); result.put(PARAMETER_CURRENT_DATE, format(businessCurrentDate, PARAMETER_FORMAT_DATE)); result.put(PARAMETER_BUSINESS_DATE, format(businessDate, PARAMETER_FORMAT_DATE)); result.put(PARAMETER_DATETIME, format(businessCurrentDate, PARAMETER_FORMAT_TIME)); return result; } }
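To make the behaviour described in this issue concrete, here is a small standalone sketch (plain JDK time classes only, not the DolphinScheduler API itself) of what the COMPLEMENT_DATA branch of getBusinessTime above computes for a schedule time of 2022-02-01 00:00:00. The yyyyMMdd / yyyyMMddHHmmss patterns are assumptions inferred from the log output quoted in the issue:

```java
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class BusinessTimeSketch {
    public static void main(String[] args) {
        // Schedule time handed to getBusinessTime for a COMPLEMENT_DATA command.
        LocalDateTime runTime = LocalDateTime.of(2022, 2, 1, 0, 0, 0);

        // COMPLEMENT_DATA branch above: businessDate = runTime, no day subtracted.
        LocalDateTime businessDate = runTime;
        // businessCurrentDate = addDays(businessDate, 1) -- the "+1" the reporter observes.
        LocalDateTime businessCurrentDate = businessDate.plusDays(1);

        DateTimeFormatter dateFmt = DateTimeFormatter.ofPattern("yyyyMMdd");        // assumed date pattern
        DateTimeFormatter timeFmt = DateTimeFormatter.ofPattern("yyyyMMddHHmmss");  // assumed datetime pattern

        System.out.println("business date -> " + businessDate.format(dateFmt));         // 20220201
        System.out.println("current date  -> " + businessCurrentDate.format(dateFmt));  // 20220202
        System.out.println("datetime      -> " + businessCurrentDate.format(timeFmt));  // 20220202000000
    }
}
```

The three values match the "20220201 20220202 20220202000000" line in the reporter's task log, which is why the current date and datetime still appear one day ahead of the complemented business date.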
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,343
[Feature][python] Fix pydolphinscheduler release to ASF's dist problem
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description We include the pydolphinscheduler source code in the DolphinScheduler 2.0.3 release in ASF's dist directory, combined with the rest of DolphinScheduler, as you can see at https://dist.apache.org/repos/dist/release/dolphinscheduler/2.0.3/ . The problem is that we do not separate the Python code, tarball, and Python distribution wheel from the rest of the source code. We should find a way to support this, and there are examples to follow: * An independent directory that keeps all Python-related packages: [flink](https://dist.apache.org/repos/dist/release/flink/flink-1.14.3/) already implements this by splitting them into a `python` directory * The files that should be included in the python directory: details can be found in [airflow](https://dist.apache.org/repos/dist/release/airflow/2.2.3/)'s ASF distribution, which includes `*.source.tar.gz`, `*.tar.gz`, and `*.py3-none-any.whl` ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8343
https://github.com/apache/dolphinscheduler/pull/8470
891a002beac4b429e20c03aeae77da4c72c39f6c
36964c2b5a63c563354e6528f88bbe6a4a30c4ee
"2022-02-11T03:50:48Z"
java
"2022-02-25T03:06:26Z"
dolphinscheduler-dist/pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <parent> <artifactId>dolphinscheduler</artifactId> <groupId>org.apache.dolphinscheduler</groupId> <version>2.0.4-SNAPSHOT</version> </parent> <modelVersion>4.0.0</modelVersion> <artifactId>dolphinscheduler-dist</artifactId> <name>${project.artifactId}</name> <properties> <maven.deploy.skip>true</maven.deploy.skip> </properties> <dependencies> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-server</artifactId> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-standalone-server</artifactId> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-server</artifactId> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-data-quality</artifactId> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-ui</artifactId> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-tools</artifactId> </dependency> </dependencies> <profiles> <profile> <id>release</id> <build> <plugins> <plugin> <artifactId>maven-assembly-plugin</artifactId> <executions> <execution> <id>dolphinscheduler-bin</id> <phase>package</phase> <goals> <goal>single</goal> </goals> <configuration> <descriptors> <descriptor>src/main/assembly/dolphinscheduler-bin.xml</descriptor> </descriptors> <appendAssemblyId>true</appendAssemblyId> </configuration> </execution> <execution> <id>src</id> <phase>package</phase> <goals> <goal>single</goal> </goals> <configuration> <descriptors> <descriptor>src/main/assembly/dolphinscheduler-src.xml</descriptor> </descriptors> <appendAssemblyId>true</appendAssemblyId> </configuration> </execution> </executions> </plugin> </plugins> </build> </profile> </profiles> <build> <finalName>apache-dolphinscheduler-${project.version}</finalName> </build> </project>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,343
[Feature][python] Fix pydolphinscheduler release to ASF's dist problem
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description We include the pydolphinscheduler source code in the DolphinScheduler 2.0.3 release in ASF's dist directory, combined with the rest of DolphinScheduler, as you can see at https://dist.apache.org/repos/dist/release/dolphinscheduler/2.0.3/ . The problem is that we do not separate the Python code, tarball, and Python distribution wheel from the rest of the source code. We should find a way to support this, and there are examples to follow: * An independent directory that keeps all Python-related packages: [flink](https://dist.apache.org/repos/dist/release/flink/flink-1.14.3/) already implements this by splitting them into a `python` directory * The files that should be included in the python directory: details can be found in [airflow](https://dist.apache.org/repos/dist/release/airflow/2.2.3/)'s ASF distribution, which includes `*.source.tar.gz`, `*.tar.gz`, and `*.py3-none-any.whl` ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8343
https://github.com/apache/dolphinscheduler/pull/8470
891a002beac4b429e20c03aeae77da4c72c39f6c
36964c2b5a63c563354e6528f88bbe6a4a30c4ee
"2022-02-11T03:50:48Z"
java
"2022-02-25T03:06:26Z"
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-python-api.xml
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,343
[Feature][python] Fix pydolphinscheduler release to ASF's dist problem
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description We contain pydolphinscheduler source code in DolphinScheduler version 2.0.3 release in ASF's dist directory, combining with what dolphinscheduler which you can see in https://dist.apache.org/repos/dist/release/dolphinscheduler/2.0.3/ . The problem is we do not separate python code or tar ball and python distribute wheel in out of other source code. we should find a way to support it, and here it is an example in: * And independent directory to keep all python related packages: We already have example in [flink](https://dist.apache.org/repos/dist/release/flink/flink-1.14.3/) to implement it by split to directory `python` * About the detail file should include in python directory: We could found detail in [airflow](https://dist.apache.org/repos/dist/release/airflow/2.2.3/) ASF's distribute wheel, which including `*.source.tar.gz`, `*.tar.gz`, `*.py3-none-any.whl` ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8343
https://github.com/apache/dolphinscheduler/pull/8470
891a002beac4b429e20c03aeae77da4c72c39f6c
36964c2b5a63c563354e6528f88bbe6a4a30c4ee
"2022-02-11T03:50:48Z"
java
"2022-02-25T03:06:26Z"
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-src.xml
<!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd"> <id>src</id> <formats> <format>tar.gz</format> </formats> <includeBaseDirectory>true</includeBaseDirectory> <baseDirectory>${project.build.finalName}-src</baseDirectory> <fileSets> <fileSet> <directory>${basedir}/../</directory> <useDefaultExcludes>true</useDefaultExcludes> <includes> <include>**/*</include> </includes> <excludes> <!-- github ignore --> <exclude>**/.github/**</exclude> <exclude>.travis.yml</exclude> <!-- maven ignore --> <exclude>**/target/**</exclude> <exclude>**/*.class</exclude> <exclude>**/*.jar</exclude> <exclude>**/*.war</exclude> <exclude>**/*.zip</exclude> <exclude>**/*.tar</exclude> <exclude>**/*.tar.gz</exclude> <!-- maven plugin ignore --> <exclude>release.properties</exclude> <exclude>**/pom.xml.releaseBackup</exclude> <exclude>*.gpg</exclude> <!-- node ignore --> <exclude>**/dolphinscheduler-ui/dist/**</exclude> <exclude>**/dolphinscheduler-ui/node/**</exclude> <exclude>**/dolphinscheduler-ui/node_modules/**</exclude> <!-- eclipse ignore --> <exclude>**/.settings/**</exclude> <exclude>**/.project</exclude> <exclude>**/.classpath</exclude> <!-- idea ignore --> <exclude>**/.idea/**</exclude> <exclude>**/*.ipr</exclude> <exclude>**/*.iml</exclude> <exclude>**/*.iws</exclude> <!-- temp ignore --> <exclude>**/logs/**</exclude> <exclude>**/*.log</exclude> <exclude>**/*.doc</exclude> <exclude>**/*.cache</exclude> <exclude>**/*.diff</exclude> <exclude>**/*.patch</exclude> <exclude>**/*.tmp</exclude> <!-- system ignore --> <exclude>**/.DS_Store</exclude> <exclude>**/Thumbs.db</exclude> </excludes> </fileSet> </fileSets> </assembly>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,343
[Feature][python] Fix pydolphinscheduler release to ASF's dist problem
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description We contain pydolphinscheduler source code in DolphinScheduler version 2.0.3 release in ASF's dist directory, combining with what dolphinscheduler which you can see in https://dist.apache.org/repos/dist/release/dolphinscheduler/2.0.3/ . The problem is we do not separate python code or tar ball and python distribute wheel in out of other source code. we should find a way to support it, and here it is an example in: * And independent directory to keep all python related packages: We already have example in [flink](https://dist.apache.org/repos/dist/release/flink/flink-1.14.3/) to implement it by split to directory `python` * About the detail file should include in python directory: We could found detail in [airflow](https://dist.apache.org/repos/dist/release/airflow/2.2.3/) ASF's distribute wheel, which including `*.source.tar.gz`, `*.tar.gz`, `*.py3-none-any.whl` ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8343
https://github.com/apache/dolphinscheduler/pull/8470
891a002beac4b429e20c03aeae77da4c72c39f6c
36964c2b5a63c563354e6528f88bbe6a4a30c4ee
"2022-02-11T03:50:48Z"
java
"2022-02-25T03:06:26Z"
dolphinscheduler-python/pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler</artifactId> <version>2.0.4-SNAPSHOT</version> </parent> <artifactId>dolphinscheduler-python</artifactId> <name>${project.artifactId}</name> <packaging>jar</packaging> <dependencies> <!-- dolphinscheduler --> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> </dependency> <!--springboot--> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-web</artifactId> <exclusions> <exclusion> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-tomcat</artifactId> </exclusion> <exclusion> <artifactId>log4j-to-slf4j</artifactId> <groupId>org.apache.logging.log4j</groupId> </exclusion> </exclusions> </dependency> <dependency> <groupId>net.sf.py4j</groupId> <artifactId>py4j</artifactId> </dependency> </dependencies> <build> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-jar-plugin</artifactId> <configuration> <excludes> <exclude>*.yaml</exclude> <exclude>*.xml</exclude> </excludes> </configuration> </plugin> <plugin> <artifactId>maven-assembly-plugin</artifactId> <executions> <execution> <id>dolphinscheduler-python-gateway-server</id> <phase>package</phase> <goals> <goal>single</goal> </goals> <configuration> <finalName>python-gateway-server</finalName> <descriptors> <descriptor>src/main/assembly/dolphinscheduler-python-gateway-server.xml</descriptor> </descriptors> <appendAssemblyId>false</appendAssemblyId> </configuration> </execution> </executions> </plugin> </plugins> </build> <profiles> <profile> <id>docker</id> <build> <plugins> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>exec-maven-plugin</artifactId> </plugin> </plugins> </build> </profile> </profiles> </project>
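The dolphinscheduler-python module above depends on net.sf.py4j:py4j and packages a "python-gateway-server" assembly; the pydolphinscheduler package released to ASF dist talks to that gateway over py4j (its setup.py, in the next record, lists py4j~=0.10 as a runtime dependency). As a rough illustration of the mechanism only — this is a hypothetical class, not the project's actual gateway server — a minimal py4j gateway in Java looks like this:

```java
import py4j.GatewayServer;

// Hypothetical entry point exposed to Python clients; illustrative only.
public class PythonGatewaySketch {

    // A Python client connected through py4j can call this method.
    public String ping() {
        return "pong";
    }

    public static void main(String[] args) {
        // Listens on py4j's default port (25333); a Python JavaGateway can then
        // invoke gateway.entry_point.ping() from the pydolphinscheduler side.
        GatewayServer server = new GatewayServer(new PythonGatewaySketch());
        server.start();
    }
}
```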
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,343
[Feature][python] Fix pydolphinscheduler release to ASF's dist problem
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description We contain pydolphinscheduler source code in DolphinScheduler version 2.0.3 release in ASF's dist directory, combining with what dolphinscheduler which you can see in https://dist.apache.org/repos/dist/release/dolphinscheduler/2.0.3/ . The problem is we do not separate python code or tar ball and python distribute wheel in out of other source code. we should find a way to support it, and here it is an example in: * And independent directory to keep all python related packages: We already have example in [flink](https://dist.apache.org/repos/dist/release/flink/flink-1.14.3/) to implement it by split to directory `python` * About the detail file should include in python directory: We could found detail in [airflow](https://dist.apache.org/repos/dist/release/airflow/2.2.3/) ASF's distribute wheel, which including `*.source.tar.gz`, `*.tar.gz`, `*.py3-none-any.whl` ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8343
https://github.com/apache/dolphinscheduler/pull/8470
891a002beac4b429e20c03aeae77da4c72c39f6c
36964c2b5a63c563354e6528f88bbe6a4a30c4ee
"2022-02-11T03:50:48Z"
java
"2022-02-25T03:06:26Z"
dolphinscheduler-python/pydolphinscheduler/setup.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The script for setting up pydolphinscheduler.""" import sys from os.path import dirname, join from setuptools import find_packages, setup if sys.version_info[0] < 3: raise Exception( "pydolphinscheduler does not support Python 2. Please upgrade to Python 3." ) version = "0.1.0" # Start package required prod = [ "click>=8.0.0", "py4j~=0.10", ] doc = [ "sphinx>=4.3", "sphinx_rtd_theme>=1.0", "sphinx-click>=3.0", ] test = [ "pytest>=6.2", "freezegun>=1.1", "coverage>=6.1", ] style = [ "flake8>=4.0", "flake8-docstrings>=1.6", "flake8-black>=0.2", "isort>=5.10", ] dev = style + test + doc all_dep = prod + dev # End package required def read(*names, **kwargs): """Read file content from given file path.""" return open( join(dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8") ).read() setup( name="apache-dolphinscheduler", version=version, license="Apache License 2.0", description="Apache DolphinScheduler Python API", long_description=read("README.md"), # Make sure pypi is expecting markdown long_description_content_type="text/markdown", author="Apache Software Foundation", author_email="dev@dolphinscheduler.apache.org", url="https://dolphinscheduler.apache.org/", python_requires=">=3.6", keywords=[ "dolphinscheduler", "workflow", "scheduler", "taskflow", ], project_urls={ "Homepage": "https://dolphinscheduler.apache.org", "Documentation": "https://dolphinscheduler.apache.org/python/index.html", "Source": "https://github.com/apache/dolphinscheduler/dolphinscheduler-python/pydolphinscheduler", "Issue Tracker": "https://github.com/apache/dolphinscheduler/issues", "Discussion": "https://github.com/apache/dolphinscheduler/discussions", "Twitter": "https://twitter.com/dolphinschedule", }, packages=find_packages(where="src"), package_dir={"": "src"}, include_package_data=True, package_data={ "examples": ["examples.tutorial.py"], }, platforms=["any"], classifiers=[ # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers "Development Status :: 3 - Alpha", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: Unix", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: User Interfaces", ], install_requires=prod, extras_require={ "all": all_dep, "dev": dev, "style": style, "test": test, "doc": doc, }, 
entry_points={ "console_scripts": [ "pydolphinscheduler = pydolphinscheduler.cli.commands:cli", ], }, )
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,343
[Feature][python] Fix pydolphinscheduler release to ASF's dist problem
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description We include the pydolphinscheduler source code in the DolphinScheduler 2.0.3 release in ASF's dist directory, combined with the rest of DolphinScheduler, as you can see at https://dist.apache.org/repos/dist/release/dolphinscheduler/2.0.3/ . The problem is that we do not separate the Python code, tarball, and Python distribution wheel from the rest of the source code. We should find a way to support this, and there are examples to follow: * An independent directory that keeps all Python-related packages: [flink](https://dist.apache.org/repos/dist/release/flink/flink-1.14.3/) already implements this by splitting them into a `python` directory * The files that should be included in the python directory: details can be found in [airflow](https://dist.apache.org/repos/dist/release/airflow/2.2.3/)'s ASF distribution, which includes `*.source.tar.gz`, `*.tar.gz`, and `*.py3-none-any.whl` ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8343
https://github.com/apache/dolphinscheduler/pull/8470
891a002beac4b429e20c03aeae77da4c72c39f6c
36964c2b5a63c563354e6528f88bbe6a4a30c4ee
"2022-02-11T03:50:48Z"
java
"2022-02-25T03:06:26Z"
pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler</artifactId> <version>2.0.4-SNAPSHOT</version> <packaging>pom</packaging> <name>${project.artifactId}</name> <url>https://dolphinscheduler.apache.org</url> <description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. </description> <scm> <connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection> <developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection> <url>https://github.com/apache/dolphinscheduler</url> <tag>HEAD</tag> </scm> <mailingLists> <mailingList> <name>DolphinScheduler Developer List</name> <post>dev@dolphinscheduler.apache.org</post> <subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe> <unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe> </mailingList> </mailingLists> <parent> <groupId>org.apache</groupId> <artifactId>apache</artifactId> <version>25</version> </parent> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <curator.version>4.3.0</curator.version> <zookeeper.version>3.4.14</zookeeper.version> <spring.version>5.3.12</spring.version> <spring.boot.version>2.5.6</spring.boot.version> <java.version>1.8</java.version> <logback.version>1.2.3</logback.version> <hadoop.version>2.7.3</hadoop.version> <quartz.version>2.3.2</quartz.version> <jackson.version>2.10.5</jackson.version> <mybatis-plus.version>3.4.3</mybatis-plus.version> <mybatis.spring.version>2.0.6</mybatis.spring.version> <cron.utils.version>9.1.3</cron.utils.version> <druid.version>1.2.4</druid.version> <h2.version>1.4.200</h2.version> <commons.codec.version>1.11</commons.codec.version> <commons.logging.version>1.1.1</commons.logging.version> <httpclient.version>4.4.1</httpclient.version> <httpcore.version>4.4.1</httpcore.version> <junit.version>4.12</junit.version> <mysql.connector.version>8.0.16</mysql.connector.version> <slf4j.api.version>1.7.5</slf4j.api.version> <slf4j.log4j12.version>1.7.5</slf4j.log4j12.version> <commons.collections.version>3.2.2</commons.collections.version> <commons.httpclient>3.0.1</commons.httpclient> <commons.beanutils.version>1.9.4</commons.beanutils.version> 
<commons.configuration.version>1.10</commons.configuration.version> <commons.lang.version>2.6</commons.lang.version> <commons.email.version>1.5</commons.email.version> <poi.version>4.1.2</poi.version> <javax.servlet.api.version>3.1.0</javax.servlet.api.version> <commons.collections4.version>4.1</commons.collections4.version> <guava.version>24.1-jre</guava.version> <postgresql.version>42.2.5</postgresql.version> <hive.jdbc.version>2.1.0</hive.jdbc.version> <commons.io.version>2.4</commons.io.version> <oshi.core.version>6.1.1</oshi.core.version> <clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version> <mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version> <presto.jdbc.version>0.238.1</presto.jdbc.version> <spotbugs.version>3.1.12</spotbugs.version> <checkstyle.version>3.1.2</checkstyle.version> <curator.test>2.12.0</curator.test> <frontend-maven-plugin.version>1.6</frontend-maven-plugin.version> <maven-compiler-plugin.version>3.3</maven-compiler-plugin.version> <maven-assembly-plugin.version>3.3.0</maven-assembly-plugin.version> <maven-release-plugin.version>2.5.3</maven-release-plugin.version> <maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version> <maven-source-plugin.version>2.4</maven-source-plugin.version> <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version> <maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version> <rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version> <jacoco.version>0.8.7</jacoco.version> <jcip.version>1.0</jcip.version> <maven.deploy.skip>false</maven.deploy.skip> <cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version> <servlet-api.version>2.5</servlet-api.version> <swagger.version>1.9.3</swagger.version> <springfox.version>2.9.2</springfox.version> <swagger-models.version>1.5.24</swagger-models.version> <guava-retry.version>2.0.0</guava-retry.version> <protostuff.version>1.7.2</protostuff.version> <reflections.version>0.9.12</reflections.version> <byte-buddy.version>1.9.16</byte-buddy.version> <java-websocket.version>1.5.1</java-websocket.version> <py4j.version>0.10.9</py4j.version> <auto-service.version>1.0.1</auto-service.version> <jacoco.skip>false</jacoco.skip> <netty.version>4.1.53.Final</netty.version> <maven-jar-plugin.version>3.2.0</maven-jar-plugin.version> <powermock.version>2.0.9</powermock.version> <jsr305.version>3.0.0</jsr305.version> <commons-compress.version>1.19</commons-compress.version> <commons-math3.version>3.1.1</commons-math3.version> <error_prone_annotations.version>2.5.1</error_prone_annotations.version> <exec-maven-plugin.version>3.0.0</exec-maven-plugin.version> <janino.version>3.1.6</janino.version> <kubernetes.version>5.8.0</kubernetes.version> <hibernate.validator.version>6.2.2.Final</hibernate.validator.version> <docker.hub>apache</docker.hub> <docker.repo>${project.name}</docker.repo> <docker.tag>${project.version}</docker.tag> </properties> <dependencyManagement> <dependencies> <dependency> <groupId>io.netty</groupId> <artifactId>netty-bom</artifactId> <version>${netty.version}</version> <scope>import</scope> <type>pom</type> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>${spring.boot.version}</version> <type>pom</type> <scope>import</scope> </dependency> <dependency> <groupId>io.netty</groupId> <artifactId>netty-all</artifactId> <version>${netty.version}</version> </dependency> <dependency> <groupId>org.java-websocket</groupId> <artifactId>Java-WebSocket</artifactId> 
<version>${java-websocket.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-boot-starter</artifactId> <version>${mybatis-plus.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus</artifactId> <version>${mybatis-plus.version}</version> </dependency> <!-- quartz--> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz-jobs</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>com.cronutils</groupId> <artifactId>cron-utils</artifactId> <version>${cron.utils.version}</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>druid</artifactId> <version>${druid.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-core</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-context</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-beans</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-tx</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-jdbc</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-test</artifactId> <version>${spring.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-master</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-worker</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-log-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-standalone-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-common</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-dao</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-remote</artifactId> <version>${project.version}</version> 
</dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-service</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-meter</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-spi</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-data-quality</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-python</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-dingtalk</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-email</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-feishu</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-http</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-script</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-slack</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-wechat</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-pagerduty</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-webexteams</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-telegram</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-zookeeper</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-all</artifactId> <version>${project.version}</version> </dependency> <dependency> 
<groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-clickhouse</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-db2</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-hive</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-mysql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-oracle</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-postgresql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-datasource-sqlserver</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-datax</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-flink</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-http</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-mr</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-pigeon</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-procedure</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-python</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-shell</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-spark</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-sql</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-sqoop</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-seatunnel</artifactId> <version>${project.version}</version> </dependency> <dependency> 
<groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-ui</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-tools</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-task-dataquality</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-framework</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> <version>${zookeeper.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> <exclusion> <artifactId>netty</artifactId> <groupId>io.netty</groupId> </exclusion> <exclusion> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-annotations</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-client</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>log4j-1.2-api</groupId> <artifactId>org.apache.logging.log4j</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-recipes</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-test</artifactId> <version>${curator.test}</version> </dependency> <dependency> <groupId>commons-codec</groupId> <artifactId>commons-codec</artifactId> <version>${commons.codec.version}</version> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>${commons.logging.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>${httpclient.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpcore</artifactId> <version>${httpcore.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-databind</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> </dependency> <!--protostuff--> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-core</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-runtime</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>net.bytebuddy</groupId> <artifactId>byte-buddy</artifactId> <version>${byte-buddy.version}</version> </dependency> <dependency> <groupId>org.reflections</groupId> <artifactId>reflections</artifactId> <version>${reflections.version}</version> </dependency> <dependency> <groupId>junit</groupId> 
<artifactId>junit</artifactId> <version>${junit.version}</version> </dependency> <dependency> <groupId>mysql</groupId> <artifactId>mysql-connector-java</artifactId> <version>${mysql.connector.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>com.h2database</groupId> <artifactId>h2</artifactId> <version>${h2.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>${slf4j.api.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>${slf4j.log4j12.version}</version> </dependency> <dependency> <groupId>commons-collections</groupId> <artifactId>commons-collections</artifactId> <version>${commons.collections.version}</version> </dependency> <dependency> <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>${commons.httpclient}</version> </dependency> <dependency> <groupId>commons-beanutils</groupId> <artifactId>commons-beanutils</artifactId> <version>${commons.beanutils.version}</version> </dependency> <dependency> <groupId>commons-configuration</groupId> <artifactId>commons-configuration</artifactId> <version>${commons.configuration.version}</version> </dependency> <dependency> <groupId>commons-lang</groupId> <artifactId>commons-lang</artifactId> <version>${commons.lang.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-email</artifactId> <version>${commons.email.version}</version> </dependency> <!--excel poi--> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi</artifactId> <version>${poi.version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml</artifactId> <version>${poi.version}</version> </dependency> <!-- hadoop --> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> <exclusion> <artifactId>slf4j-log4j12</artifactId> <groupId>org.slf4j</groupId> </exclusion> <exclusion> <artifactId>com.sun.jersey</artifactId> <groupId>jersey-json</groupId> </exclusion> <exclusion> <groupId>junit</groupId> <artifactId>junit</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-common</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-aws</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-collections4</artifactId> <version>${commons.collections4.version}</version> </dependency> <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> <version>${guava.version}</version> </dependency> <dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> <version>${postgresql.version}</version> </dependency> 
<dependency> <groupId>org.apache.hive</groupId> <artifactId>hive-jdbc</artifactId> <version>${hive.jdbc.version}</version> </dependency> <dependency> <groupId>commons-io</groupId> <artifactId>commons-io</artifactId> <version>${commons.io.version}</version> </dependency> <dependency> <groupId>com.github.oshi</groupId> <artifactId>oshi-core</artifactId> <version>${oshi.core.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-simple</artifactId> </exclusion> <exclusion> <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-api</artifactId> </exclusion> <exclusion> <groupId>org.hamcrest</groupId> <artifactId>hamcrest</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>ru.yandex.clickhouse</groupId> <artifactId>clickhouse-jdbc</artifactId> <version>${clickhouse.jdbc.version}</version> </dependency> <dependency> <groupId>com.microsoft.sqlserver</groupId> <artifactId>mssql-jdbc</artifactId> <version>${mssql.jdbc.version}</version> </dependency> <dependency> <groupId>com.facebook.presto</groupId> <artifactId>presto-jdbc</artifactId> <version>${presto.jdbc.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> <version>${servlet-api.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>javax.servlet-api</artifactId> <version>${javax.servlet.api.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger-ui</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.swagger</groupId> <artifactId>swagger-models</artifactId> <version>${swagger-models.version}</version> </dependency> <dependency> <groupId>com.github.xiaoymin</groupId> <artifactId>swagger-bootstrap-ui</artifactId> <version>${swagger.version}</version> </dependency> <dependency> <groupId>com.github.rholder</groupId> <artifactId>guava-retrying</artifactId> <version>${guava-retry.version}</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm</artifactId> <version>6.2.1</version> </dependency> <dependency> <groupId>javax.activation</groupId> <artifactId>activation</artifactId> <version>1.1</version> </dependency> <dependency> <groupId>com.sun.mail</groupId> <artifactId>javax.mail</artifactId> <version>1.6.2</version> </dependency> <dependency> <groupId>net.sf.py4j</groupId> <artifactId>py4j</artifactId> <version>${py4j.version}</version> </dependency> <dependency> <groupId>org.codehaus.janino</groupId> <artifactId>janino</artifactId> <version>${janino.version}</version> </dependency> <dependency> <groupId>com.google.code.findbugs</groupId> <artifactId>jsr305</artifactId> <version>${jsr305.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-compress</artifactId> <version>${commons-compress.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-math3</artifactId> <version>${commons-math3.version}</version> </dependency> <dependency> <groupId>com.google.errorprone</groupId> <artifactId>error_prone_annotations</artifactId> <version>${error_prone_annotations.version}</version> </dependency> <dependency> <groupId>io.fabric8</groupId> <artifactId>kubernetes-client</artifactId> <version>${kubernetes.version}</version> </dependency> 
<!-- https://mvnrepository.com/artifact/org.hibernate.validator/hibernate-validator --> <dependency> <groupId>org.hibernate.validator</groupId> <artifactId>hibernate-validator</artifactId> <version>${hibernate.validator.version}</version> </dependency> </dependencies> </dependencyManagement> <build> <pluginManagement> <plugins> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>rpm-maven-plugin</artifactId> <version>${rpm-maven-plugion.version}</version> <inherited>false</inherited> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>${java.version}</source> <target>${java.version}</target> <testSource>${java.version}</testSource> <testTarget>${java.version}</testTarget> </configuration> <version>${maven-compiler-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <tagNameFormat>@{project.version}</tagNameFormat> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> <version>${maven-assembly-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <configuration> <source>8</source> <failOnError>false</failOnError> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-dependency-plugin</artifactId> <version>${maven-dependency-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-jar-plugin</artifactId> <version>${maven-jar-plugin.version}</version> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>exec-maven-plugin</artifactId> <version>${exec-maven-plugin.version}</version> <executions> <execution> <id>docker-build</id> <phase>package</phase> <goals> <goal>exec</goal> </goals> <configuration> <environmentVariables> <DOCKER_BUILDKIT>1</DOCKER_BUILDKIT> </environmentVariables> <executable>docker</executable> <workingDirectory>${project.basedir}</workingDirectory> <arguments> <argument>build</argument> <argument>--no-cache</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:${docker.tag}</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:latest</argument> <argument>${project.basedir}</argument> <argument>--file=src/main/docker/Dockerfile</argument> </arguments> </configuration> </execution> <execution> <id>docker-push</id> <phase>deploy</phase> <goals> <goal>exec</goal> </goals> <configuration> <environmentVariables> <DOCKER_BUILDKIT>1</DOCKER_BUILDKIT> </environmentVariables> <executable>docker</executable> <workingDirectory>${project.basedir}</workingDirectory> <arguments> <argument>buildx</argument> <argument>build</argument> <argument>--no-cache</argument> <argument>--push</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:${docker.tag}</argument> <argument>-t</argument> <argument>${docker.hub}/${docker.repo}:latest</argument> <argument>${project.basedir}</argument> <argument>--file=src/main/docker/Dockerfile</argument> </arguments> </configuration> </execution> </executions> </plugin> </plugins> </pluginManagement> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> 
<executions> <execution> <id>attach-javadocs</id> <goals> <goal>jar</goal> </goals> </execution> </executions> <configuration> <aggregate>true</aggregate> <charset>${project.build.sourceEncoding}</charset> <encoding>${project.build.sourceEncoding}</encoding> <docencoding>${project.build.sourceEncoding}</docencoding> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <autoVersionSubmodules>true</autoVersionSubmodules> <tagNameFormat>@{project.version}</tagNameFormat> <tagBase>${project.version}</tagBase> </configuration> <dependencies> <dependency> <groupId>org.apache.maven.scm</groupId> <artifactId>maven-scm-provider-jgit</artifactId> <version>1.9.5</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>${maven-compiler-plugin.version}</version> <configuration> <source>${java.version}</source> <target>${java.version}</target> <encoding>${project.build.sourceEncoding}</encoding> <skip>false</skip><!--not skip compile test classes--> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>${maven-surefire-plugin.version}</version> <dependencies> <dependency> <groupId>org.apache.maven.surefire</groupId> <artifactId>surefire-junit4</artifactId> <version>${maven-surefire-plugin.version}</version> </dependency> </dependencies> <configuration> <systemPropertyVariables> <jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile> </systemPropertyVariables> </configuration> </plugin> <!-- jenkins plugin jacoco report--> <plugin> <groupId>org.jacoco</groupId> <artifactId>jacoco-maven-plugin</artifactId> <version>${jacoco.version}</version> <configuration> <skip>${jacoco.skip}</skip> <dataFile>${project.build.directory}/jacoco.exec</dataFile> </configuration> <executions> <execution> <id>default-instrument</id> <goals> <goal>instrument</goal> </goals> </execution> <execution> <id>default-restore-instrumented-classes</id> <goals> <goal>restore-instrumented-classes</goal> </goals> <configuration> <excludes>com/github/dreamhead/moco/*</excludes> </configuration> </execution> <execution> <id>default-report</id> <goals> <goal>report</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-maven-plugin</artifactId> <version>${spotbugs.version}</version> <configuration> <xmlOutput>true</xmlOutput> <threshold>medium</threshold> <effort>default</effort> <excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile> <failOnError>true</failOnError> </configuration> <dependencies> <dependency> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs</artifactId> <version>4.0.0-beta4</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-checkstyle-plugin</artifactId> <version>${checkstyle.version}</version> <dependencies> <dependency> <groupId>com.puppycrawl.tools</groupId> <artifactId>checkstyle</artifactId> <version>8.45</version> </dependency> </dependencies> <configuration> <consoleOutput>true</consoleOutput> <encoding>UTF-8</encoding> <configLocation>style/checkstyle.xml</configLocation> <failOnViolation>true</failOnViolation> <violationSeverity>warning</violationSeverity> 
<includeTestSourceDirectory>true</includeTestSourceDirectory> <sourceDirectories> <sourceDirectory>${project.build.sourceDirectory}</sourceDirectory> </sourceDirectories> <excludes>**\/generated-sources\/</excludes> </configuration> <executions> <execution> <phase>compile</phase> <goals> <goal>check</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>${cobertura-maven-plugin.version}</version> <configuration> <check> </check> <aggregate>true</aggregate> <outputDirectory>./target/cobertura</outputDirectory> <encoding>${project.build.sourceEncoding}</encoding> <quiet>true</quiet> <format>xml</format> <instrumentation> <ignoreTrivial>true</ignoreTrivial> </instrumentation> </configuration> </plugin> <plugin> <artifactId>maven-source-plugin</artifactId> <version>${maven-source-plugin.version}</version> <executions> <execution> <id>attach-sources</id> <goals> <goal>jar</goal> </goals> </execution> </executions> </plugin> </plugins> </build> <dependencies> <!-- NOTE: only development / test phase dependencies (scope = test / provided) that won't be packaged into final jar can be declared here. For example: annotation processors, test dependencies that are used by most of the submodules. --> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <scope>test</scope> </dependency> <dependency> <groupId>org.jacoco</groupId> <artifactId>org.jacoco.agent</artifactId> <version>${jacoco.version}</version> <classifier>runtime</classifier> <scope>test</scope> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-configuration-processor</artifactId> <optional>true</optional> </dependency> <dependency> <groupId>com.google.auto.service</groupId> <artifactId>auto-service</artifactId> <version>${auto-service.version}</version> <scope>provided</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-api-mockito2</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-module-junit4</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-core</artifactId> <version>${powermock.version}</version> <scope>test</scope> </dependency> </dependencies> <modules> <module>dolphinscheduler-alert</module> <module>dolphinscheduler-spi</module> <module>dolphinscheduler-registry</module> <module>dolphinscheduler-task-plugin</module> <module>dolphinscheduler-ui</module> <module>dolphinscheduler-server</module> <module>dolphinscheduler-common</module> <module>dolphinscheduler-api</module> <module>dolphinscheduler-dao</module> <module>dolphinscheduler-dist</module> <module>dolphinscheduler-remote</module> <module>dolphinscheduler-service</module> <module>dolphinscheduler-microbench</module> <module>dolphinscheduler-data-quality</module> <module>dolphinscheduler-standalone-server</module> <module>dolphinscheduler-datasource-plugin</module> <module>dolphinscheduler-python</module> <module>dolphinscheduler-meter</module> <module>dolphinscheduler-master</module> <module>dolphinscheduler-worker</module> <module>dolphinscheduler-log-server</module> <module>dolphinscheduler-tools</module> <module>dolphinscheduler-ui-next</module> </modules> </project>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,577
[Bug][UI Next][V1.0.0-Alpha] Workflow instance view log error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The workflow instance shows the log path instead of the log content when I view the log. ![image](https://user-images.githubusercontent.com/8847400/155951618-6b15c95a-ff75-42b7-ab2a-6226285e2e17.png) ### What you expected to happen Show the log content. ### How to reproduce Open workflow -> workflow instance. Right-click the DAG node and select the view log button to view the log. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8577
https://github.com/apache/dolphinscheduler/pull/8578
87927c43380741c78dfa719fe79120818370be3b
d7f7686880e4807668693ae8b5478b42aa4757b5
"2022-02-28T08:44:41Z"
java
"2022-02-28T09:15:56Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/dag-context-menu.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { genTaskCodeList } from '@/service/modules/task-definition' import type { Cell } from '@antv/x6' import { defineComponent, onMounted, PropType, inject, ref, computed } from 'vue' import { useI18n } from 'vue-i18n' import { useRoute } from 'vue-router' import styles from './menu.module.scss' import { uuid } from '@/utils/common' const props = { cell: { type: Object as PropType<Cell>, require: true }, visible: { type: Boolean as PropType<boolean>, default: true }, left: { type: Number as PropType<number>, default: 0 }, top: { type: Number as PropType<number>, default: 0 }, releaseState: { type: String as PropType<string>, default: 'OFFLINE' } } export default defineComponent({ name: 'dag-context-menu', props, emits: ['hide', 'start', 'edit', 'viewLog', 'copyTask', 'removeTasks'], setup(props, ctx) { const graph = inject('graph', ref()) const route = useRoute() const projectCode = Number(route.params.projectCode) const startAvailable = computed( () => route.name === 'workflow-definition-detail' && props.releaseState !== 'NOT_RELEASE' ) const hide = () => { ctx.emit('hide', false) } const startRunning = () => { ctx.emit('start') } const handleEdit = () => { ctx.emit('edit', Number(props.cell?.id)) } const handleViewLog = () => { ctx.emit('viewLog') } const handleCopy = () => { const genNums = 1 const type = props.cell?.data.taskType const taskName = uuid(props.cell?.data.taskName + '_') const targetCode = Number(props.cell?.id) const flag = props.cell?.data.flag genTaskCodeList(genNums, projectCode).then((res) => { const [code] = res ctx.emit('copyTask', taskName, code, targetCode, type, flag, { x: props.left + 100, y: props.top + 100 }) }) } const handleDelete = () => { graph.value?.removeCell(props.cell) ctx.emit('removeTasks', [Number(props.cell?.id)]) } onMounted(() => { document.addEventListener('click', () => { hide() }) }) return { startAvailable, startRunning, handleEdit, handleCopy, handleDelete, handleViewLog } }, render() { const { t } = useI18n() return ( this.visible && ( <div class={styles['dag-context-menu']} style={{ left: `${this.left}px`, top: `${this.top}px` }} > <div class={`${styles['menu-item']} ${ !this.startAvailable ? styles['disabled'] : '' } `} onClick={this.startRunning} > {t('project.node.start')} </div> <div class={`${styles['menu-item']} ${ this.releaseState === 'ONLINE' ? styles['disabled'] : '' } `} onClick={this.handleEdit} > {t('project.node.edit')} </div> <div class={`${styles['menu-item']} ${ this.releaseState === 'ONLINE' ? styles['disabled'] : '' } `} onClick={this.handleCopy} > {t('project.node.copy')} </div> <div class={`${styles['menu-item']} ${ this.releaseState === 'ONLINE' ? 
styles['disabled'] : '' } `} onClick={this.handleDelete} > {t('project.node.delete')} </div> <div class={`${styles['menu-item']}`} onClick={this.handleViewLog}> {t('project.node.view_log')} </div> </div> ) ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,577
[Bug][UI Next][V1.0.0-Alpha] Workflow instance view log error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The workflow instance shows the log path instead of the log content when I view the log. ![image](https://user-images.githubusercontent.com/8847400/155951618-6b15c95a-ff75-42b7-ab2a-6226285e2e17.png) ### What you expected to happen Show the log content. ### How to reproduce Open workflow -> workflow instance. Right-click the DAG node and select the view log button to view the log. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8577
https://github.com/apache/dolphinscheduler/pull/8578
87927c43380741c78dfa719fe79120818370be3b
d7f7686880e4807668693ae8b5478b42aa4757b5
"2022-02-28T08:44:41Z"
java
"2022-02-28T09:15:56Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/index.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Graph } from '@antv/x6' import { defineComponent, ref, provide, PropType, toRef, watch, onBeforeUnmount } from 'vue' import { useI18n } from 'vue-i18n' import DagToolbar from './dag-toolbar' import DagCanvas from './dag-canvas' import DagSidebar from './dag-sidebar' import Styles from './dag.module.scss' import DagAutoLayoutModal from './dag-auto-layout-modal' import { useGraphAutoLayout, useGraphBackfill, useDagDragAndDrop, useTaskEdit, useBusinessMapper, useNodeMenu, useNodeStatus } from './dag-hooks' import { useThemeStore } from '@/store/theme/theme' import VersionModal from '../../definition/components/version-modal' import { WorkflowDefinition } from './types' import DagSaveModal from './dag-save-modal' import ContextMenuItem from './dag-context-menu' import TaskModal from '@/views/projects/task/components/node/detail-modal' import StartModal from '@/views/projects/workflow/definition/components/start-modal' import LogModal from '@/views/projects/workflow/instance/components/log-modal' import './x6-style.scss' const props = { // If this prop is passed, it means from definition detail instance: { type: Object as PropType<any>, default: undefined }, definition: { type: Object as PropType<WorkflowDefinition>, default: undefined }, readonly: { type: Boolean as PropType<boolean>, default: false }, projectCode: { type: Number as PropType<number>, default: 0 } } export default defineComponent({ name: 'workflow-dag', props, emits: ['refresh', 'save'], setup(props, context) { const { t } = useI18n() const theme = useThemeStore() // Whether the graph can be operated provide('readonly', toRef(props, 'readonly')) const graph = ref<Graph>() provide('graph', graph) // Auto layout modal const { visible: layoutVisible, toggle: layoutToggle, formValue, formRef, submit, cancel } = useGraphAutoLayout({ graph }) // Edit task const { taskConfirm, taskModalVisible, currTask, taskCancel, appendTask, editTask, copyTask, taskDefinitions, removeTasks } = useTaskEdit({ graph, definition: toRef(props, 'definition') }) // Right click cell const { menuCell, pageX, pageY, menuVisible, startModalShow, logModalShow, menuHide, menuStart, viewLog, hideLog } = useNodeMenu({ graph }) const statusTimerRef = ref() const { refreshTaskStatus } = useNodeStatus({ graph }) const { onDragStart, onDrop } = useDagDragAndDrop({ graph, readonly: toRef(props, 'readonly'), appendTask }) // backfill useGraphBackfill({ graph, definition: toRef(props, 'definition') }) // version modal const versionModalShow = ref(false) const versionToggle = (bool: boolean) => { if (typeof bool === 'boolean') { versionModalShow.value = bool } else { versionModalShow.value = !versionModalShow.value } } const refreshDetail = () => { context.emit('refresh') 
versionModalShow.value = false } // Save modal const saveModalShow = ref(false) const saveModelToggle = (bool: boolean) => { if (typeof bool === 'boolean') { saveModalShow.value = bool } else { saveModalShow.value = !versionModalShow.value } } const { getConnects, getLocations } = useBusinessMapper() const onSave = (saveForm: any) => { const edges = graph.value?.getEdges() || [] const nodes = graph.value?.getNodes() || [] if (!nodes.length) { window.$message.error(t('project.dag.node_not_created')) saveModelToggle(false) return } const connects = getConnects(nodes, edges, taskDefinitions.value as any) const locations = getLocations(nodes) context.emit('save', { taskDefinitions: taskDefinitions.value, saveForm, connects, locations }) saveModelToggle(false) } watch( () => props.definition, () => { if (props.instance) { refreshTaskStatus() statusTimerRef.value = setInterval(() => refreshTaskStatus(), 9000) } } ) onBeforeUnmount(() => clearInterval(statusTimerRef.value)) return () => ( <div class={[ Styles.dag, Styles[`dag-${theme.darkTheme ? 'dark' : 'light'}`] ]} > <DagToolbar layoutToggle={layoutToggle} instance={props.instance} definition={props.definition} onVersionToggle={versionToggle} onSaveModelToggle={saveModelToggle} onRemoveTasks={removeTasks} onRefresh={refreshTaskStatus} /> <div class={Styles.content}> <DagSidebar onDragStart={onDragStart} /> <DagCanvas onDrop={onDrop} /> </div> <DagAutoLayoutModal visible={layoutVisible.value} submit={submit} cancel={cancel} formValue={formValue} formRef={formRef} /> {!!props.definition && ( <VersionModal v-model:row={props.definition.processDefinition} v-model:show={versionModalShow.value} onUpdateList={refreshDetail} /> )} <DagSaveModal v-model:show={saveModalShow.value} onSave={onSave} definition={props.definition} /> <TaskModal readonly={props.readonly} show={taskModalVisible.value} projectCode={props.projectCode} data={currTask.value as any} onSubmit={taskConfirm} onCancel={taskCancel} /> <ContextMenuItem cell={menuCell.value} visible={menuVisible.value} left={pageX.value} top={pageY.value} releaseState={props.definition?.processDefinition.releaseState} onHide={menuHide} onStart={menuStart} onEdit={editTask} onCopyTask={copyTask} onRemoveTasks={removeTasks} onViewLog={viewLog} /> {!!props.definition && ( <StartModal v-model:row={props.definition.processDefinition} v-model:show={startModalShow.value} /> )} {!!props.instance && logModalShow.value && ( <LogModal v-model:show={logModalShow.value} taskInstanceId={props.instance.id} taskInstanceType={props.instance.taskType} onHideLog={hideLog} /> )} </div> ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,577
[Bug][UI Next][V1.0.0-Alpha] Workflow instance view log error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The workflow instance shows the log path instead of the log content when I view the log. ![image](https://user-images.githubusercontent.com/8847400/155951618-6b15c95a-ff75-42b7-ab2a-6226285e2e17.png) ### What you expected to happen Show the log content. ### How to reproduce Open workflow -> workflow instance. Right-click the DAG node and select the view log button to view the log. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8577
https://github.com/apache/dolphinscheduler/pull/8578
87927c43380741c78dfa719fe79120818370be3b
d7f7686880e4807668693ae8b5478b42aa4757b5
"2022-02-28T08:44:41Z"
java
"2022-02-28T09:15:56Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/use-node-menu.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Ref } from 'vue' import { onMounted, ref } from 'vue' import type { Graph, Cell } from '@antv/x6' interface Options { graph: Ref<Graph | undefined> } /** * Get position of the right-clicked Cell. */ export function useNodeMenu(options: Options) { const { graph } = options const startModalShow = ref(false) const logModalShow = ref(false) const menuVisible = ref(false) const pageX = ref() const pageY = ref() const menuCell = ref<Cell>() const menuHide = () => { menuVisible.value = false // unlock scroller graph.value?.unlockScroller() } const menuStart = () => { startModalShow.value = true } const viewLog = () => { logModalShow.value = true } const hideLog = () => { logModalShow.value = false } onMounted(() => { if (graph.value) { // contextmenu graph.value.on('node:contextmenu', ({ cell, x, y }) => { menuCell.value = cell const data = graph.value?.localToPage(x, y) pageX.value = data?.x pageY.value = data?.y // show menu menuVisible.value = true // lock scroller graph.value?.lockScroller() }) } }) return { pageX, pageY, startModalShow, logModalShow, menuVisible, menuCell, menuHide, menuStart, viewLog, hideLog } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,577
[Bug][UI Next][V1.0.0-Alpha] Workflow instance view log error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The workflow instance shows the log path instead of the log content when I view the log. ![image](https://user-images.githubusercontent.com/8847400/155951618-6b15c95a-ff75-42b7-ab2a-6226285e2e17.png) ### What you expected to happen Show the log content. ### How to reproduce Open workflow -> workflow instance. Right-click the DAG node and select the view log button to view the log. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8577
https://github.com/apache/dolphinscheduler/pull/8578
87927c43380741c78dfa719fe79120818370be3b
d7f7686880e4807668693ae8b5478b42aa4757b5
"2022-02-28T08:44:41Z"
java
"2022-02-28T09:15:56Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/use-node-status.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Ref } from 'vue' import { render, h } from 'vue' import { useRoute } from 'vue-router' import { useI18n } from 'vue-i18n' import type { Graph } from '@antv/x6' import { tasksState } from '@/utils/common' import { NODE, NODE_STATUS_MARKUP } from './dag-config' import { queryTaskListByProcessId } from '@/service/modules/process-instances' import NodeStatus from '@/views/projects/workflow/components/dag/dag-node-status' interface Options { graph: Ref<Graph | undefined> } /** * Node status and tooltip */ export function useNodeStatus(options: Options) { const { graph } = options const route = useRoute() const { t } = useI18n() const setNodeStatus = (code: string, state: string, taskInstance: any) => { const stateProps = tasksState(t)[state] const node = graph.value?.getCellById(code) if (node) { // Destroy the previous dom node.removeMarkup() node.setMarkup(NODE.markup.concat(NODE_STATUS_MARKUP)) const nodeView = graph.value?.findViewByCell(node) const el = nodeView?.find('div')[0] const a = h(NodeStatus, { t, taskInstance, stateProps }) render(a, el as HTMLElement) } } /** * Task status */ const refreshTaskStatus = () => { const projectCode = Number(route.params.projectCode) const instanceId = Number(route.params.id) queryTaskListByProcessId(instanceId, projectCode).then((res: any) => { window.$message.success(t('project.workflow.refresh_status_succeeded')) const { taskList } = res if (taskList) { taskList.forEach((taskInstance: any) => { setNodeStatus(taskInstance.taskCode, taskInstance.state, taskInstance) }) } }) } return { refreshTaskStatus } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,577
[Bug][UI Next][V1.0.0-Alpha] Workflow instance view log error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The workflow instance shows the log path instead of the log content when I view the log. ![image](https://user-images.githubusercontent.com/8847400/155951618-6b15c95a-ff75-42b7-ab2a-6226285e2e17.png) ### What you expected to happen Show the log content. ### How to reproduce Open workflow -> workflow instance. Right-click the DAG node and select the view log button to view the log. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8577
https://github.com/apache/dolphinscheduler/pull/8578
87927c43380741c78dfa719fe79120818370be3b
d7f7686880e4807668693ae8b5478b42aa4757b5
"2022-02-28T08:44:41Z"
java
"2022-02-28T09:15:56Z"
dolphinscheduler-ui-next/src/views/projects/workflow/instance/components/log-modal.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import { defineComponent, PropType, Transition, toRefs, ref, onMounted, computed, reactive, renderSlot } from 'vue' import { useI18n } from 'vue-i18n' import { NButton, NIcon, NTooltip } from 'naive-ui' import { queryLog } from '@/service/modules/log' import { DownloadOutlined, SyncOutlined, FullscreenOutlined, FullscreenExitOutlined } from '@vicons/antd' import { downloadFile } from '@/service/service' import styles from './log.module.scss' const props = { taskInstanceId: { type: Number as PropType<number>, default: -1 }, taskInstanceType: { type: String as PropType<string>, default: '' } } export default defineComponent({ name: 'workflow-instance-log', props, emits: ['hideLog'], setup(props, ctx) { const { t } = useI18n() const loadingRef = ref(false) const loadingIndex = ref(0) const isDataRef = ref(true) const logBox = ref() const logContent = ref() const logContentBox = ref() const textareaLog = ref() const isScreen = ref(false) const textareaHeight = computed(() => logContentBox.value ? logContentBox.value.clientHeight : 0 ) const boxRef = reactive({ width: '', height: '', marginLeft: '', marginRight: '', marginTop: '' }) const refreshLog = () => { loadingRef.value = true queryLog({ taskInstanceId: props.taskInstanceId, skipLineNum: loadingIndex.value * 1000, limit: loadingIndex.value === 0 ? 1000 : (loadingIndex.value + 1) * 1000 }) .then((res: any) => { setTimeout(() => { loadingRef.value = false if (res) { window.$message.success(t('project.workflow.update_log_success')) } else { window.$message.warning(t('project.workflow.no_more_log')) } }, 1500) textareaLog.value.innerHTML = res || t('project.workflow.no_log') }) .catch((error: any) => { window.$message.error(error.message || '') loadingRef.value = false }) } const showLog = () => { queryLog({ taskInstanceId: props.taskInstanceId, skipLineNum: loadingIndex.value * 1000, limit: loadingIndex.value === 0 ? 
1000 : (loadingIndex.value + 1) * 1000 }) .then((res: any) => { if (!res) { isDataRef.value = false setTimeout(() => { window.$message.warning(t('project.workflow.no_more_log')) }, 1000) textareaLog.value.innerHTML = t('project.workflow.no_log') } else { isDataRef.value = true textareaLog.value.innerHTML = res || t('project.workflow.no_log') setTimeout(() => { textareaLog.value.scrollTop = 2 }, 800) } }) .catch((error: any) => { window.$message.error(error.message || '') }) } const initLog = () => { window.$message.info(t('project.workflow.loading_log')) showLog() } const downloadLog = () => { downloadFile('log/download-log', { taskInstanceId: props.taskInstanceId }) } const screenOpen = () => { isScreen.value = true const winW = window.innerWidth - 40 const winH = window.innerHeight - 40 boxRef.width = `${winW}px` boxRef.height = `${winH}px` boxRef.marginLeft = `-${winW / 2}px` boxRef.marginRight = `-${winH / 2}px` boxRef.marginTop = `-${winH / 2}px` logContent.value.animate({ scrollTop: 0 }, 0) } const screenClose = () => { isScreen.value = false boxRef.width = '' boxRef.height = '' boxRef.marginLeft = '' boxRef.marginRight = '' boxRef.marginTop = '' logContent.value.animate({ scrollTop: 0 }, 0) } const toggleScreen = () => { if (isScreen.value) { screenClose() } else { screenOpen() } } const close = () => { ctx.emit('hideLog') } /** * up */ const onUp = _.debounce( function () { loadingIndex.value = loadingIndex.value - 1 showLog() }, 1000, { leading: false, trailing: true } ) /** * down */ const onDown = _.debounce( function () { loadingIndex.value = loadingIndex.value + 1 showLog() }, 1000, { leading: false, trailing: true } ) const onTextareaScroll = () => { textareaLog.value.onscroll = () => { // Listen for scrollbar events if ( textareaLog.value.scrollTop + textareaLog.value.clientHeight === textareaLog.value.clientHeight ) { if (loadingIndex.value > 0) { window.$message.info(t('project.workflow.loading_log')) onUp() } } // Listen for scrollbar events if ( textareaLog.value.scrollHeight === textareaLog.value.clientHeight + textareaLog.value.scrollTop ) { // No data is not requested if (isDataRef.value) { window.$message.info(t('project.workflow.loading_log')) onDown() } } } } onMounted(() => { initLog() onTextareaScroll() }) return { t, logBox, logContentBox, loadingRef, textareaLog, logContent, textareaHeight, isScreen, boxRef, showLog, downloadLog, refreshLog, toggleScreen, close, ...toRefs(props) } }, render() { return ( <div> <span class={styles['log-model']}> {this.taskInstanceId && this.taskInstanceType !== 'SUB_PROCESS' && ( <span> {renderSlot(this.$slots, 'history')} <slot name='history'></slot> <span onClick={this.showLog}> {renderSlot(this.$slots, 'log')} </span> </span> )} <Transition name='fade'> { <div class={styles['log-pop']}> <div class={styles['log-box']} style={{ ...this.boxRef }}> <div class={styles['title']}> <div class={styles['left-item']}> {this.t('project.workflow.view_log')} </div> <div class={styles['right-item']}> <NTooltip> {{ trigger: () => ( <NButton strong secondary circle type='info' class={styles.button} onClick={this.downloadLog} > <NIcon> <DownloadOutlined /> </NIcon> </NButton> ), default: () => this.t('project.workflow.download_log') }} </NTooltip> <NTooltip> {{ trigger: () => ( <NButton strong secondary circle type='info' class={styles.button} onClick={() => !this.loadingRef && this.refreshLog() } > <NIcon> <SyncOutlined /> </NIcon> </NButton> ), default: () => this.t('project.workflow.refresh_log') }} </NTooltip> <NTooltip> {{ trigger: () => ( 
<NButton strong secondary circle type='info' class={styles.button} onClick={this.toggleScreen} > <NIcon> {this.isScreen ? ( <FullscreenExitOutlined /> ) : ( <FullscreenOutlined /> )} </NIcon> </NButton> ), default: () => this.isScreen ? this.t('project.workflow.cancel_full_screen') : this.t('project.workflow.enter_full_screen') }} </NTooltip> </div> </div> <div class={styles['content']} ref='logContent'> <div class={styles['content-log-box']} ref='logContentBox'> <textarea class={styles['textarea-ft']} style={`width: 100%; height: ${this.textareaHeight}px`} spellcheck='false' ref='textareaLog' readonly ></textarea> </div> </div> <div class={styles['operation']}> <NButton type='primary' size='small' round onClick={this.close} > {this.t('project.workflow.close')} </NButton> </div> </div> </div> } </Transition> </span> </div> ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,581
[Bug] [API-Doc] Missing some Chinese translation
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/02/28/u6YhjlZ7HvUkJc9.png) ### What you expected to happen See the screenshot above. ### How to reproduce See the screenshot above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8581
https://github.com/apache/dolphinscheduler/pull/8588
eb88d9cbd328f5c2aef5f21015b8cfa8f5adc5d6
3cfb3270fe2dbe3b009c307ce31881c772c24eee
"2022-02-28T10:07:12Z"
java
"2022-03-01T07:14:52Z"
dolphinscheduler-api/src/main/resources/i18n/messages.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=query schedule list EXECUTE_PROCESS_TAG=execute process related operation PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation RUN_PROCESS_INSTANCE_NOTES=run process instance BATCH_RUN_PROCESS_INSTANCE_NOTES=batch run process instance START_NODE_LIST=start node list(node name) TASK_DEPEND_TYPE=task depend type COMMAND_TYPE=command type RUN_MODE=run mode TIMEOUT=timeout EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance EXECUTE_TYPE=execute type START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition GET_RECEIVER_CC_NOTES=query receiver cc DESC=description GROUP_NAME=group name GROUP_TYPE=group type QUERY_ALERT_GROUP_LIST_NOTES=query alert group list UPDATE_ALERT_GROUP_NOTES=update alert group DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not GRANT_ALERT_GROUP_NOTES=grant alert group USER_IDS=user id list ALERT_GROUP_TAG=alert group related operation ALERT_PLUGIN_INSTANCE_TAG=alert plugin instance related operation UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=update alert plugin instance operation CREATE_ALERT_PLUGIN_INSTANCE_NOTES=create alert plugin instance operation DELETE_ALERT_PLUGIN_INSTANCE_NOTES=delete alert plugin instance operation GET_ALERT_PLUGIN_INSTANCE_NOTES=get alert plugin instance operation CREATE_ALERT_GROUP_NOTES=create alert group WORKER_GROUP_TAG=worker group related operation SAVE_WORKER_GROUP_NOTES=create worker group WORKER_GROUP_NAME=worker group name WORKER_IP_LIST=worker ip list, eg. 
192.168.1.1,192.168.1.2 QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging QUERY_WORKER_GROUP_LIST_NOTES=query worker group list DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id DATA_ANALYSIS_TAG=analysis related operation of task state COUNT_TASK_STATE_NOTES=count task state COUNT_PROCESS_INSTANCE_NOTES=count process instance state COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user COUNT_COMMAND_STATE_NOTES=count command state COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue ACCESS_TOKEN_TAG=access token related operation MONITOR_TAG=monitor related operation MASTER_LIST_NOTES=master server list WORKER_LIST_NOTES=worker server list QUERY_DATABASE_STATE_NOTES=query database state QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE TASK_STATE=task instance state SOURCE_TABLE=SOURCE TABLE DEST_TABLE=dest table TASK_DATE=task date QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging DATA_SOURCE_TAG=data source related operation CREATE_DATA_SOURCE_NOTES=create data source DATA_SOURCE_NAME=data source name DATA_SOURCE_NOTE=data source desc DB_TYPE=database type DATA_SOURCE_HOST=DATA SOURCE HOST DATA_SOURCE_PORT=data source port DATABASE_NAME=database name QUEUE_TAG=queue related operation QUERY_QUEUE_LIST_NOTES=query queue list QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging CREATE_QUEUE_NOTES=create queue YARN_QUEUE_NAME=yarn(hadoop) queue name QUEUE_ID=queue id TENANT_DESC=tenant desc QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging QUERY_TENANT_LIST_NOTES=query tenant list UPDATE_TENANT_NOTES=update tenant DELETE_TENANT_NOTES=delete tenant RESOURCES_TAG=resource center related operation CREATE_RESOURCE_NOTES=create resource RESOURCE_TYPE=resource file type RESOURCE_NAME=resource name RESOURCE_DESC=resource file desc RESOURCE_FILE=resource file RESOURCE_ID=resource id QUERY_RESOURCE_LIST_NOTES=query resource list DELETE_RESOURCE_BY_ID_NOTES=delete resource by id VIEW_RESOURCE_BY_ID_NOTES=view resource by id ONLINE_CREATE_RESOURCE_NOTES=online create resource SUFFIX=resource file suffix CONTENT=resource file content UPDATE_RESOURCE_NOTES=edit resource file online DOWNLOAD_RESOURCE_NOTES=download resource file CREATE_UDF_FUNCTION_NOTES=create udf function UDF_TYPE=UDF type FUNC_NAME=function name CLASS_NAME=package and class name ARG_TYPES=arguments UDF_DESC=udf desc VIEW_UDF_FUNCTION_NOTES=view udf function UPDATE_UDF_FUNCTION_NOTES=update udf function QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name DELETE_UDF_FUNCTION_NOTES=delete udf function AUTHORIZED_FILE_NOTES=authorized file UNAUTHORIZED_FILE_NOTES=unauthorized file AUTHORIZED_UDF_FUNC_NOTES=authorized udf func UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func VERIFY_QUEUE_NOTES=verify queue TENANT_TAG=tenant related operation CREATE_TENANT_NOTES=create tenant TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description UPDATE_PROJECT_NOTES=update project 
PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_ALL_PROJECT_LIST_NOTES=query all project list QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project QUERY_AUTHORIZED_USER_NOTES=query authorized user TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging CREATE_TOKEN_NOTES=create access token for specified user UPDATE_TOKEN_NOTES=update access token for specified user TOKEN=access token string, it will be automatically generated when it absent EXPIRE_TIME=expire time for the token TOKEN_ID=access token id QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging QUERY_ACCESS_TOKEN_BY_USER_NOTES=query access token for specified user SCHEDULE=schedule WARNING_TYPE=warning type(sending strategy) WARNING_GROUP_ID=warning group id FAILURE_STRATEGY=failure strategy RECEIVERS=receivers RECEIVERS_CC=receivers cc WORKER_GROUP_ID=worker server group id PROCESS_INSTANCE_PRIORITY=process instance priority UPDATE_SCHEDULE_NOTES=update schedule SCHEDULE_ID=schedule id ONLINE_SCHEDULE_NOTES=online schedule OFFLINE_SCHEDULE_NOTES=offline schedule QUERY_SCHEDULE_NOTES=query schedule QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging LOGIN_TAG=User login related operations USER_NAME=user name PROJECT_NAME=project name CREATE_PROCESS_DEFINITION_NOTES=create process definition PROCESS_DEFINITION_NAME=process definition name PROCESS_DEFINITION_JSON=process definition detail info (json format) PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) PROCESS_DEFINITION_DESC=process definition desc PROCESS_DEFINITION_TAG=process definition related opertation SIGNOUT_NOTES=logout USER_PASSWORD=user password UPDATE_PROCESS_INSTANCE_NOTES=update process instance QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list VERIFY_PROCESS_DEFINITION_NAME_NOTES=verify process definition name LOGIN_NOTES=user login UPDATE_PROCESS_DEFINITION_NOTES=update process definition PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=query process definition by name QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) SCHEDULE_TIME=schedule time SYNC_DEFINE=update the information of the process instance to the process definition RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance SEARCH_VAL=search val USER_ID=user id PAGE_SIZE=page size LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definition all by project id 
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id SKIP_LINE_NUM=skip line num QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log USERS_TAG=users related operation SCHEDULER_TAG=scheduler related operation CREATE_SCHEDULE_NOTES=create schedule CREATE_USER_NOTES=create user TENANT_ID=tenant id QUEUE=queue EMAIL=email PHONE=phone QUERY_USER_LIST_NOTES=query user list UPDATE_USER_NOTES=update user DELETE_USER_BY_ID_NOTES=delete user by id GRANT_PROJECT_NOTES=GRANT PROJECT PROJECT_IDS=project ids(string format, multiple projects separated by ",") GRANT_PROJECT_BY_CODE_NOTES=GRANT PROJECT BY CODE REVOKE_PROJECT_NOTES=REVOKE PROJECT FOR USER PROJECT_CODE=project code GRANT_RESOURCE_NOTES=grant resource file RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") GET_USER_INFO_NOTES=get user info LIST_USER_NOTES=list user VERIFY_USER_NAME_NOTES=verify user name UNAUTHORIZED_USER_NOTES=cancel authorization ALERT_GROUP_ID=alert group id AUTHORIZED_USER_NOTES=authorized user GRANT_UDF_FUNC_NOTES=grant udf function UDF_IDS=udf ids(string format, multiple udf functions separated by ",") GRANT_DATASOURCE_NOTES=grant datasource DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables VIEW_GANTT_NOTES=view gantt SUB_PROCESS_INSTANCE_ID=sub process instance id TASK_NAME=task instance name TASK_INSTANCE_TAG=task instance related operation LOGGER_TAG=log related operation PROCESS_INSTANCE_TAG=process instance related operation EXECUTION_STATUS=runing status for workflow and task nodes HOST=ip address of running task START_DATE=start date END_DATE=end date QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id UPDATE_DATA_SOURCE_NOTES=update data source DATA_SOURCE_ID=DATA SOURCE ID QUERY_DATA_SOURCE_NOTES=query data source by id QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test DELETE_DATA_SOURCE_NOTES=delete data source VERIFY_DATA_SOURCE_NOTES=verify data source UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source AUTHORIZED_DATA_SOURCE_NOTES=authorized data source DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id QUERY_ALERT_GROUP_LIST_PAGING_NOTES=query alert group list paging EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=export process definition by id BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES= batch export process definition by ids QUERY_USER_CREATED_PROJECT_NOTES= query user created project QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES= query authorized and user created project COPY_PROCESS_DEFINITION_NOTES= copy process definition notes MOVE_PROCESS_DEFINITION_NOTES= move process definition notes 
TARGET_PROJECT_ID= target project id IS_COPY = is copy DELETE_PROCESS_DEFINITION_VERSION_NOTES=delete process definition version QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=query process definition versions SWITCH_PROCESS_DEFINITION_VERSION_NOTES=switch process definition version VERSION=version GET_RULE_FORM_CREATE_JSON_NOTES=get rule form-create json QUERY_RULE_LIST_PAGING_NOTES=query rule list paging QUERY_RULE_LIST_NOTES=query rule list QUERY_EXECUTE_RESULT_LIST_PAGING_NOTES=query execute result list paging RULE_ID=rule id RULE_TYPE=rule type STATE=state GET_DATASOURCE_OPTIONS_NOTES=get datasource options GET_DATASOURCE_TABLES_NOTES=get datasource table GET_DATASOURCE_TABLE_COLUMNS_NOTES=get datasource table columns TABLE_NAME=table name AUDIT_LOG_TAG=audit log related operation MODULE_TYPE=module type OPERATION_TYPE=operation type
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,581
[Bug] [API-Doc] Missing some Chinese translation
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/02/28/u6YhjlZ7HvUkJc9.png) ### What you expected to happen See the screenshot above. ### How to reproduce See the screenshot above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8581
https://github.com/apache/dolphinscheduler/pull/8588
eb88d9cbd328f5c2aef5f21015b8cfa8f5adc5d6
3cfb3270fe2dbe3b009c307ce31881c772c24eee
"2022-02-28T10:07:12Z"
java
"2022-03-01T07:14:52Z"
dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=query schedule list EXECUTE_PROCESS_TAG=execute process related operation PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation RUN_PROCESS_INSTANCE_NOTES=run process instance BATCH_RUN_PROCESS_INSTANCE_NOTES=batch run process instance(If any processDefinitionCode cannot be found, the failure information is returned and the status is set to failed. The successful task will run normally and will not stop) START_NODE_LIST=start node list(node name) TASK_DEPEND_TYPE=task depend type COMMAND_TYPE=command type RUN_MODE=run mode TIMEOUT=timeout EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance EXECUTE_TYPE=execute type START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition GET_RECEIVER_CC_NOTES=query receiver cc DESC=description GROUP_NAME=group name GROUP_TYPE=group type QUERY_ALERT_GROUP_LIST_NOTES=query alert group list UPDATE_ALERT_GROUP_NOTES=update alert group DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not GRANT_ALERT_GROUP_NOTES=grant alert group USER_IDS=user id list EXECUTOR_TAG=executor operation EXECUTOR_NAME=executor name WORKER_GROUP=work group startParams=start parameters ALERT_GROUP_TAG=alert group related operation ALERT_PLUGIN_INSTANCE_TAG=alert plugin instance related operation WORK_FLOW_LINEAGE_TAG=work flow lineage related operation UI_PLUGINS_TAG=UI plugin related operation UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=update alert plugin instance operation CREATE_ALERT_PLUGIN_INSTANCE_NOTES=create alert plugin instance operation DELETE_ALERT_PLUGIN_INSTANCE_NOTES=delete alert plugin instance operation QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES=query alert plugin instance paging QUERY_TOPN_LONGEST_RUNNING_PROCESS_INSTANCE_NOTES=query topN longest running process instance ALERT_PLUGIN_INSTANCE_NAME=alert plugin instance name ALERT_PLUGIN_DEFINE_ID=alert plugin define id ALERT_PLUGIN_ID=alert plugin id ALERT_PLUGIN_INSTANCE_ID=alert plugin instance id ALERT_PLUGIN_INSTANCE_PARAMS=alert plugin instance parameters ALERT_INSTANCE_NAME=alert instance name VERIFY_ALERT_INSTANCE_NAME_NOTES=verify alert instance name DATA_SOURCE_PARAM=datasource parameter QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES=query all alert plugin instances GET_ALERT_PLUGIN_INSTANCE_NOTES=get alert plugin instance operation CREATE_ALERT_GROUP_NOTES=create alert group WORKER_GROUP_TAG=worker group related operation SAVE_WORKER_GROUP_NOTES=create worker group WORKER_GROUP_NAME=worker group name WORKER_IP_LIST=worker ip list, eg. 
192.168.1.1,192.168.1.2 QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging QUERY_WORKER_GROUP_LIST_NOTES=query worker group list DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id DATA_ANALYSIS_TAG=analysis related operation of task state COUNT_TASK_STATE_NOTES=count task state COUNT_PROCESS_INSTANCE_NOTES=count process instance state COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user COUNT_COMMAND_STATE_NOTES=count command state COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ ACCESS_TOKEN_TAG=access token related operation MONITOR_TAG=monitor related operation MASTER_LIST_NOTES=master server list WORKER_LIST_NOTES=worker server list QUERY_DATABASE_STATE_NOTES=query database state QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE TASK_STATE=task instance state SOURCE_TABLE=SOURCE TABLE DEST_TABLE=dest table TASK_DATE=task date QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging DATA_SOURCE_TAG=data source related operation CREATE_DATA_SOURCE_NOTES=create data source DATA_SOURCE_NAME=data source name DATA_SOURCE_NOTE=data source desc DB_TYPE=database type DATA_SOURCE_HOST=DATA SOURCE HOST DATA_SOURCE_PORT=data source port DATABASE_NAME=database name QUEUE_TAG=queue related operation QUERY_QUEUE_LIST_NOTES=query queue list QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging CREATE_QUEUE_NOTES=create queue YARN_QUEUE_NAME=yarn(hadoop) queue name QUEUE_ID=queue id TENANT_DESC=tenant desc QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging QUERY_TENANT_LIST_NOTES=query tenant list UPDATE_TENANT_NOTES=update tenant DELETE_TENANT_NOTES=delete tenant RESOURCES_TAG=resource center related operation CREATE_RESOURCE_NOTES=create resource RESOURCE_TYPE=resource file type RESOURCE_NAME=resource name RESOURCE_DESC=resource file desc RESOURCE_FILE=resource file RESOURCE_ID=resource id QUERY_RESOURCE_LIST_NOTES=query resource list DELETE_RESOURCE_BY_ID_NOTES=delete resource by id VIEW_RESOURCE_BY_ID_NOTES=view resource by id ONLINE_CREATE_RESOURCE_NOTES=online create resource SUFFIX=resource file suffix CONTENT=resource file content UPDATE_RESOURCE_NOTES=edit resource file online DOWNLOAD_RESOURCE_NOTES=download resource file CREATE_UDF_FUNCTION_NOTES=create udf function UDF_TYPE=UDF type FUNC_NAME=function name CLASS_NAME=package and class name ARG_TYPES=arguments UDF_DESC=udf desc VIEW_UDF_FUNCTION_NOTES=view udf function UPDATE_UDF_FUNCTION_NOTES=update udf function QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name DELETE_UDF_FUNCTION_NOTES=delete udf function AUTHORIZED_FILE_NOTES=authorized file UNAUTHORIZED_FILE_NOTES=unauthorized file AUTHORIZED_UDF_FUNC_NOTES=authorized udf func UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func VERIFY_QUEUE_NOTES=verify queue TENANT_TAG=tenant related operation CREATE_TENANT_NOTES=create tenant TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description UPDATE_PROJECT_NOTES=update project 
PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING QUERY_ALL_PROJECT_LIST_NOTES=query all project list DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project QUERY_AUTHORIZED_USER_NOTES=query authorized user TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging CREATE_TOKEN_NOTES=create access token for specified user UPDATE_TOKEN_NOTES=update access token for specified user TOKEN=access token string, it will be automatically generated when it absent EXPIRE_TIME=expire time for the token TOKEN_ID=access token id QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging QUERY_ACCESS_TOKEN_BY_USER_NOTES=query access token for specified user SCHEDULE=schedule WARNING_TYPE=warning type(sending strategy) WARNING_GROUP_ID=warning group id FAILURE_STRATEGY=failure strategy RECEIVERS=receivers RECEIVERS_CC=receivers cc WORKER_GROUP_ID=worker server group id PROCESS_INSTANCE_START_TIME=process instance start time PROCESS_INSTANCE_END_TIME=process instance end time PROCESS_INSTANCE_SIZE=process instance size PROCESS_INSTANCE_PRIORITY=process instance priority EXPECTED_PARALLELISM_NUMBER=custom parallelism to set the complement task threads DRY_RUN=dry run COMPLEMENT_DEPENDENT_MODE=complement dependent mode UPDATE_SCHEDULE_NOTES=update schedule SCHEDULE_ID=schedule id ONLINE_SCHEDULE_NOTES=online schedule OFFLINE_SCHEDULE_NOTES=offline schedule QUERY_SCHEDULE_NOTES=query schedule QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging LOGIN_TAG=User login related operations USER_NAME=user name PROJECT_NAME=project name CREATE_PROCESS_DEFINITION_NOTES=create process definition PROCESS_DEFINITION_NAME=process definition name PROCESS_DEFINITION_JSON=process definition detail info (json format) PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) PROCESS_DEFINITION_DESC=process definition desc PROCESS_DEFINITION_TAG=process definition related operation SIGNOUT_NOTES=logout USER_PASSWORD=user password UPDATE_PROCESS_INSTANCE_NOTES=update process instance QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list VERIFY_PROCESS_DEFINITION_NAME_NOTES=verify process definition name LOGIN_NOTES=user login UPDATE_PROCESS_DEFINITION_NOTES=update process definition PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids PROCESS_DEFINITION_CODE=process definition code PROCESS_DEFINITION_CODE_LIST=process definition code list IMPORT_PROCESS_DEFINITION_NOTES=import process definition RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) SCHEDULE_TIME=schedule time SYNC_DEFINE=update the information of the process instance to the process definition 
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance PREVIEW_SCHEDULE_NOTES=preview schedule SEARCH_VAL=search val USER_ID=user id FORCE_TASK_SUCCESS=force task success QUERY_TASK_INSTANCE_LIST_PAGING_NOTES=query task instance list paging PROCESS_INSTANCE_NAME=process instance name TASK_INSTANCE_ID=task instance id VERIFY_TENANT_CODE_NOTES=verify tenant code QUERY_UI_PLUGIN_DETAIL_BY_ID=query ui plugin detail by id PLUGIN_ID=plugin id QUERY_UI_PLUGINS_BY_TYPE=query ui plugins by type ACTIVATE_USER_NOTES=active user BATCH_ACTIVATE_USER_NOTES=batch active user STATE=state REPEAT_PASSWORD=repeat password REGISTER_USER_NOTES=register user USER_NAMES=user names PAGE_SIZE=page size LIMIT=limit CREATE_WORKER_GROUP_NOTES=create worker group WORKER_ADDR_LIST=worker address list QUERY_WORKER_ADDRESS_LIST_NOTES=query worker address list QUERY_WORKFLOW_LINEAGE_BY_IDS_NOTES=query workflow lineage by ids QUERY_WORKFLOW_LINEAGE_BY_NAME_NOTES=query workflow lineage by name VIEW_TREE_NOTES=view tree UDF_ID=udf id GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id GET_NODE_LIST_BY_DEFINITION_CODE_NOTES=get node list by definition code QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=query process definition by name PROCESS_DEFINITION_ID_LIST=process definition id list QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definition all by project id DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_NOTES=batch delete process instance by process ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id PROCESS_INSTANCE_IDS=process_instance ids SKIP_LINE_NUM=skip line num QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log USERS_TAG=users related operation SCHEDULER_TAG=scheduler related operation CREATE_SCHEDULE_NOTES=create schedule CREATE_USER_NOTES=create user TENANT_ID=tenant id QUEUE=queue EMAIL=email PHONE=phone QUERY_USER_LIST_NOTES=query user list UPDATE_USER_NOTES=update user UPDATE_QUEUE_NOTES=update queue DELETE_USER_BY_ID_NOTES=delete user by id GRANT_PROJECT_NOTES=GRANT PROJECT PROJECT_IDS=project ids(string format, multiple projects separated by ",") GRANT_PROJECT_BY_CODE_NOTES=GRANT PROJECT BY CODE REVOKE_PROJECT_NOTES=REVOKE PROJECT FOR USER PROJECT_CODE=project code GRANT_RESOURCE_NOTES=grant resource file RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") GET_USER_INFO_NOTES=get user info LIST_USER_NOTES=list user VERIFY_USER_NAME_NOTES=verify user name UNAUTHORIZED_USER_NOTES=cancel authorization ALERT_GROUP_ID=alert group id AUTHORIZED_USER_NOTES=authorized user AUTHORIZE_RESOURCE_TREE_NOTES=authorize resource tree RESOURCE_CURRENTDIR=dir of the current resource QUERY_RESOURCE_LIST_PAGING_NOTES=query resource list paging RESOURCE_PID=parent directory ID of the current resource RESOURCE_FULL_NAME=resource full name QUERY_BY_RESOURCE_NAME=query by resource name QUERY_UDF_FUNC_LIST_NOTES=query udf funciton list VERIFY_RESOURCE_NAME_NOTES=verify resource name GRANT_UDF_FUNC_NOTES=grant udf function UDF_IDS=udf ids(string format, multiple udf functions separated by ",") GRANT_DATASOURCE_NOTES=grant datasource DATASOURCE_IDS=datasource ids(string format, 
multiple datasources separated by ",") QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables VIEW_GANTT_NOTES=view gantt SUB_PROCESS_INSTANCE_ID=sub process instance id TASK_NAME=task instance name TASK_INSTANCE_TAG=task instance related operation LOGGER_TAG=log related operation PROCESS_INSTANCE_TAG=process instance related operation EXECUTION_STATUS=runing status for workflow and task nodes HOST=ip address of running task START_DATE=start date END_DATE=end date QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id UPDATE_DATA_SOURCE_NOTES=update data source DATA_SOURCE_ID=DATA SOURCE ID QUERY_DATA_SOURCE_NOTES=query data source by id QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test DELETE_DATA_SOURCE_NOTES=delete data source VERIFY_DATA_SOURCE_NOTES=verify data source UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source AUTHORIZED_DATA_SOURCE_NOTES=authorized data source DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id QUERY_ALERT_GROUP_LIST_PAGING_NOTES=query alert group list paging EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=export process definition by id BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES=batch export process definition by ids QUERY_USER_CREATED_PROJECT_NOTES=query user created project QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES=query authorized and user created project COPY_PROCESS_DEFINITION_NOTES=copy process definition notes MOVE_PROCESS_DEFINITION_NOTES=move process definition notes TARGET_PROJECT_ID=target project id IS_COPY=is copy DELETE_PROCESS_DEFINITION_VERSION_NOTES=delete process definition version QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=query process definition versions SWITCH_PROCESS_DEFINITION_VERSION_NOTES=switch process definition version VERSION=version TASK_GROUP_QUEUEID=task group queue id TASK_GROUP_QUEUE_PRIORITY=task group queue priority GET_RULE_FORM_CREATE_JSON_NOTES=get rule form-create json QUERY_RULE_LIST_PAGING_NOTES=query rule list paging QUERY_RULE_LIST_NOTES=query rule list QUERY_EXECUTE_RESULT_LIST_PAGING_NOTES=query execute result list paging RULE_ID=rule id RULE_TYPE=rule type GET_DATASOURCE_OPTIONS_NOTES=get datasource options GET_DATASOURCE_TABLES_NOTES=get datasource table GET_DATASOURCE_TABLE_COLUMNS_NOTES=get datasource table columns TABLE_NAME=table name QUERY_AUDIT_LOG=query audit log AUDIT_LOG_TAG=audit log related operation MODULE_TYPE=module type OPERATION_TYPE=operation type
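The block above is a flat dump of the English Swagger/i18n message bundle (a `messages_en_US.properties`-style file) that backs the API documentation strings. As a point of reference, the following is a minimal sketch of how such a key/value bundle is resolved with the standard `java.util.ResourceBundle` API; the base name `i18n.messages` and the loading code are illustrative assumptions rather than the project's actual Swagger wiring, while the key and value are taken from the dump above.

```java
import java.util.Locale;
import java.util.ResourceBundle;

public class ApiDocMessageLookup {
    public static void main(String[] args) {
        // Assumption: the properties above sit on the classpath as i18n/messages_en_US.properties.
        ResourceBundle bundle = ResourceBundle.getBundle("i18n.messages", Locale.US);

        // Key/value pair taken from the dump above.
        System.out.println(bundle.getString("CREATE_PROCESS_DEFINITION_NOTES"));
        // prints: create process definition
    }
}
```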
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,581
[Bug] [API-Doc] missing some Chinese translations
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/02/28/u6YhjlZ7HvUkJc9.png) ### What you expected to happen above. ### How to reproduce above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8581
https://github.com/apache/dolphinscheduler/pull/8588
eb88d9cbd328f5c2aef5f21015b8cfa8f5adc5d6
3cfb3270fe2dbe3b009c307ce31881c772c24eee
"2022-02-28T10:07:12Z"
java
"2022-03-01T07:14:52Z"
dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=查询定时列表 PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作 UI_PLUGINS_TAG=UI插件相关操作 WORK_FLOW_LINEAGE_TAG=工作流血缘相关操作 RUN_PROCESS_INSTANCE_NOTES=运行流程实例 BATCH_RUN_PROCESS_INSTANCE_NOTES=批量运行流程实例(其中有任意一个processDefinitionCode找不到,则返回失败信息并且状态置为失败,成功的任务会正常运行,不会停止) START_NODE_LIST=开始节点列表(节点name) TASK_DEPEND_TYPE=任务依赖类型 COMMAND_TYPE=指令类型 RUN_MODE=运行模式 TIMEOUT=超时时间 EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等) EXECUTE_TYPE=执行类型 EXECUTOR_TAG=流程相关操作 EXECUTOR_NAME=流程名称 START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义 DESC=备注(描述) GROUP_NAME=组名称 WORKER_GROUP=worker群组 startParams=启动参数 GROUP_TYPE=组类型 QUERY_ALERT_GROUP_LIST_NOTES=告警组列表 UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组 DELETE_ALERT_GROUP_BY_ID_NOTES=通过ID删除告警组 VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在 GRANT_ALERT_GROUP_NOTES=授权告警组 PROCESS_DEFINITION_IDS=流程定义ID PROCESS_DEFINITION_CODE=流程定义编码 PROCESS_DEFINITION_CODE_LIST=流程定义编码列表 USER_IDS=用户ID列表 ALERT_GROUP_TAG=告警组相关操作 WORKER_GROUP_TAG=Worker分组管理 SAVE_WORKER_GROUP_NOTES=创建Worker分组 ALERT_PLUGIN_INSTANCE_TAG=告警插件实例相关操作 WORKER_GROUP_NAME=Worker分组名称 WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割 QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理 QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组 DELETE_WORKER_GROUP_BY_ID_NOTES=通过ID删除worker group DATA_ANALYSIS_TAG=任务状态分析相关操作 COUNT_TASK_STATE_NOTES=任务状态统计 COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态 COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义 COUNT_COMMAND_STATE_NOTES=统计命令状态 COUNT_QUEUE_STATE_NOTES=统计队列里任务状态 ACCESS_TOKEN_TAG=访问token相关操作 MONITOR_TAG=监控相关操作 MASTER_LIST_NOTES=master服务列表 WORKER_LIST_NOTES=worker服务列表 QUERY_DATABASE_STATE_NOTES=查询数据库状态 QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态 TASK_STATE=任务实例状态 SOURCE_TABLE=源表 DEST_TABLE=目标表 TASK_DATE=任务时间 QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表 DATA_SOURCE_TAG=数据源相关操作 CREATE_DATA_SOURCE_NOTES=创建数据源 DATA_SOURCE_NAME=数据源名称 DATA_SOURCE_NOTE=数据源描述 DB_TYPE=数据源类型 DATA_SOURCE_HOST=IP主机名 DATA_SOURCE_PORT=数据源端口 DATABASE_NAME=数据库名 QUEUE_TAG=队列相关操作 QUERY_TOPN_LONGEST_RUNNING_PROCESS_INSTANCE_NOTES=查询topN最长运行流程实例 QUERY_QUEUE_LIST_NOTES=查询队列列表 QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表 CREATE_QUEUE_NOTES=创建队列 YARN_QUEUE_NAME=hadoop yarn队列名 QUEUE_ID=队列ID TENANT_DESC=租户描述 QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表 QUERY_TENANT_LIST_NOTES=查询租户列表 UPDATE_TENANT_NOTES=更新租户 DELETE_TENANT_NOTES=删除租户 RESOURCES_TAG=资源中心相关操作 CREATE_RESOURCE_NOTES=创建资源 RESOURCE_FULL_NAME=资源全名 RESOURCE_TYPE=资源文件类型 RESOURCE_NAME=资源文件名称 RESOURCE_DESC=资源文件描述 RESOURCE_FILE=资源文件 RESOURCE_ID=资源ID QUERY_RESOURCE_LIST_NOTES=查询资源列表 QUERY_BY_RESOURCE_NAME=通过资源名称查询 QUERY_UDF_FUNC_LIST_NOTES=查询UDF函数列表 VERIFY_RESOURCE_NAME_NOTES=验证资源名称 DELETE_RESOURCE_BY_ID_NOTES=通过ID删除资源 VIEW_RESOURCE_BY_ID_NOTES=通过ID浏览资源 ONLINE_CREATE_RESOURCE_NOTES=在线创建资源 SUFFIX=资源文件后缀 CONTENT=资源文件内容 
UPDATE_RESOURCE_NOTES=在线更新资源文件 DOWNLOAD_RESOURCE_NOTES=下载资源文件 CREATE_UDF_FUNCTION_NOTES=创建UDF函数 UDF_TYPE=UDF类型 FUNC_NAME=函数名称 CLASS_NAME=包名类名 ARG_TYPES=参数 UDF_DESC=udf描述,使用说明 VIEW_UDF_FUNCTION_NOTES=查看udf函数 UPDATE_UDF_FUNCTION_NOTES=更新udf函数 QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表 VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名 DELETE_UDF_FUNCTION_NOTES=删除UDF函数 AUTHORIZED_FILE_NOTES=授权文件 UNAUTHORIZED_FILE_NOTES=取消授权文件 AUTHORIZED_UDF_FUNC_NOTES=授权udf函数 UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权 VERIFY_QUEUE_NOTES=验证队列 TENANT_TAG=租户相关操作 CREATE_TENANT_NOTES=创建租户 TENANT_CODE=操作系统租户 QUEUE_NAME=队列名 PASSWORD=密码 DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=kerberos认证参数 java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=kerberos认证参数 login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=kerberos认证参数 login.user.keytab.path PROJECT_TAG=项目相关操作 CREATE_PROJECT_NOTES=创建项目 PROJECT_DESC=项目描述 UPDATE_PROJECT_NOTES=更新项目 PROJECT_ID=项目ID QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息 QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表 QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目 DELETE_PROJECT_BY_ID_NOTES=通过ID删除项目 QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目 QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目 QUERY_AUTHORIZED_USER_NOTES=查询拥有项目授权的用户 TASK_RECORD_TAG=任务记录相关操作 QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表 CREATE_TOKEN_NOTES=为指定用户创建安全令牌 UPDATE_TOKEN_NOTES=更新指定用户的安全令牌 TOKEN=安全令牌字符串,若未显式指定将会自动生成 EXPIRE_TIME=安全令牌的过期时间 TOKEN_ID=安全令牌的ID QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表 QUERY_ACCESS_TOKEN_BY_USER_NOTES=查询指定用户的access token SCHEDULE=定时 WARNING_TYPE=发送策略 WARNING_GROUP_ID=发送组ID FAILURE_STRATEGY=失败策略 RECEIVERS=收件人 RECEIVERS_CC=收件人(抄送) WORKER_GROUP_ID=Worker Server分组ID PROCESS_INSTANCE_PRIORITY=流程实例优先级 EXPECTED_PARALLELISM_NUMBER=补数任务自定义并行度 DRY_RUN=是否空跑 COMPLEMENT_DEPENDENT_MODE=补数依赖的类型 UPDATE_SCHEDULE_NOTES=更新定时 SCHEDULE_ID=定时ID ONLINE_SCHEDULE_NOTES=定时上线 OFFLINE_SCHEDULE_NOTES=定时下线 QUERY_SCHEDULE_NOTES=查询定时 QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时 LOGIN_TAG=用户登录相关操作 USER_NAME=用户名 PROJECT_NAME=项目名称 CREATE_PROCESS_DEFINITION_NOTES=创建流程定义 PROCESS_INSTANCE_START_TIME=流程实例启动时间 PROCESS_INSTANCE_END_TIME=流程实例结束时间 PROCESS_INSTANCE_SIZE=流程实例个数 PROCESS_DEFINITION_NAME=流程定义名称 PROCESS_DEFINITION_JSON=流程定义详细信息(json格式) PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式) PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式) PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式) PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式) PROCESS_DEFINITION_DESC=流程定义描述信息 PROCESS_DEFINITION_TAG=流程定义相关操作 SIGNOUT_NOTES=退出登录 USER_PASSWORD=用户密码 UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例 QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表 VERIFY_PROCESS_DEFINITION_NAME_NOTES=验证流程定义名字 LOGIN_NOTES=用户登录 UPDATE_PROCESS_DEFINITION_NOTES=更新流程定义 PROCESS_DEFINITION_ID=流程定义ID RELEASE_PROCESS_DEFINITION_NOTES=发布流程定义 QUERY_PROCESS_DEFINITION_BY_ID_NOTES=通过流程定义ID查询流程定义 QUERY_PROCESS_DEFINITION_LIST_NOTES=查询流程定义列表 QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 PAGE_NO=页码号 PROCESS_INSTANCE_ID=流程实例ID PROCESS_INSTANCE_IDS=流程实例ID集合 PROCESS_INSTANCE_JSON=流程实例信息(json格式) PREVIEW_SCHEDULE_NOTES=定时调度预览 SCHEDULE_TIME=定时时间 SYNC_DEFINE=更新流程实例的信息是否同步到流程定义 RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例 SEARCH_VAL=搜索值 FORCE_TASK_SUCCESS=强制TASK成功 QUERY_TASK_INSTANCE_LIST_PAGING_NOTES=分页查询任务实例列表 PROCESS_INSTANCE_NAME=流程实例名称 TASK_INSTANCE_ID=任务实例ID VERIFY_TENANT_CODE_NOTES=验证租户 QUERY_UI_PLUGIN_DETAIL_BY_ID=通过ID查询UI插件详情 QUERY_UI_PLUGINS_BY_TYPE=通过类型查询UI插件 ACTIVATE_USER_NOTES=激活用户 BATCH_ACTIVATE_USER_NOTES=批量激活用户 
REPEAT_PASSWORD=重复密码 REGISTER_USER_NOTES=用户注册 STATE=状态 USER_NAMES=多个用户名 PLUGIN_ID=插件ID USER_ID=用户ID PAGE_SIZE=页大小 LIMIT=显示多少条 UDF_ID=udf ID AUTHORIZE_RESOURCE_TREE_NOTES=授权资源树 RESOURCE_CURRENTDIR=当前资源目录 RESOURCE_PID=资源父目录ID QUERY_RESOURCE_LIST_PAGING_NOTES=分页查询资源列表 VIEW_TREE_NOTES=树状图 IMPORT_PROCESS_DEFINITION_NOTES=导入流程定义 GET_NODE_LIST_BY_DEFINITION_ID_NOTES=通过流程定义ID获得任务节点列表 PROCESS_DEFINITION_ID_LIST=流程定义id列表 QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=通过项目ID查询流程定义 BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=通过流程定义ID集合批量删除流程定义 BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_NOTES=通过流程实例ID集合批量删除流程实例 DELETE_PROCESS_DEFINITION_BY_ID_NOTES=通过流程定义ID删除流程定义 QUERY_PROCESS_INSTANCE_BY_ID_NOTES=通过流程实例ID查询流程实例 DELETE_PROCESS_INSTANCE_BY_ID_NOTES=通过流程实例ID删除流程实例 TASK_ID=任务实例ID SKIP_LINE_NUM=忽略行数 QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志 DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志 USERS_TAG=用户相关操作 SCHEDULER_TAG=定时相关操作 CREATE_SCHEDULE_NOTES=创建定时 CREATE_USER_NOTES=创建用户 CREATE_WORKER_GROUP_NOTES=创建Worker分组 WORKER_ADDR_LIST=worker地址列表 QUERY_WORKER_ADDRESS_LIST_NOTES=查询worker地址列表 QUERY_WORKFLOW_LINEAGE_BY_IDS_NOTES=通过IDs查询工作流血缘列表 QUERY_WORKFLOW_LINEAGE_BY_NAME_NOTES=通过名称查询工作流血缘列表 TENANT_ID=租户ID QUEUE=使用的队列 EMAIL=邮箱 PHONE=手机号 QUERY_USER_LIST_NOTES=查询用户列表 UPDATE_USER_NOTES=更新用户 UPDATE_QUEUE_NOTES=更新队列 DELETE_USER_BY_ID_NOTES=删除用户通过ID GRANT_PROJECT_NOTES=授权项目 PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割) GRANT_PROJECT_BY_CODE_NOTES=授权项目 REVOKE_PROJECT_NOTES=撤销用户的项目权限 PROJECT_CODE=项目Code GRANT_RESOURCE_NOTES=授权资源文件 RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割) GET_USER_INFO_NOTES=获取用户信息 GET_NODE_LIST_BY_DEFINITION_CODE_NOTES=通过流程定义编码查询节点列表 QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=通过名称查询流程定义 LIST_USER_NOTES=用户列表 VERIFY_USER_NAME_NOTES=验证用户名 UNAUTHORIZED_USER_NOTES=取消授权 ALERT_GROUP_ID=报警组ID AUTHORIZED_USER_NOTES=授权用户 GRANT_UDF_FUNC_NOTES=授权udf函数 UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割) GRANT_DATASOURCE_NOTES=授权数据源 DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割) QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=通过任务实例ID查询子流程实例 QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=通过子流程实例ID查询父流程实例信息 QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量 VIEW_GANTT_NOTES=浏览Gantt图 SUB_PROCESS_INSTANCE_ID=子流程实例ID TASK_NAME=任务实例名 TASK_INSTANCE_TAG=任务实例相关操作 LOGGER_TAG=日志相关操作 PROCESS_INSTANCE_TAG=流程实例相关操作 EXECUTION_STATUS=工作流和任务节点的运行状态 HOST=运行任务的主机IP地址 START_DATE=开始时间 END_DATE=结束时间 QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表 DELETE_ALERT_PLUGIN_INSTANCE_NOTES=删除告警插件实例 CREATE_ALERT_PLUGIN_INSTANCE_NOTES=创建告警插件实例 GET_ALERT_PLUGIN_INSTANCE_NOTES=查询告警插件实例 QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES=分页查询告警实例列表 QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES=查询所有告警实例列表 UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=更新告警插件实例 ALERT_PLUGIN_INSTANCE_NAME=告警插件实例名称 ALERT_PLUGIN_DEFINE_ID=告警插件定义ID ALERT_PLUGIN_ID=告警插件ID ALERT_PLUGIN_INSTANCE_ID=告警插件实例ID ALERT_PLUGIN_INSTANCE_PARAMS=告警插件实例参数 ALERT_INSTANCE_NAME=告警插件名称 VERIFY_ALERT_INSTANCE_NAME_NOTES=验证告警插件名称 UPDATE_DATA_SOURCE_NOTES=更新数据源 DATA_SOURCE_PARAM=数据源参数 DATA_SOURCE_ID=数据源ID CREATE_ALERT_GROUP_NOTES=创建告警组 QUERY_DATA_SOURCE_NOTES=查询数据源通过ID QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=通过数据源类型查询数据源列表 QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表 CONNECT_DATA_SOURCE_NOTES=连接数据源 CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试 DELETE_DATA_SOURCE_NOTES=删除数据源 VERIFY_DATA_SOURCE_NOTES=验证数据源 UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源 AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源 DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据 QUERY_ALERT_GROUP_LIST_PAGING_NOTES=分页查询告警组列表 EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=通过工作流ID导出工作流定义 
BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES=批量导出工作流定义 QUERY_USER_CREATED_PROJECT_NOTES=查询用户创建的项目 QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES=查询授权和用户创建的项目 COPY_PROCESS_DEFINITION_NOTES=复制工作流定义 MOVE_PROCESS_DEFINITION_NOTES=移动工作流定义 TARGET_PROJECT_ID=目标项目ID IS_COPY=是否复制 DELETE_PROCESS_DEFINITION_VERSION_NOTES=删除流程历史版本 QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=查询流程历史版本信息 SWITCH_PROCESS_DEFINITION_VERSION_NOTES=切换流程版本 VERSION=版本号 TASK_GROUP_QUEUEID=任务组队列id TASK_GROUP_QUEUE_PRIORITY=任务队列优先级 GET_RULE_FORM_CREATE_JSON_NOTES=获取规则form-create json QUERY_RULE_LIST_PAGING_NOTES=查询规则分页列表 QUERY_RULE_LIST_NOTES=查询规则列表 QUERY_EXECUTE_RESULT_LIST_PAGING_NOTES=查询数据质量任务结果分页列表 RULE_ID=规则ID RULE_TYPE=规则类型 GET_DATASOURCE_OPTIONS_NOTES=获取数据源OPTIONS GET_DATASOURCE_TABLES_NOTES=获取数据源表列表 GET_DATASOURCE_TABLE_COLUMNS_NOTES=获取数据源表列名 TABLE_NAME=表名 QUERY_AUDIT_LOG=查询审计日志 AUDIT_LOG_TAG=审计日志执行相关操作 MODULE_TYPE=模块类型 OPERATION_TYPE=操作类型
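The record above ends with the `messages_zh_CN.properties` content that the linked PR extends. Assuming plain `java.util.ResourceBundle` semantics (the project's actual Swagger integration may resolve messages differently), a key that is absent from the zh_CN bundle is served from its parent bundle rather than from the translation, which is the kind of fallback behind the untranslated entries shown in the issue screenshot. A minimal sketch under that assumption; `SOME_UNTRANSLATED_KEY` is a hypothetical key used only for illustration:

```java
import java.util.Locale;
import java.util.MissingResourceException;
import java.util.ResourceBundle;

public class ZhCnFallbackSketch {
    public static void main(String[] args) {
        // Assumption: the classpath holds i18n/messages.properties (English default)
        // and i18n/messages_zh_CN.properties (the file shown above).
        ResourceBundle zh = ResourceBundle.getBundle("i18n.messages", Locale.SIMPLIFIED_CHINESE);

        // A translated key resolves to the Chinese text from the file above.
        System.out.println(zh.getString("CREATE_PROCESS_DEFINITION_NOTES")); // 创建流程定义

        // A key missing from the zh_CN file falls back to the parent bundle's English
        // value; if no bundle in the chain defines it, getString throws instead.
        try {
            System.out.println(zh.getString("SOME_UNTRANSLATED_KEY")); // hypothetical key
        } catch (MissingResourceException e) {
            System.out.println("key not defined in any bundle: " + e.getKey());
        }
    }
}
```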
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,506
[Bug] [Master] process instance stop result wrong
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened 1. Stop a process instance whose task instance cannot find a worker group. The state of the stopped process instance is success. If taskInstance.getHost() is empty, the task instance state held in WorkflowExecuteThread.taskInstanceMap becomes wrong in ```org.apache.dolphinscheduler.server.master.runner.task.CommonTaskProcessor#killTask```. 2. Stop a process instance whose task instance state is running. The state of the stopped process instance is success. 3. When a downstream user uses a dependent node and the result of the kill command is success, the dependent node judges that the execution succeeded. In fact the execution failed, so the dependent node's detection result is wrong. ### What you expected to happen The state of the stopped process instance should be stop. ### How to reproduce 1. Stop a process instance whose task instance cannot find a worker group. 2. Stop a process instance whose task instance state is running. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
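The issue body above pins the first symptom on CommonTaskProcessor#killTask mishandling a task instance whose host is empty. The sketch below is a hypothetical, self-contained illustration of the kind of guard that addresses it: when there is no worker host to send a kill command to, the task is marked killed locally instead of the stop being treated as already successful. The class, enum, and method names are simplified stand-ins for the real DolphinScheduler types, not the actual patch; see the linked PR in this record for the real change.

```java
/**
 * Hypothetical sketch of the guard described in the issue body: if a task
 * instance never reached a worker (host is empty), a "kill" should mark it
 * killed locally instead of leaving its state untouched and letting the
 * workflow instance finish as success.
 */
public class KillTaskSketch {

    enum ExecutionStatus { SUBMITTED_SUCCESS, RUNNING_EXECUTION, KILL }

    /** Minimal stand-in for the persisted task instance. */
    static class TaskInstance {
        String host;                                          // empty when no worker picked the task up
        ExecutionStatus state = ExecutionStatus.SUBMITTED_SUCCESS;
    }

    void killTask(TaskInstance taskInstance) {
        if (taskInstance.host == null || taskInstance.host.isEmpty()) {
            // No worker to send a remote kill command to: finish the task as KILL
            // locally so the workflow instance ends up stopped instead of successful.
            taskInstance.state = ExecutionStatus.KILL;
            return;
        }
        // Otherwise a remote kill request would be dispatched to taskInstance.host
        // and the state updated from the worker's response (omitted in this sketch).
    }

    public static void main(String[] args) {
        KillTaskSketch sketch = new KillTaskSketch();
        TaskInstance orphan = new TaskInstance();             // never dispatched, host is empty
        sketch.killTask(orphan);
        System.out.println(orphan.state);                     // KILL
    }
}
```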
https://github.com/apache/dolphinscheduler/issues/8506
https://github.com/apache/dolphinscheduler/pull/8529
3cfb3270fe2dbe3b009c307ce31881c772c24eee
74afdcf8f315eb84ba1a87386e4ebab7767c6b35
"2022-02-23T09:45:07Z"
java
"2022-03-01T07:20:02Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TaskGroupQueueStatus; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskGroupQueue; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.command.HostUpdateCommand; import 
org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor; import org.apache.dolphinscheduler.server.master.runner.task.TaskAction; import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicBoolean; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; /** * master exec thread,split dag */ public class WorkflowExecuteThread { /** * logger of WorkflowExecuteThread */ private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class); /** * master config */ private MasterConfig masterConfig; /** * process service */ private ProcessService processService; /** * alert manager */ private ProcessAlertManager processAlertManager; /** * netty executor manager */ private NettyExecutorManager nettyExecutorManager; /** * process instance */ private ProcessInstance processInstance; /** * process definition */ private ProcessDefinition processDefinition; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * key of workflow */ private String key; /** * start flag, true: start nodes submit completely */ private boolean isStart = false; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * task instance hash map, taskId as key */ private Map<Integer, TaskInstance> taskInstanceMap = new ConcurrentHashMap<>(); /** * running taskProcessor, taskCode as key, taskProcessor as value * only on taskProcessor per taskCode */ private final Map<Long, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>(); /** * valid task map, taskCode as key, taskId as value * in a DAG, only one taskInstance per taskCode is valid */ private Map<Long, Integer> validTaskMap = new ConcurrentHashMap<>(); /** * error task map, taskCode as key, taskInstanceId as value * in a DAG, only one taskInstance per taskCode is valid */ private Map<Long, Integer> errorTaskMap = new ConcurrentHashMap<>(); /** * complete task map, taskCode as key, taskInstanceId as value * in a DAG, only one taskInstance per taskCode is valid */ private Map<Long, Integer> completeTaskMap = new ConcurrentHashMap<>(); /** * depend failed task map, taskCode as key, taskId as value */ private Map<Long, Integer> dependFailedTaskMap = new ConcurrentHashMap<>(); /** * forbidden task map, code as key */ private Map<Long, TaskNode> forbiddenTaskMap = new ConcurrentHashMap<>(); /** * skip task map, code as key */ private Map<String, TaskNode> skipTaskNodeMap = new ConcurrentHashMap<>(); /** * complement date list */ 
private List<Date> complementListDate = Lists.newLinkedList(); /** * state event queue */ private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * wait to retry taskInstance map, taskCode as key, taskInstance as value * before retry, the taskInstance id is 0 */ private Map<Long, TaskInstance> waitToRetryTaskInstanceMap = new ConcurrentHashMap<>(); /** * state wheel execute thread */ private StateWheelExecuteThread stateWheelExecuteThread; /** * constructor of WorkflowExecuteThread * * @param processInstance processInstance * @param processService processService * @param nettyExecutorManager nettyExecutorManager * @param processAlertManager processAlertManager * @param masterConfig masterConfig * @param stateWheelExecuteThread stateWheelExecuteThread */ public WorkflowExecuteThread(ProcessInstance processInstance , ProcessService processService , NettyExecutorManager nettyExecutorManager , ProcessAlertManager processAlertManager , MasterConfig masterConfig , StateWheelExecuteThread stateWheelExecuteThread) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; this.nettyExecutorManager = nettyExecutorManager; this.processAlertManager = processAlertManager; this.stateWheelExecuteThread = stateWheelExecuteThread; } /** * the process start nodes are submitted completely. */ public boolean isStart() { return this.isStart; } /** * handle event */ public void handleEvents() { if (!isStart) { return; } while (!this.stateEvents.isEmpty()) { try { StateEvent stateEvent = this.stateEvents.peek(); if (stateEventHandler(stateEvent)) { this.stateEvents.remove(stateEvent); } } catch (Exception e) { logger.error("state handle error:", e); } } } public String getKey() { if (StringUtils.isNotEmpty(key) || this.processDefinition == null) { return key; } key = String.format("%d_%d_%d", this.processDefinition.getCode(), this.processDefinition.getVersion(), this.processInstance.getId()); return key; } public boolean addStateEvent(StateEvent stateEvent) { if (processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.info("state event would be abounded :{}", stateEvent.toString()); return false; } this.stateEvents.add(stateEvent); return true; } public int eventSize() { return this.stateEvents.size(); } public ProcessInstance getProcessInstance() { return this.processInstance; } private boolean stateEventHandler(StateEvent stateEvent) { logger.info("process event: {}", stateEvent.toString()); if (!checkProcessInstance(stateEvent)) { return false; } boolean result = false; switch (stateEvent.getType()) { case PROCESS_STATE_CHANGE: result = processStateChangeHandler(stateEvent); break; case TASK_STATE_CHANGE: result = taskStateChangeHandler(stateEvent); break; case PROCESS_TIMEOUT: result = processTimeout(); break; case TASK_TIMEOUT: result = taskTimeout(stateEvent); break; case WAIT_TASK_GROUP: result = checkForceStartAndWakeUp(stateEvent); break; case TASK_RETRY: result = taskRetryEventHandler(stateEvent); break; default: break; } if (result) { this.stateEvents.remove(stateEvent); } return result; } private boolean checkForceStartAndWakeUp(StateEvent stateEvent) { TaskGroupQueue taskGroupQueue = this.processService.loadTaskGroupQueue(stateEvent.getTaskInstanceId()); if (taskGroupQueue.getForceStart() == Flag.YES.getCode()) { TaskInstance taskInstance = 
this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskInstance.getTaskCode()); ProcessInstance processInstance = this.processService.findProcessInstanceById(taskInstance.getProcessInstanceId()); taskProcessor.init(taskInstance, processInstance); taskProcessor.action(TaskAction.DISPATCH); this.processService.updateTaskGroupQueueStatus(taskGroupQueue.getId(), TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()); return true; } if (taskGroupQueue.getInQueue() == Flag.YES.getCode()) { boolean acquireTaskGroup = processService.acquireTaskGroupAgain(taskGroupQueue); if (acquireTaskGroup) { TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskInstance.getTaskCode()); ProcessInstance processInstance = this.processService.findProcessInstanceById(taskInstance.getProcessInstanceId()); taskProcessor.init(taskInstance, processInstance); taskProcessor.action(TaskAction.DISPATCH); return true; } } return false; } private boolean taskTimeout(StateEvent stateEvent) { if (!checkTaskInstanceByStateEvent(stateEvent)) { return true; } TaskInstance taskInstance = taskInstanceMap.get(stateEvent.getTaskInstanceId()); if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) { return true; } TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy(); if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskInstance.getTaskCode()); taskProcessor.action(TaskAction.TIMEOUT); } else { processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine()); } return true; } private boolean processTimeout() { this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition); return true; } private boolean taskStateChangeHandler(StateEvent stateEvent) { if (!checkTaskInstanceByStateEvent(stateEvent)) { return true; } TaskInstance task = getTaskInstance(stateEvent.getTaskInstanceId()); if (task.getState() == null) { logger.error("task state is null, state handler error: {}", stateEvent); return true; } if (task.getState().typeIsFinished()) { if (completeTaskMap.containsKey(task.getTaskCode()) && completeTaskMap.get(task.getTaskCode()) == task.getId()) { return true; } taskFinished(task); if (task.getTaskGroupId() > 0) { releaseTaskGroup(task); } return true; } if (activeTaskProcessorMaps.containsKey(task.getTaskCode())) { ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(task.getTaskCode()); iTaskProcessor.action(TaskAction.RUN); if (iTaskProcessor.taskInstance().getState().typeIsFinished()) { taskFinished(task); } return true; } logger.error("state handler error: {}", stateEvent); return true; } private void taskFinished(TaskInstance taskInstance) { logger.info("work flow {} task id:{} code:{} state:{} ", processInstance.getId(), taskInstance.getId(), taskInstance.getTaskCode(), taskInstance.getState()); activeTaskProcessorMaps.remove(taskInstance.getTaskCode()); stateWheelExecuteThread.removeTask4TimeoutCheck(processInstance, taskInstance); stateWheelExecuteThread.removeTask4RetryCheck(processInstance, taskInstance); stateWheelExecuteThread.removeTask4StateCheck(processInstance, taskInstance); if (taskInstance.getState().typeIsSuccess()) { completeTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId()); 
processInstance.setVarPool(taskInstance.getVarPool()); processService.saveProcessInstance(processInstance); submitPostNode(Long.toString(taskInstance.getTaskCode())); } else if (taskInstance.taskCanRetry() && processInstance.getState() != ExecutionStatus.READY_STOP) { // retry task retryTaskInstance(taskInstance); } else if (taskInstance.getState().typeIsFailure()) { completeTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId()); if (taskInstance.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(taskInstance.getTaskCode()), dag)) { submitPostNode(Long.toString(taskInstance.getTaskCode())); } else { errorTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId()); if (processInstance.getFailureStrategy() == FailureStrategy.END) { killAllTasks(); } } } this.updateProcessInstanceState(); } /** * release task group * @param taskInstance */ private void releaseTaskGroup(TaskInstance taskInstance) { if (taskInstance.getTaskGroupId() > 0) { TaskInstance nextTaskInstance = this.processService.releaseTaskGroup(taskInstance); if (nextTaskInstance != null) { if (nextTaskInstance.getProcessInstanceId() == taskInstance.getProcessInstanceId()) { StateEvent nextEvent = new StateEvent(); nextEvent.setProcessInstanceId(this.processInstance.getId()); nextEvent.setTaskInstanceId(nextTaskInstance.getId()); nextEvent.setType(StateEventType.WAIT_TASK_GROUP); this.stateEvents.add(nextEvent); } else { ProcessInstance processInstance = this.processService.findProcessInstanceById(nextTaskInstance.getProcessInstanceId()); this.processService.sendStartTask2Master(processInstance, nextTaskInstance.getId(), org.apache.dolphinscheduler.remote.command.CommandType.TASK_WAKEUP_EVENT_REQUEST); } } } } /** * crate new task instance to retry, different objects from the original * @param taskInstance */ private void retryTaskInstance(TaskInstance taskInstance) { if (!taskInstance.taskCanRetry()) { return; } TaskInstance newTaskInstance = cloneRetryTaskInstance(taskInstance); if (newTaskInstance == null) { logger.error("retry fail, new taskInstancce is null, task code:{}, task id:{}", taskInstance.getTaskCode(), taskInstance.getId()); return; } waitToRetryTaskInstanceMap.put(newTaskInstance.getTaskCode(), newTaskInstance); if (!taskInstance.retryTaskIntervalOverTime()) { logger.info("failure task will be submitted: process id: {}, task instance code: {} state:{} retry times:{} / {}, interval:{}", processInstance.getId(), newTaskInstance.getTaskCode(), newTaskInstance.getState(), newTaskInstance.getRetryTimes(), newTaskInstance.getMaxRetryTimes(), newTaskInstance.getRetryInterval()); stateWheelExecuteThread.addTask4TimeoutCheck(processInstance, newTaskInstance); stateWheelExecuteThread.addTask4RetryCheck(processInstance, newTaskInstance); } else { addTaskToStandByList(newTaskInstance); submitStandByTask(); waitToRetryTaskInstanceMap.remove(newTaskInstance.getTaskCode()); } } /** * handle task retry event * @param stateEvent * @return */ private boolean taskRetryEventHandler(StateEvent stateEvent) { TaskInstance taskInstance = waitToRetryTaskInstanceMap.get(stateEvent.getTaskCode()); addTaskToStandByList(taskInstance); submitStandByTask(); waitToRetryTaskInstanceMap.remove(stateEvent.getTaskCode()); return true; } /** * update process instance */ public void refreshProcessInstance(int processInstanceId) { logger.info("process instance update: {}", processInstanceId); processInstance = processService.findProcessInstanceById(processInstanceId); processDefinition = 
processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); processInstance.setProcessDefinition(processDefinition); } /** * update task instance */ public void refreshTaskInstance(int taskInstanceId) { logger.info("task instance update: {} ", taskInstanceId); TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId); if (taskInstance == null) { logger.error("can not find task instance, id:{}", taskInstanceId); return; } processService.packageTaskInstance(taskInstance, processInstance); taskInstanceMap.put(taskInstance.getId(), taskInstance); validTaskMap.remove(taskInstance.getTaskCode()); if (Flag.YES == taskInstance.getFlag()) { validTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId()); } } /** * check process instance by state event */ public boolean checkProcessInstance(StateEvent stateEvent) { if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.error("mismatch process instance id: {}, state event:{}", this.processInstance.getId(), stateEvent); return false; } return true; } /** * check if task instance exist by state event */ public boolean checkTaskInstanceByStateEvent(StateEvent stateEvent) { if (stateEvent.getTaskInstanceId() == 0) { logger.error("task instance id null, state event:{}", stateEvent); return false; } if (!taskInstanceMap.containsKey(stateEvent.getTaskInstanceId())) { logger.error("mismatch task instance id, event:{}", stateEvent); return false; } return true; } /** * check if task instance exist by task code */ public boolean checkTaskInstanceByCode(long taskCode) { if (taskInstanceMap == null || taskInstanceMap.size() == 0) { return false; } for (TaskInstance taskInstance : taskInstanceMap.values()) { if (taskInstance.getTaskCode() == taskCode) { return true; } } return false; } /** * check if task instance exist by id */ public boolean checkTaskInstanceById(int taskInstanceId) { if (taskInstanceMap == null || taskInstanceMap.size() == 0) { return false; } return taskInstanceMap.containsKey(taskInstanceId); } /** * get task instance from memory */ public TaskInstance getTaskInstance(int taskInstanceId) { if (taskInstanceMap.containsKey(taskInstanceId)) { return taskInstanceMap.get(taskInstanceId); } return null; } public TaskInstance getActiveTaskInstanceByTaskCode(long taskCode) { if (activeTaskProcessorMaps.containsKey(taskCode)) { return activeTaskProcessorMaps.get(taskCode).taskInstance(); } return null; } public TaskInstance getRetryTaskInstanceByTaskCode(long taskCode) { if (waitToRetryTaskInstanceMap.containsKey(taskCode)) { return waitToRetryTaskInstanceMap.get(taskCode); } return null; } private boolean processStateChangeHandler(StateEvent stateEvent) { try { logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus()); if (stateEvent.getExecutionStatus() == ExecutionStatus.STOP) { this.updateProcessInstanceState(stateEvent); return true; } if (processComplementData()) { return true; } if (stateEvent.getExecutionStatus().typeIsFinished()) { endProcess(); } if (processInstance.getState() == ExecutionStatus.READY_STOP) { killAllTasks(); } return true; } catch (Exception e) { logger.error("process state change error:", e); } return true; } private boolean processComplementData() throws Exception { if (!needComplementProcess()) { return false; } if (processInstance.getState() == ExecutionStatus.READY_STOP) { return false; } Date scheduleDate = 
processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = complementListDate.get(0); } else if (processInstance.getState().typeIsFinished()) { endProcess(); if (complementListDate.size() <= 0) { logger.info("process complement end. process id:{}", processInstance.getId()); return true; } int index = complementListDate.indexOf(scheduleDate); if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) { logger.info("process complement end. process id:{}", processInstance.getId()); // complement data ends || no success return true; } logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}", processInstance.getId(), processInstance.getScheduleTime(), complementListDate.toString()); scheduleDate = complementListDate.get(index + 1); } //the next process complement int create = this.createComplementDataCommand(scheduleDate); if (create > 0) { logger.info("create complement data command successfully."); } return true; } private int createComplementDataCommand(Date scheduleDate) { Command command = new Command(); command.setScheduleTime(scheduleDate); command.setCommandType(CommandType.COMPLEMENT_DATA); command.setProcessDefinitionCode(processInstance.getProcessDefinitionCode()); Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); } cmdParam.replace(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.format(scheduleDate, "yyyy-MM-dd HH:mm:ss", null)); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); command.setTaskDependType(processInstance.getTaskDependType()); command.setFailureStrategy(processInstance.getFailureStrategy()); command.setWarningType(processInstance.getWarningType()); command.setWarningGroupId(processInstance.getWarningGroupId()); command.setStartTime(new Date()); command.setExecutorId(processInstance.getExecutorId()); command.setUpdateTime(new Date()); command.setProcessInstancePriority(processInstance.getProcessInstancePriority()); command.setWorkerGroup(processInstance.getWorkerGroup()); command.setEnvironmentCode(processInstance.getEnvironmentCode()); command.setDryRun(processInstance.getDryRun()); command.setProcessInstanceId(0); command.setProcessDefinitionVersion(processInstance.getProcessDefinitionVersion()); return processService.createCommand(command); } private boolean needComplementProcess() { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { return true; } return false; } /** * process start handle */ public void startProcess() { if (this.taskInstanceMap.size() > 0) { return; } try { isStart = false; buildFlowDag(); initTaskQueue(); submitPostNode(null); isStart = true; } catch (Exception e) { logger.error("start process error, process instance id:{}", processInstance.getId(), e); } } /** * process end handle */ private void endProcess() { this.stateEvents.clear(); if (processDefinition.getExecutionType().typeIsSerialWait()) { checkSerialProcess(processDefinition); } if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } if (processAlertManager.isNeedToSendWarning(processInstance)) { ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, getValidTaskList(), projectUser); } if 
(checkTaskQueue()) { //release task group processService.releaseAllTaskGroup(processInstance.getId()); } } public void checkSerialProcess(ProcessDefinition processDefinition) { int nextInstanceId = processInstance.getNextProcessInstanceId(); if (nextInstanceId == 0) { ProcessInstance nextProcessInstance = this.processService.loadNextProcess4Serial(processInstance.getProcessDefinition().getCode(), ExecutionStatus.SERIAL_WAIT.getCode()); if (nextProcessInstance == null) { return; } nextInstanceId = nextProcessInstance.getId(); } ProcessInstance nextProcessInstance = this.processService.findProcessInstanceById(nextInstanceId); if (nextProcessInstance.getState().typeIsFinished() || nextProcessInstance.getState().typeIsRunning()) { return; } Map<String, Object> cmdParam = new HashMap<>(); cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, nextInstanceId); Command command = new Command(); command.setCommandType(CommandType.RECOVER_SERIAL_WAIT); command.setProcessDefinitionCode(processDefinition.getCode()); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); processService.createCommand(command); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { if (this.dag != null) { return; } processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); processInstance.setProcessDefinition(processDefinition); List<TaskInstance> recoverNodeList = getStartTaskInstanceList(processInstance.getCommandParam()); List<ProcessTaskRelation> processTaskRelations = processService.findRelationByCode(processDefinition.getCode(), processDefinition.getVersion()); List<TaskDefinitionLog> taskDefinitionLogs = processService.getTaskDefineLogListByRelation(processTaskRelations); List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs); forbiddenTaskMap.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskMap.put(taskNode.getCode(), taskNode); } }); // generate process to get DAG info List<String> recoveryNodeCodeList = getRecoveryNodeCodeList(recoverNodeList); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNodeCodeList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskProcessorMaps.clear(); dependFailedTaskMap.clear(); completeTaskMap.clear(); errorTaskMap.clear(); if (!isNewProcessInstance()) { List<TaskInstance> validTaskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : validTaskInstanceList) { if (validTaskMap.containsKey(task.getTaskCode())) { int oldTaskInstanceId = validTaskMap.get(task.getTaskCode()); TaskInstance oldTaskInstance = taskInstanceMap.get(oldTaskInstanceId); if (!oldTaskInstance.getState().typeIsFinished() && task.getState().typeIsFinished()) { task.setFlag(Flag.NO); processService.updateTaskInstance(task); continue; } logger.warn("have same taskCode taskInstance when init task queue, taskCode:{}", task.getTaskCode()); } validTaskMap.put(task.getTaskCode(), task.getId()); taskInstanceMap.put(task.getId(), task); if (task.isTaskComplete()) { completeTaskMap.put(task.getTaskCode(), 
task.getId()); continue; } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) { continue; } if (task.taskCanRetry()) { if (task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE) { // tolerantTaskInstance add to standby list directly TaskInstance tolerantTaskInstance = cloneTolerantTaskInstance(task); addTaskToStandByList(tolerantTaskInstance); } else { retryTaskInstance(task); } continue; } if (task.getState().typeIsFailure()) { errorTaskMap.put(task.getTaskCode(), task.getId()); } } } if (processInstance.isComplementData() && complementListDate.size() == 0) { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); if (complementListDate.size() == 0 && needComplementProcess()) { complementListDate = CronUtils.getSelfFireDateList(start, end, schedules); logger.info(" process definition code:{} complement data: {}", processInstance.getProcessDefinitionCode(), complementListDate.toString()); if (complementListDate.size() > 0 && Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(complementListDate.get(0)); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processService.updateProcessInstance(processInstance); } } } } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { try { // package task instance before submit processService.packageTaskInstance(taskInstance, processInstance); ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType()); taskProcessor.init(taskInstance, processInstance); if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) { notifyProcessHostUpdate(taskInstance); } boolean submit = taskProcessor.action(TaskAction.SUBMIT); if (!submit) { logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!", processInstance.getId(), processInstance.getName(), taskInstance.getId(), taskInstance.getName()); return null; } // in a dag, only one taskInstance is valid per taskCode, so need to set the old taskInstance invalid if (validTaskMap.containsKey(taskInstance.getTaskCode())) { int oldTaskInstanceId = validTaskMap.get(taskInstance.getTaskCode()); if (taskInstance.getId() != oldTaskInstanceId) { TaskInstance oldTaskInstance = taskInstanceMap.get(oldTaskInstanceId); oldTaskInstance.setFlag(Flag.NO); processService.updateTaskInstance(oldTaskInstance); validTaskMap.remove(taskInstance.getTaskCode()); activeTaskProcessorMaps.remove(taskInstance.getTaskCode()); } } validTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId()); taskInstanceMap.put(taskInstance.getId(), taskInstance); activeTaskProcessorMaps.put(taskInstance.getTaskCode(), taskProcessor); taskProcessor.action(TaskAction.RUN); stateWheelExecuteThread.addTask4TimeoutCheck(processInstance, taskInstance); 
stateWheelExecuteThread.addTask4StateCheck(processInstance, taskInstance); if (taskProcessor.taskInstance().getState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskInstance().getState()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); this.stateEvents.add(stateEvent); } return taskInstance; } catch (Exception e) { logger.error("submit standby task error", e); return null; } } private void notifyProcessHostUpdate(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return; } try { HostUpdateCommand hostUpdateCommand = new HostUpdateCommand(); hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort())); hostUpdateCommand.setTaskInstanceId(taskInstance.getId()); Host host = new Host(taskInstance.getHost()); nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command()); } catch (Exception e) { logger.error("notify process host update", e); } } /** * find task instance in db. * in case submit more than one same name task in the same time. * * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> validTaskInstanceList = getValidTaskList(); for (TaskInstance taskInstance : validTaskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance != null) { return taskInstance; } return newTaskInstance(processInstance, taskNode); } /** * clone a new taskInstance for retry and reset some logic fields * @return */ public TaskInstance cloneRetryTaskInstance(TaskInstance taskInstance) { TaskNode taskNode = dag.getNode(Long.toString(taskInstance.getTaskCode())); if (taskNode == null) { logger.error("taskNode is null, code:{}", taskInstance.getTaskCode()); return null; } TaskInstance newTaskInstance = newTaskInstance(processInstance, taskNode); newTaskInstance.setTaskDefine(taskInstance.getTaskDefine()); newTaskInstance.setProcessDefine(taskInstance.getProcessDefine()); newTaskInstance.setProcessInstance(processInstance); newTaskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); // todo relative funtion: TaskInstance.retryTaskIntervalOverTime newTaskInstance.setState(taskInstance.getState()); newTaskInstance.setEndTime(taskInstance.getEndTime()); return newTaskInstance; } /** * clone a new taskInstance for tolerant and reset some logic fields * @return */ public TaskInstance cloneTolerantTaskInstance(TaskInstance taskInstance) { TaskNode taskNode = dag.getNode(Long.toString(taskInstance.getTaskCode())); if (taskNode == null) { logger.error("taskNode is null, code:{}", taskInstance.getTaskCode()); return null; } TaskInstance newTaskInstance = newTaskInstance(processInstance, taskNode); newTaskInstance.setTaskDefine(taskInstance.getTaskDefine()); newTaskInstance.setProcessDefine(taskInstance.getProcessDefine()); newTaskInstance.setProcessInstance(processInstance); newTaskInstance.setRetryTimes(taskInstance.getRetryTimes()); 
newTaskInstance.setState(taskInstance.getState()); return newTaskInstance; } /** * new a taskInstance * @param processInstance * @param taskNode * @return */ public TaskInstance newTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task dry run flag taskInstance.setDryRun(processInstance.getDryRun()); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); //set task group and priority taskInstance.setTaskGroupId(taskNode.getTaskGroupId()); taskInstance.setTaskGroupPriority(taskNode.getTaskGroupPriority()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode(); Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? 
processEnvironmentCode : taskNode.getEnvironmentCode(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); taskInstance.setEnvironmentCode(processEnvironmentCode); } else { taskInstance.setWorkerGroup(taskWorkerGroup); taskInstance.setEnvironmentCode(taskEnvironmentCode); } if (!taskInstance.getEnvironmentCode().equals(-1L)) { Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode()); if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) { taskInstance.setEnvironmentConfig(environment.getConfig()); } } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String, Property> allProperty = new HashMap<>(); Map<String, TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskCode : preTask) { Integer taskId = completeTaskMap.get(Long.parseLong(preTaskCode)); if (taskId == null) { continue; } TaskInstance preTaskInstance = taskInstanceMap.get(taskId); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. 
thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } /** * get complete task instance map, taskCode as key */ private Map<String, TaskInstance> getCompleteTaskInstanceMap() { Map<String, TaskInstance> completeTaskInstanceMap = new HashMap<>(); for (Integer taskInstanceId : completeTaskMap.values()) { TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId); completeTaskInstanceMap.put(Long.toString(taskInstance.getTaskCode()), taskInstance); } return completeTaskInstanceMap; } /** * get valid task list */ private List<TaskInstance> getValidTaskList() { List<TaskInstance> validTaskInstanceList = new ArrayList<>(); for (Integer taskInstanceId : validTaskMap.values()) { validTaskInstanceList.add(taskInstanceMap.get(taskInstanceId)); } return validTaskInstanceList; } private void submitPostNode(String parentNodeCode) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeMap, dag, getCompleteTaskInstanceMap()); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); if (checkTaskInstanceByCode(taskNodeObject.getCode())) { continue; } TaskInstance task = createTaskInstance(processInstance, taskNodeObject); taskInstances.add(task); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if (task.getId() > 0 && completeTaskMap.containsKey(task.getTaskCode())) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); continue; } addTaskToStandByList(task); } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskCode) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskCode)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskCode); List<String> indirectDepCodeList = new ArrayList<>(); setIndirectDepList(taskCode, indirectDepCodeList); for (String depsNode : indirectDepCodeList) { if (dag.containsNode(depsNode) && !skipTaskNodeMap.containsKey(depsNode)) { // dependencies must be fully completed Long despNodeTaskCode = 
Long.parseLong(depsNode); if (!completeTaskMap.containsKey(despNodeTaskCode)) { return DependResult.WAITING; } Integer depsTaskId = completeTaskMap.get(despNodeTaskCode); ExecutionStatus depTaskState = taskInstanceMap.get(depsTaskId).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskCode)) { return DependResult.FAILED; } } } logger.info("taskCode: {} completeDependTaskList: {}", taskCode, Arrays.toString(completeTaskMap.keySet().toArray())); return DependResult.SUCCESS; } /** * This function is specially used to handle the dependency situation where the parent node is a prohibited node. * When the parent node is a forbidden node, the dependency relationship should continue to be traced * * @param taskCode taskCode * @param indirectDepCodeList All indirectly dependent nodes */ private void setIndirectDepList(String taskCode, List<String> indirectDepCodeList) { TaskNode taskNode = dag.getNode(taskCode); List<String> depCodeList = taskNode.getDepList(); for (String depsNode : depCodeList) { if (forbiddenTaskMap.containsKey(Long.parseLong(depsNode))) { setIndirectDepList(depsNode, indirectDepCodeList); } else { indirectDepCodeList.add(depsNode); } } } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeMap, dag, getCompleteTaskInstanceMap()); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { long taskCode = Long.parseLong(dependNodeName); Integer taskInstanceId = completeTaskMap.get(taskCode); ExecutionStatus depTaskState = taskInstanceMap.get(taskInstanceId).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Integer taskInstanceId : completeTaskMap.values()) { TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId); if (taskInstance != null && taskInstance.getState() == state) { resultList.add(taskInstance); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskMap.size() > 0) { return true; } return this.dependFailedTaskMap.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == FailureStrategy.END) { return true; } 
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 && activeTaskProcessorMaps.size() == 0 && waitToRetryTaskInstanceMap.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); List<TaskInstance> failList = getCompleteTaskByState(ExecutionStatus.FAILURE); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || CollectionUtils.isNotEmpty(failList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0 || waitToRetryTaskInstanceMap.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ExecutionStatus state = 
getProcessInstanceState(processInstance); if (processInstance.getState() != state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); processInstance.setState(state); if (state.typeIsFinished()) { processInstance.setEndTime(new Date()); } processService.updateProcessInstance(processInstance); StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * stateEvent's execution status as process instance state */ private void updateProcessInstanceState(StateEvent stateEvent) { ExecutionStatus state = stateEvent.getExecutionStatus(); if (processInstance.getState() != state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); processInstance.setState(state); if (state.typeIsFinished()) { processInstance.setEndTime(new Date()); } processService.updateProcessInstance(processInstance); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(Long.toString(taskInstance.getTaskCode())); } /** * add task to standby list * * @param taskInstance task instance */ private void addTaskToStandByList(TaskInstance taskInstance) { try { if (readyToSubmitTaskQueue.contains(taskInstance)) { logger.warn("task was found in ready submit queue, task code:{}", taskInstance.getTaskCode()); return; } // need to check if the tasks with same task code is active boolean active = hadNotFailTask(taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); if (active) { logger.warn("task was found in active task list, task code:{}", taskInstance.getTaskCode()); return; } logger.info("add task to stand by list, task name:{}, task id:{}, task code:{}", taskInstance.getName(), taskInstance.getId(), taskInstance.getTaskCode()); readyToSubmitTaskQueue.put(taskInstance); } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue, taskName:{}, task id:{}", taskInstance.getName(), taskInstance.getId(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the on going tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); if (readyToSubmitTaskQueue.size() > 0) { 
readyToSubmitTaskQueue.clear(); } for (long taskCode : activeTaskProcessorMaps.keySet()) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskCode); Integer taskInstanceId = validTaskMap.get(taskCode); if (taskInstanceId == null || taskInstanceId.equals(0)) { continue; } TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskInstance().getState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskInstance().getState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskMap.put(task.getTaskCode(), task.getId()); taskInstanceMap.put(task.getId(), task); submitPostNode(Long.toString(task.getTaskCode())); continue; } } //init varPool only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(Long.toString(task.getTaskCode())); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. 
dependFailedTaskMap.put(task.getTaskCode(), task.getId()); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance list * * @param taskIdArray task id array * @return recovery task instance list */ private List<TaskInstance> getRecoverTaskInstanceList(String[] taskIdArray) { if (taskIdArray == null || taskIdArray.length == 0) { return new ArrayList<>(); } List<Integer> taskIdList = new ArrayList<>(taskIdArray.length); for (String taskId : taskIdArray) { try { Integer id = Integer.valueOf(taskId); taskIdList.add(id); } catch (Exception e) { logger.error("get recovery task instance failed ", e); } } return processService.findTaskInstanceByIdList(taskIdList); } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); instanceList = getRecoverTaskInstanceList(idList); } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODES)) { startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node code list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node code list */ private List<String> getRecoveryNodeCodeList(List<TaskInstance> recoverNodeList) { List<String> recoveryNodeCodeList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeList)) { for (TaskInstance task : recoverNodeList) { recoveryNodeCodeList.add(Long.toString(task.getTaskCode())); } } return recoveryNodeCodeList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeCodeList recovery node code list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeCodeList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType); } /** * check task queue */ private boolean checkTaskQueue() { AtomicBoolean result = new AtomicBoolean(false); taskInstanceMap.forEach((id, taskInstance) -> { if (taskInstance != null && taskInstance.getTaskGroupId() > 0) { result.set(true); } }); return result.get(); } /** * is new process instance */ private 
boolean isNewProcessInstance() { if (ExecutionStatus.RUNNING_EXECUTION == processInstance.getState() && processInstance.getRunTimes() == 1) { return true; } else { return false; } } /** * check if had not fail task by taskCode and version * @param taskCode * @param version * @return */ private boolean hadNotFailTask(long taskCode, int version) { boolean result = false; for (Entry<Integer, TaskInstance> entry : taskInstanceMap.entrySet()) { TaskInstance taskInstance = entry.getValue(); if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == version) { if (!taskInstance.getState().typeIsFailure()) { result = true; break; } } } return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,598
[Bug][UI Next][V1.0.0-Alpha] Workflow instance menu readonly bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Dag node menu can't be clicked when I operate the workflow instance ![image](https://user-images.githubusercontent.com/8847400/156115287-b82a84ae-293a-45dc-82fa-ac09c69b4749.png) ### What you expected to happen `start` and other buttons should allow clicking. ### How to reproduce Open workflow -> workflow instance. Right-click the dag node. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8598
https://github.com/apache/dolphinscheduler/pull/8603
09f70353b8c7fe73efac9674998120f11f398a51
985b4e85208ce2b1ca0e7afeacf9b7f3a5ee4d98
"2022-03-01T06:18:37Z"
java
"2022-03-01T08:59:35Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/dag-context-menu.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { genTaskCodeList } from '@/service/modules/task-definition' import type { Cell } from '@antv/x6' import { defineComponent, onMounted, PropType, inject, ref, computed } from 'vue' import { useI18n } from 'vue-i18n' import { useRoute } from 'vue-router' import styles from './menu.module.scss' import { uuid } from '@/utils/common' const props = { cell: { type: Object as PropType<Cell>, require: true }, taskList: { type: Array as PropType<Array<any>>, default: [] }, visible: { type: Boolean as PropType<boolean>, default: true }, left: { type: Number as PropType<number>, default: 0 }, top: { type: Number as PropType<number>, default: 0 }, releaseState: { type: String as PropType<string>, default: 'OFFLINE' } } export default defineComponent({ name: 'dag-context-menu', props, emits: ['hide', 'start', 'edit', 'viewLog', 'copyTask', 'removeTasks'], setup(props, ctx) { const graph = inject('graph', ref()) const route = useRoute() const projectCode = Number(route.params.projectCode) const startAvailable = computed( () => route.name === 'workflow-definition-detail' && props.releaseState !== 'NOT_RELEASE' ) const hide = () => { ctx.emit('hide', false) } const startRunning = () => { ctx.emit('start') } const handleEdit = () => { ctx.emit('edit', Number(props.cell?.id)) } const handleViewLog = () => { const taskCode = Number(props.cell?.id) const taskInstance = props.taskList.find( (task: any) => task.taskCode === taskCode ) if (taskInstance) { ctx.emit('viewLog', taskInstance.id, taskInstance.taskType) } } const handleCopy = () => { const genNums = 1 const type = props.cell?.data.taskType const taskName = uuid(props.cell?.data.taskName + '_') const targetCode = Number(props.cell?.id) const flag = props.cell?.data.flag genTaskCodeList(genNums, projectCode).then((res) => { const [code] = res ctx.emit('copyTask', taskName, code, targetCode, type, flag, { x: props.left + 100, y: props.top + 100 }) }) } const handleDelete = () => { graph.value?.removeCell(props.cell) ctx.emit('removeTasks', [Number(props.cell?.id)]) } onMounted(() => { document.addEventListener('click', () => { hide() }) }) return { startAvailable, startRunning, handleEdit, handleCopy, handleDelete, handleViewLog } }, render() { const { t } = useI18n() return ( this.visible && ( <div class={styles['dag-context-menu']} style={{ left: `${this.left}px`, top: `${this.top}px` }} > <div class={`${styles['menu-item']} ${ !this.startAvailable ? styles['disabled'] : '' } `} onClick={this.startRunning} > {t('project.node.start')} </div> <div class={`${styles['menu-item']} ${ this.releaseState === 'ONLINE' ? styles['disabled'] : '' } `} onClick={this.handleEdit} > {t('project.node.edit')} </div> <div class={`${styles['menu-item']} ${ this.releaseState === 'ONLINE' ? 
styles['disabled'] : '' } `} onClick={this.handleCopy} > {t('project.node.copy')} </div> <div class={`${styles['menu-item']} ${ this.releaseState === 'ONLINE' ? styles['disabled'] : '' } `} onClick={this.handleDelete} > {t('project.node.delete')} </div> {this.taskList.length > 0 && ( <div class={`${styles['menu-item']}`} onClick={this.handleViewLog}> {t('project.node.view_log')} </div> )} </div> ) ) } })
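In the pre-fix snapshot of `dag-context-menu.tsx` above, `startAvailable` is true only on the `workflow-definition-detail` route and the remaining items are disabled whenever `releaseState` is `ONLINE`, which is why the menu behaves as read-only on the workflow-instance page described in issue #8598. Below is a minimal, framework-free sketch of how per-item availability could instead depend on the page the DAG was opened from; `getMenuItemStates`, `DagSource`, and the enabling rules are illustrative assumptions, not the actual change merged in PR #8603.

```ts
// Hypothetical helper, for illustration only; names and rules are assumptions.
type DagSource = 'definition' | 'instance'

interface MenuItemStates {
  start: boolean
  edit: boolean
  copy: boolean
  remove: boolean
  viewLog: boolean
}

// Decide which context-menu items stay clickable for a given page and release state.
export function getMenuItemStates(
  source: DagSource,
  releaseState: string,
  hasTaskInstance: boolean
): MenuItemStates {
  if (source === 'instance') {
    // On the instance page the definition's release state should not lock the menu,
    // so "start" and friends stay clickable, and "view log" needs a task instance.
    return { start: true, edit: true, copy: true, remove: true, viewLog: hasTaskInstance }
  }
  // On the definition page keep the original rules: editing is blocked once online.
  const editable = releaseState !== 'ONLINE'
  return {
    start: releaseState !== 'NOT_RELEASE',
    edit: editable,
    copy: editable,
    remove: editable,
    viewLog: false
  }
}
```

For example, `getMenuItemStates('instance', 'ONLINE', true)` returns every item as enabled, which matches the behaviour the issue expects.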
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,598
[Bug][UI Next][V1.0.0-Alpha] Workflow instance menu readonly bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Dag node menu can't be clicked when I operate the workflow instance ![image](https://user-images.githubusercontent.com/8847400/156115287-b82a84ae-293a-45dc-82fa-ac09c69b4749.png) ### What you expected to happen `start` and other buttons should allow clicking. ### How to reproduce Open workflow -> workflow instance. Right-click the dag node. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8598
https://github.com/apache/dolphinscheduler/pull/8603
09f70353b8c7fe73efac9674998120f11f398a51
985b4e85208ce2b1ca0e7afeacf9b7f3a5ee4d98
"2022-03-01T06:18:37Z"
java
"2022-03-01T08:59:35Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/index.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Graph } from '@antv/x6' import { defineComponent, ref, provide, PropType, toRef, watch, onBeforeUnmount } from 'vue' import { useI18n } from 'vue-i18n' import DagToolbar from './dag-toolbar' import DagCanvas from './dag-canvas' import DagSidebar from './dag-sidebar' import Styles from './dag.module.scss' import DagAutoLayoutModal from './dag-auto-layout-modal' import { useGraphAutoLayout, useGraphBackfill, useDagDragAndDrop, useTaskEdit, useBusinessMapper, useNodeMenu, useNodeStatus } from './dag-hooks' import { useThemeStore } from '@/store/theme/theme' import VersionModal from '../../definition/components/version-modal' import { WorkflowDefinition } from './types' import DagSaveModal from './dag-save-modal' import ContextMenuItem from './dag-context-menu' import TaskModal from '@/views/projects/task/components/node/detail-modal' import StartModal from '@/views/projects/workflow/definition/components/start-modal' import LogModal from '@/views/projects/workflow/instance/components/log-modal' import './x6-style.scss' const props = { // If this prop is passed, it means from definition detail instance: { type: Object as PropType<any>, default: undefined }, definition: { type: Object as PropType<WorkflowDefinition>, default: undefined }, readonly: { type: Boolean as PropType<boolean>, default: false }, projectCode: { type: Number as PropType<number>, default: 0 } } export default defineComponent({ name: 'workflow-dag', props, emits: ['refresh', 'save'], setup(props, context) { const { t } = useI18n() const theme = useThemeStore() // Whether the graph can be operated provide('readonly', toRef(props, 'readonly')) const graph = ref<Graph>() provide('graph', graph) // Auto layout modal const { visible: layoutVisible, toggle: layoutToggle, formValue, formRef, submit, cancel } = useGraphAutoLayout({ graph }) // Edit task const { taskConfirm, taskModalVisible, currTask, taskCancel, appendTask, editTask, copyTask, taskDefinitions, removeTasks } = useTaskEdit({ graph, definition: toRef(props, 'definition') }) // Right click cell const { menuCell, pageX, pageY, menuVisible, startModalShow, logModalShow, logViewTaskId, logViewTaskType, menuHide, menuStart, viewLog, hideLog } = useNodeMenu({ graph }) const statusTimerRef = ref() const { taskList, refreshTaskStatus } = useNodeStatus({ graph }) const { onDragStart, onDrop } = useDagDragAndDrop({ graph, readonly: toRef(props, 'readonly'), appendTask }) // backfill useGraphBackfill({ graph, definition: toRef(props, 'definition') }) // version modal const versionModalShow = ref(false) const versionToggle = (bool: boolean) => { if (typeof bool === 'boolean') { versionModalShow.value = bool } else { versionModalShow.value = !versionModalShow.value } } const refreshDetail = () => { 
context.emit('refresh') versionModalShow.value = false } // Save modal const saveModalShow = ref(false) const saveModelToggle = (bool: boolean) => { if (typeof bool === 'boolean') { saveModalShow.value = bool } else { saveModalShow.value = !versionModalShow.value } } const { getConnects, getLocations } = useBusinessMapper() const onSave = (saveForm: any) => { const edges = graph.value?.getEdges() || [] const nodes = graph.value?.getNodes() || [] if (!nodes.length) { window.$message.error(t('project.dag.node_not_created')) saveModelToggle(false) return } const connects = getConnects(nodes, edges, taskDefinitions.value as any) const locations = getLocations(nodes) context.emit('save', { taskDefinitions: taskDefinitions.value, saveForm, connects, locations }) saveModelToggle(false) } watch( () => props.definition, () => { if (props.instance) { refreshTaskStatus() statusTimerRef.value = setInterval(() => refreshTaskStatus(), 90000) } } ) onBeforeUnmount(() => clearInterval(statusTimerRef.value)) return () => ( <div class={[ Styles.dag, Styles[`dag-${theme.darkTheme ? 'dark' : 'light'}`] ]} > <DagToolbar layoutToggle={layoutToggle} instance={props.instance} definition={props.definition} onVersionToggle={versionToggle} onSaveModelToggle={saveModelToggle} onRemoveTasks={removeTasks} onRefresh={refreshTaskStatus} /> <div class={Styles.content}> <DagSidebar onDragStart={onDragStart} /> <DagCanvas onDrop={onDrop} /> </div> <DagAutoLayoutModal visible={layoutVisible.value} submit={submit} cancel={cancel} formValue={formValue} formRef={formRef} /> {!!props.definition && ( <VersionModal v-model:row={props.definition.processDefinition} v-model:show={versionModalShow.value} onUpdateList={refreshDetail} /> )} <DagSaveModal v-model:show={saveModalShow.value} onSave={onSave} definition={props.definition} /> <TaskModal readonly={props.readonly} show={taskModalVisible.value} projectCode={props.projectCode} data={currTask.value as any} onSubmit={taskConfirm} onCancel={taskCancel} /> <ContextMenuItem taskList={taskList.value} cell={menuCell.value} visible={menuVisible.value} left={pageX.value} top={pageY.value} releaseState={props.definition?.processDefinition.releaseState} onHide={menuHide} onStart={menuStart} onEdit={editTask} onCopyTask={copyTask} onRemoveTasks={removeTasks} onViewLog={viewLog} /> {!!props.definition && ( <StartModal v-model:row={props.definition.processDefinition} v-model:show={startModalShow.value} /> )} {!!props.instance && logModalShow.value && ( <LogModal taskInstanceId={logViewTaskId.value} taskInstanceType={logViewTaskType.value} onHideLog={hideLog} /> )} </div> ) } })
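`index.tsx` above starts a 90-second `setInterval` around `refreshTaskStatus` when an `instance` prop is present and clears it in `onBeforeUnmount`. The snippet below is a small, self-contained sketch of that start/stop pattern; `createPoller` is a hypothetical name and not part of the file.

```ts
// Hypothetical polling helper mirroring the watch + setInterval + onBeforeUnmount pair above.
export function createPoller(tick: () => void, intervalMs = 90_000) {
  let timer: ReturnType<typeof setInterval> | undefined

  return {
    // Run once immediately, then repeat on the interval.
    start() {
      if (timer !== undefined) return
      tick()
      timer = setInterval(tick, intervalMs)
    },
    // Stop and forget the timer so start() can be called again later.
    stop() {
      if (timer !== undefined) {
        clearInterval(timer)
        timer = undefined
      }
    }
  }
}
```

Wrapping the timer this way keeps the cleanup in one place, which is the same concern the component handles with `statusTimerRef` and `onBeforeUnmount`.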
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,598
[Bug][UI Next][V1.0.0-Alpha] Workflow instance menu readonly bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Dag node menu can't be clicked when I operate the workflow instance ![image](https://user-images.githubusercontent.com/8847400/156115287-b82a84ae-293a-45dc-82fa-ac09c69b4749.png) ### What you expected to happen `start` and other buttons should allow clicking. ### How to reproduce Open workflow -> workflow instance. Right-click the dag node. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8598
https://github.com/apache/dolphinscheduler/pull/8603
09f70353b8c7fe73efac9674998120f11f398a51
985b4e85208ce2b1ca0e7afeacf9b7f3a5ee4d98
"2022-03-01T06:18:37Z"
java
"2022-03-01T08:59:35Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/types.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { TaskType } from '@/views/projects/task/constants/task-type' export interface ProcessDefinition { id: number code: number name: string version: number releaseState: string projectCode: number description: string globalParams: string globalParamList: any[] globalParamMap: any createTime: string updateTime: string flag: string userId: number userName?: any projectName?: any locations: string scheduleReleaseState?: any timeout: number tenantId: number tenantCode: string modifyBy?: any warningGroupId: number } export interface Connect { id?: number name: string processDefinitionVersion?: number projectCode?: number processDefinitionCode?: number preTaskCode: number preTaskVersion: number postTaskCode: number postTaskVersion: number conditionType: string conditionParams: any createTime?: string updateTime?: string } export interface TaskDefinition { id: number code: number name: string version: number description: string projectCode: any userId: number taskType: TaskType taskParams: any taskParamList: any[] taskParamMap: any flag: string taskPriority: string userName: any projectName?: any workerGroup: string environmentCode: number failRetryTimes: number failRetryInterval: number timeoutFlag: 'OPEN' | 'CLOSE' timeoutNotifyStrategy: string timeout: number delayTime: number resourceIds: string createTime: string updateTime: string modifyBy: any dependence: string } export type NodeData = { code: number taskType: TaskType name: string } & Partial<TaskDefinition> export interface WorkflowDefinition { processDefinition: ProcessDefinition processTaskRelationList: Connect[] taskDefinitionList: TaskDefinition[] } export interface Dragged { x: number y: number type: TaskType } export interface Coordinate { x: number y: number } export interface GlobalParam { key: string value: string } export interface SaveForm { name: string description: string tenantCode: string timeoutFlag: boolean timeout: number globalParams: GlobalParam[] release: boolean } export interface Location { taskCode: number x: number y: number } export interface IStartupParam { commandType: string commandParam: string failureStrategy: string processInstancePriority: string workerGroup: string warningType: string warningGroupId: number }
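`types.ts` above only declares the shapes of a workflow payload, so a short consumer sketch can make the split between `taskDefinitionList` and `processTaskRelationList` concrete. The `Lite` interfaces below are local structural subsets of the types in the record, and the assumption that `preTaskCode` 0 marks a relation with no upstream task is mine rather than something stated in the file.

```ts
// Local structural subsets of the payload types shown in the record above.
interface TaskDefinitionLite {
  code: number
  name: string
}

interface ConnectLite {
  preTaskCode: number
  postTaskCode: number
}

interface WorkflowDefinitionLite {
  taskDefinitionList: TaskDefinitionLite[]
  processTaskRelationList: ConnectLite[]
}

// Build a lookup of downstream task names per task code from the relation list.
export function downstreamByTask(def: WorkflowDefinitionLite): Map<number, string[]> {
  const nameByCode = new Map<number, string>()
  for (const task of def.taskDefinitionList) {
    nameByCode.set(task.code, task.name)
  }
  const result = new Map<number, string[]>()
  for (const rel of def.processTaskRelationList) {
    // Assumption: preTaskCode 0 is a root relation with no upstream task.
    if (rel.preTaskCode === 0) continue
    const names = result.get(rel.preTaskCode) ?? []
    names.push(nameByCode.get(rel.postTaskCode) ?? String(rel.postTaskCode))
    result.set(rel.preTaskCode, names)
  }
  return result
}
```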
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,598
[Bug][UI Next][V1.0.0-Alpha] Workflow instance menu readonly bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Dag node menu can't be clicked when I operate the workflow instance ![image](https://user-images.githubusercontent.com/8847400/156115287-b82a84ae-293a-45dc-82fa-ac09c69b4749.png) ### What you expected to happen `start` and other buttons should allow clicking. ### How to reproduce Open workflow -> workflow instance. Right-click the dag node. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8598
https://github.com/apache/dolphinscheduler/pull/8603
09f70353b8c7fe73efac9674998120f11f398a51
985b4e85208ce2b1ca0e7afeacf9b7f3a5ee4d98
"2022-03-01T06:18:37Z"
java
"2022-03-01T08:59:35Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/use-node-menu.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Ref } from 'vue' import { onMounted, ref } from 'vue' import type { Graph, Cell } from '@antv/x6' interface Options { graph: Ref<Graph | undefined> } /** * Get position of the right-clicked Cell. */ export function useNodeMenu(options: Options) { const { graph } = options const startModalShow = ref(false) const logModalShow = ref(false) const logViewTaskId = ref() const logViewTaskType = ref() const menuVisible = ref(false) const pageX = ref() const pageY = ref() const menuCell = ref<Cell>() const menuHide = () => { menuVisible.value = false // unlock scroller graph.value?.unlockScroller() } const menuStart = () => { startModalShow.value = true } const viewLog = (taskId: number, taskType: string) => { logViewTaskId.value = taskId logViewTaskType.value = taskType logModalShow.value = true } const hideLog = () => { logModalShow.value = false } onMounted(() => { if (graph.value) { // contextmenu graph.value.on('node:contextmenu', ({ cell, x, y }) => { menuCell.value = cell const data = graph.value?.localToPage(x, y) pageX.value = data?.x pageY.value = data?.y // show menu menuVisible.value = true // lock scroller graph.value?.lockScroller() }) } }) return { pageX, pageY, startModalShow, logModalShow, logViewTaskId, logViewTaskType, menuVisible, menuCell, menuHide, menuStart, viewLog, hideLog } }
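`use-node-menu.ts` above is essentially bookkeeping around the `node:contextmenu` event: remember the cell, convert local coordinates to page coordinates, show the menu, and lock the scroller until it is hidden. The sketch below restates that bookkeeping without Vue or X6; `createMenuState` and its fields are illustrative names only.

```ts
// Framework-free sketch of the state the hook keeps in refs; illustrative only.
interface MenuState {
  visible: boolean
  x: number
  y: number
  cellId?: string
}

export function createMenuState() {
  const state: MenuState = { visible: false, x: 0, y: 0 }

  return {
    state,
    // Mirrors the 'node:contextmenu' handler: remember the cell and page position, then show.
    open(cellId: string, pageX: number, pageY: number) {
      state.cellId = cellId
      state.x = pageX
      state.y = pageY
      state.visible = true
    },
    // Mirrors menuHide(): only the visibility flag changes; the position is kept until the next open.
    close() {
      state.visible = false
    }
  }
}
```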
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,598
[Bug][UI Next][V1.0.0-Alpha] Workflow instance menu readonly bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Dag node menu can't be clicked when I operate the workflow instance ![image](https://user-images.githubusercontent.com/8847400/156115287-b82a84ae-293a-45dc-82fa-ac09c69b4749.png) ### What you expected to happen `start` and other buttons should allow clicking. ### How to reproduce Open workflow -> workflow instance. Right-click the dag node. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8598
https://github.com/apache/dolphinscheduler/pull/8603
09f70353b8c7fe73efac9674998120f11f398a51
985b4e85208ce2b1ca0e7afeacf9b7f3a5ee4d98
"2022-03-01T06:18:37Z"
java
"2022-03-01T08:59:35Z"
dolphinscheduler-ui-next/src/views/projects/workflow/components/dag/use-node-status.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Ref } from 'vue' import { render, h, ref } from 'vue' import { useRoute } from 'vue-router' import { useI18n } from 'vue-i18n' import type { Graph } from '@antv/x6' import { tasksState } from '@/utils/common' import { NODE, NODE_STATUS_MARKUP } from './dag-config' import { queryTaskListByProcessId } from '@/service/modules/process-instances' import NodeStatus from '@/views/projects/workflow/components/dag/dag-node-status' interface Options { graph: Ref<Graph | undefined> } /** * Node status and tooltip */ export function useNodeStatus(options: Options) { const { graph } = options const route = useRoute() const taskList = ref<Array<Object>>([]) const { t } = useI18n() const setNodeStatus = (code: string, state: string, taskInstance: any) => { const stateProps = tasksState(t)[state] const node = graph.value?.getCellById(code) if (node) { // Destroy the previous dom node.removeMarkup() node.setMarkup(NODE.markup.concat(NODE_STATUS_MARKUP)) const nodeView = graph.value?.findViewByCell(node) const el = nodeView?.find('div')[0] const a = h(NodeStatus, { t, taskInstance, stateProps }) render(a, el as HTMLElement) } } /** * Task status */ const refreshTaskStatus = () => { const projectCode = Number(route.params.projectCode) const instanceId = Number(route.params.id) queryTaskListByProcessId(instanceId, projectCode).then((res: any) => { window.$message.success(t('project.workflow.refresh_status_succeeded')) taskList.value = res.taskList if (taskList.value) { taskList.value.forEach((taskInstance: any) => { setNodeStatus(taskInstance.taskCode, taskInstance.state, taskInstance) }) } }) } return { taskList, refreshTaskStatus } }
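In `use-node-status.ts` above, `refreshTaskStatus` fetches the task instances of the current process instance and paints a status badge on each DAG node. The core of that loop is an index from task code to state; the sketch below isolates it as a pure function under the hypothetical name `indexStateByTaskCode`, with simplified field types.

```ts
// Pure-function sketch of the lookup the status refresh builds implicitly; field types simplified.
interface TaskInstanceLite {
  taskCode: string
  state: string
}

// Index the latest state by task code so each DAG node can look up its badge.
export function indexStateByTaskCode(taskList: TaskInstanceLite[]): Record<string, string> {
  const byCode: Record<string, string> = {}
  for (const task of taskList) {
    byCode[task.taskCode] = task.state
  }
  return byCode
}
```

Since the record registers each node cell under its task code (`getCellById(code)`), such an index could be consulted directly while rendering each cell.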
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,617
[Bug][UI Next][V1.0.0-Alpha] Workflow version info language internationalization
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Version info language internationalization was not handled in workflow. ![image](https://user-images.githubusercontent.com/8847400/156130132-968ec1c0-ba81-4331-960a-41d626a13f5c.png) ### What you expected to happen Display English ### How to reproduce Open workflow definition list and click version info ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8617
https://github.com/apache/dolphinscheduler/pull/8630
985b4e85208ce2b1ca0e7afeacf9b7f3a5ee4d98
102828409f0c31a1f5d5372d8881a6e76ce9b7b1
"2022-03-01T08:14:42Z"
java
"2022-03-01T10:26:28Z"
dolphinscheduler-ui-next/src/locales/modules/en_US.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ const login = { test: 'Test', userName: 'Username', userName_tips: 'Please enter your username', userPassword: 'Password', userPassword_tips: 'Please enter your password', login: 'Login' } const modal = { cancel: 'Cancel', confirm: 'Confirm' } const theme = { light: 'Light', dark: 'Dark' } const userDropdown = { profile: 'Profile', password: 'Password', logout: 'Logout' } const menu = { home: 'Home', project: 'Project', resources: 'Resources', datasource: 'Datasource', monitor: 'Monitor', security: 'Security', project_overview: 'Project Overview', workflow_relation: 'Workflow Relation', workflow: 'Workflow', workflow_definition: 'Workflow Definition', workflow_instance: 'Workflow Instance', task: 'Task', task_instance: 'Task Instance', task_definition: 'Task Definition', file_manage: 'File Manage', udf_manage: 'UDF Manage', resource_manage: 'Resource Manage', function_manage: 'Function Manage', service_manage: 'Service Manage', master: 'Master', worker: 'Worker', db: 'DB', statistical_manage: 'Statistical Manage', statistics: 'Statistics', audit_log: 'Audit Log', tenant_manage: 'Tenant Manage', user_manage: 'User Manage', alarm_group_manage: 'Alarm Group Manage', alarm_instance_manage: 'Alarm Instance Manage', worker_group_manage: 'Worker Group Manage', yarn_queue_manage: 'Yarn Queue Manage', environment_manage: 'Environment Manage', k8s_namespace_manage: 'K8S Namespace Manage', token_manage: 'Token Manage', task_group_manage: 'Task Group Manage', task_group_option: 'Task Group Option', task_group_queue: 'Task Group Queue', data_quality: 'Data Quality', task_result: 'Task Result', rule: 'Rule management' } const home = { task_state_statistics: 'Task State Statistics', process_state_statistics: 'Process State Statistics', process_definition_statistics: 'Process Definition Statistics', number: 'Number', state: 'State' } const password = { edit_password: 'Edit Password', password: 'Password', confirm_password: 'Confirm Password', password_tips: 'Please enter your password', confirm_password_tips: 'Please enter your confirm password', two_password_entries_are_inconsistent: 'Two password entries are inconsistent', submit: 'Submit' } const profile = { profile: 'Profile', edit: 'Edit', username: 'Username', email: 'Email', phone: 'Phone', state: 'State', permission: 'Permission', create_time: 'Create Time', update_time: 'Update Time', administrator: 'Administrator', ordinary_user: 'Ordinary User', edit_profile: 'Edit Profile', username_tips: 'Please enter your username', email_tips: 'Please enter your email', email_correct_tips: 'Please enter your email in the correct format', phone_tips: 'Please enter your phone', state_tips: 'Please choose your state', enable: 'Enable', disable: 'Disable' } const monitor = { master: { cpu_usage: 
'CPU Usage', memory_usage: 'Memory Usage', load_average: 'Load Average', create_time: 'Create Time', last_heartbeat_time: 'Last Heartbeat Time', directory_detail: 'Directory Detail', host: 'Host', directory: 'Directory' }, worker: { cpu_usage: 'CPU Usage', memory_usage: 'Memory Usage', load_average: 'Load Average', create_time: 'Create Time', last_heartbeat_time: 'Last Heartbeat Time', directory_detail: 'Directory Detail', host: 'Host', directory: 'Directory' }, db: { health_state: 'Health State', max_connections: 'Max Connections', threads_connections: 'Threads Connections', threads_running_connections: 'Threads Running Connections' }, statistics: { command_number_of_waiting_for_running: 'Command Number Of Waiting For Running', failure_command_number: 'Failure Command Number', tasks_number_of_waiting_running: 'Tasks Number Of Waiting Running', task_number_of_ready_to_kill: 'Task Number Of Ready To Kill' }, audit_log: { user_name: 'User Name', resource_type: 'Resource Type', project_name: 'Project Name', operation_type: 'Operation Type', create_time: 'Create Time', start_time: 'Start Time', end_time: 'End Time', user_audit: 'User Audit', project_audit: 'Project Audit', create: 'Create', update: 'Update', delete: 'Delete', read: 'Read' } } const resource = { file: { file_manage: 'File Manage', create_folder: 'Create Folder', create_file: 'Create File', upload_files: 'Upload Files', enter_keyword_tips: 'Please enter keyword', id: '#', name: 'Name', user_name: 'Resource userName', whether_directory: 'Whether directory', file_name: 'File Name', description: 'Description', size: 'Size', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', rename: 'Rename', download: 'Download', delete: 'Delete', yes: 'Yes', no: 'No', folder_name: 'Folder Name', enter_name_tips: 'Please enter name', enter_description_tips: 'Please enter description', enter_content_tips: 'Please enter the resource content', file_format: 'File Format', file_content: 'File Content', delete_confirm: 'Delete?', confirm: 'Confirm', cancel: 'Cancel', success: 'Success', file_details: 'File Details', return: 'Return', save: 'Save' }, udf: { udf_resources: 'UDF resources', create_folder: 'Create Folder', upload_udf_resources: 'Upload UDF Resources', id: '#', udf_source_name: 'UDF Resource Name', whether_directory: 'Whether directory', file_name: 'File Name', file_size: 'File Size', description: 'Description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', yes: 'Yes', no: 'No', edit: 'Edit', download: 'Download', delete: 'Delete', delete_confirm: 'Delete?', success: 'Success', folder_name: 'Folder Name', upload: 'Upload', upload_files: 'Upload Files', file_upload: 'File Upload', enter_keyword_tips: 'Please enter keyword', enter_name_tips: 'Please enter name', enter_description_tips: 'Please enter description' }, function: { udf_function: 'UDF Function', create_udf_function: 'Create UDF Function', edit_udf_function: 'Create UDF Function', id: '#', udf_function_name: 'UDF Function Name', class_name: 'Class Name', type: 'Type', description: 'Description', jar_package: 'Jar Package', update_time: 'Update Time', operation: 'Operation', rename: 'Rename', edit: 'Edit', delete: 'Delete', success: 'Success', package_name: 'Package Name', udf_resources: 'UDF Resources', instructions: 'Instructions', upload_resources: 'Upload Resources', udf_resources_directory: 'UDF resources directory', delete_confirm: 'Delete?', enter_keyword_tips: 'Please enter keyword', enter_udf_unction_name_tips: 'Please enter a 
UDF function name', enter_package_name_tips: 'Please enter a Package name', enter_select_udf_resources_tips: 'Please select UDF resources', enter_select_udf_resources_directory_tips: 'Please select UDF resources directory', enter_instructions_tips: 'Please enter a instructions', enter_name_tips: 'Please enter name', enter_description_tips: 'Please enter description' }, task_group_option: { id: 'No.', manage: 'Task group manage', option: 'Task group option', create: 'Create task group', edit: 'Edit task group', delete: 'Delete task group', view_queue: 'View the queue of the task group', switch_status: 'Switch status', code: 'Task group code', name: 'Task group name', project_name: 'Project name', resource_pool_size: 'Resource pool size', resource_pool_size_be_a_number: 'The size of the task group resource pool should be more than 1', resource_used_pool_size: 'Used resource', desc: 'Task group desc', status: 'Task group status', enable_status: 'Enable', disable_status: 'Disable', please_enter_name: 'Please enter task group name', please_enter_desc: 'Please enter task group description', please_enter_resource_pool_size: 'Please enter task group resource pool size', please_select_project: 'Please select a project', create_time: 'Create time', update_time: 'Update time', actions: 'Actions', please_enter_keywords: 'Please enter keywords' }, task_group_queue: { id: 'No.', actions: 'Actions', task_name: 'Task name', task_group_name: 'Task group name', project_name: 'Project name', process_name: 'Process name', process_instance_name: 'Process instance', queue: 'Task group queue', priority: 'Priority', priority_be_a_number: 'The priority of the task group queue should be a positive number', force_starting_status: 'Starting status', in_queue: 'In queue', task_status: 'Task status', view: 'View task group queue', the_status_of_waiting: 'Waiting into the queue', the_status_of_queuing: 'Queuing', the_status_of_releasing: 'Released', modify_priority: 'Edit the priority', start_task: 'Start the task', priority_not_empty: 'The value of priority can not be empty', priority_must_be_number: 'The value of priority should be number', please_select_task_name: 'Please select a task name', create_time: 'Create time', update_time: 'Update time', edit_priority: 'Edit the task priority' } } const project = { list: { create_project: 'Create Project', edit_project: 'Edit Project', project_list: 'Project List', project_tips: 'Please enter your project', description_tips: 'Please enter your description', username_tips: 'Please enter your username', project_name: 'Project Name', project_description: 'Project Description', owned_users: 'Owned Users', workflow_define_count: 'Workflow Define Count', process_instance_running_count: 'Process Instance Running Count', description: 'Description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', confirm: 'Confirm', cancel: 'Cancel', delete_confirm: 'Delete?' 
}, workflow: { workflow_relation: 'Workflow Relation', create_workflow: 'Create Workflow', import_workflow: 'Import Workflow', workflow_name: 'Workflow Name', current_selection: 'Current Selection', online: 'Online', offline: 'Offline', refresh: 'Refresh', show_hide_label: 'Show / Hide Label', workflow_offline: 'Workflow Offline', schedule_offline: 'Schedule Offline', schedule_start_time: 'Schedule Start Time', schedule_end_time: 'Schedule End Time', crontab_expression: 'Crontab', workflow_publish_status: 'Workflow Publish Status', schedule_publish_status: 'Schedule Publish Status', workflow_definition: 'Workflow Definition', workflow_instance: 'Workflow Instance', id: '#', status: 'Status', create_time: 'Create Time', update_time: 'Update Time', description: 'Description', create_user: 'Create User', modify_user: 'Modify User', operation: 'Operation', edit: 'Edit', start: 'Start', timing: 'Timing', timezone: 'Timezone', up_line: 'Online', down_line: 'Offline', copy_workflow: 'Copy Workflow', cron_manage: 'Cron manage', delete: 'Delete', tree_view: 'Tree View', export: 'Export', version_info: 'Version Info', version: 'Version', file_upload: 'File Upload', upload_file: 'Upload File', upload: 'Upload', file_name: 'File Name', success: 'Success', set_parameters_before_starting: 'Please set the parameters before starting', set_parameters_before_timing: 'Set parameters before timing', start_and_stop_time: 'Start and stop time', next_five_execution_times: 'Next five execution times', execute_time: 'Execute time', failure_strategy: 'Failure Strategy', notification_strategy: 'Notification Strategy', workflow_priority: 'Workflow Priority', worker_group: 'Worker Group', environment_name: 'Environment Name', alarm_group: 'Alarm Group', complement_data: 'Complement Data', startup_parameter: 'Startup Parameter', whether_dry_run: 'Whether Dry-Run', continue: 'Continue', end: 'End', none_send: 'None', success_send: 'Success', failure_send: 'Failure', all_send: 'All', whether_complement_data: 'Whether it is a complement process?', schedule_date: 'Schedule date', mode_of_execution: 'Mode of execution', serial_execution: 'Serial execution', parallel_execution: 'Parallel execution', parallelism: 'Parallelism', custom_parallelism: 'Custom Parallelism', please_enter_parallelism: 'Please enter Parallelism', please_choose: 'Please Choose', start_time: 'Start Time', end_time: 'End Time', crontab: 'Crontab', delete_confirm: 'Delete?', enter_name_tips: 'Please enter name', confirm_switch_version: 'Confirm Switch To This Version?', current_version: 'Current Version', run_type: 'Run Type', scheduling_time: 'Scheduling Time', duration: 'Duration', run_times: 'Run Times', fault_tolerant_sign: 'Fault-tolerant Sign', dry_run_flag: 'Dry-run Flag', executor: 'Executor', host: 'Host', start_process: 'Start Process', execute_from_the_current_node: 'Execute from the current node', recover_tolerance_fault_process: 'Recover tolerance fault process', resume_the_suspension_process: 'Resume the suspension process', execute_from_the_failed_nodes: 'Execute from the failed nodes', scheduling_execution: 'Scheduling execution', rerun: 'Rerun', stop: 'Stop', pause: 'Pause', recovery_waiting_thread: 'Recovery waiting thread', recover_serial_wait: 'Recover serial wait', recovery_suspend: 'Recovery Suspend', recovery_failed: 'Recovery Failed', gantt: 'Gantt', name: 'Name', all_status: 'AllStatus', submit_success: 'Submitted successfully', running: 'Running', ready_to_pause: 'Ready to pause', ready_to_stop: 'Ready to stop', failed: 
'Failed', need_fault_tolerance: 'Need fault tolerance', kill: 'Kill', waiting_for_thread: 'Waiting for thread', waiting_for_dependence: 'Waiting for dependence', waiting_for_dependency_to_complete: 'Waiting for dependency to complete', delay_execution: 'Delay execution', forced_success: 'Forced success', serial_wait: 'Serial wait', executing: 'Executing', startup_type: 'Startup Type', complement_range: 'Complement Range', parameters_variables: 'Parameters variables', global_parameters: 'Global parameters', local_parameters: 'Local parameters', type: 'Type', retry_count: 'Retry Count', submit_time: 'Submit Time', refresh_status_succeeded: 'Refresh status succeeded', view_log: 'View log', update_log_success: 'Update log success', no_more_log: 'No more logs', no_log: 'No log', loading_log: 'Loading Log...', close: 'Close', download_log: 'Download Log', refresh_log: 'Refresh Log', enter_full_screen: 'Enter full screen', cancel_full_screen: 'Cancel full screen' }, task: { task_name: 'Task Name', task_type: 'Task Type', create_task: 'Create Task', workflow_instance: 'Workflow Instance', workflow_name: 'Workflow Name', workflow_name_tips: 'Please select workflow name', workflow_state: 'Workflow State', version: 'Version', current_version: 'Current Version', switch_version: 'Switch To This Version', confirm_switch_version: 'Confirm Switch To This Version?', description: 'Description', move: 'Move', upstream_tasks: 'Upstream Tasks', executor: 'Executor', node_type: 'Node Type', state: 'State', submit_time: 'Submit Time', start_time: 'Start Time', create_time: 'Create Time', update_time: 'Update Time', end_time: 'End Time', duration: 'Duration', retry_count: 'Retry Count', dry_run_flag: 'Dry Run Flag', host: 'Host', operation: 'Operation', edit: 'Edit', delete: 'Delete', delete_confirm: 'Delete?', submitted_success: 'Submitted Success', running_execution: 'Running Execution', ready_pause: 'Ready Pause', pause: 'Pause', ready_stop: 'Ready Stop', stop: 'Stop', failure: 'Failure', success: 'Success', need_fault_tolerance: 'Need Fault Tolerance', kill: 'Kill', waiting_thread: 'Waiting Thread', waiting_depend: 'Waiting Depend', delay_execution: 'Delay Execution', forced_success: 'Forced Success', serial_wait: 'Serial Wait', view_log: 'View Log', download_log: 'Download Log' }, dag: { create: 'Create Workflow', search: 'Search', download_png: 'Download PNG', fullscreen_open: 'Open Fullscreen', fullscreen_close: 'Close Fullscreen', save: 'Save', close: 'Close', format: 'Format', refresh_dag_status: 'Refresh DAG status', layout_type: 'Layout Type', grid_layout: 'Grid', dagre_layout: 'Dagre', rows: 'Rows', cols: 'Cols', copy_success: 'Copy Success', workflow_name: 'Workflow Name', description: 'Description', tenant: 'Tenant', timeout_alert: 'Timeout Alert', global_variables: 'Global Variables', basic_info: 'Basic Information', minute: 'Minute', key: 'Key', value: 'Value', success: 'Success', delete_cell: 'Delete selected edges and nodes', online_directly: 'Whether to go online the process definition', dag_name_empty: 'DAG graph name cannot be empty', positive_integer: 'Please enter a positive integer greater than 0', prop_empty: 'prop is empty', prop_repeat: 'prop is repeat', node_not_created: 'Failed to save node not created', copy_name: 'Copy Name', view_variables: 'View Variables', startup_parameter: 'Startup Parameter' }, node: { current_node_settings: 'Current node settings', instructions: 'Instructions', view_history: 'View history', view_log: 'View log', enter_this_child_node: 'Enter this child node', 
name: 'Node name', name_tips: 'Please enter name (required)', task_type: 'Task Type', task_type_tips: 'Please select a task type (required)', process_name: 'Process Name', process_name_tips: 'Please select a process (required)', child_node: 'Child Node', enter_child_node: 'Enter child node', run_flag: 'Run flag', normal: 'Normal', prohibition_execution: 'Prohibition execution', description: 'Description', description_tips: 'Please enter description', task_priority: 'Task priority', worker_group: 'Worker group', worker_group_tips: 'The Worker group no longer exists, please select the correct Worker group!', environment_name: 'Environment Name', task_group_name: 'Task group name', task_group_queue_priority: 'Priority', number_of_failed_retries: 'Number of failed retries', times: 'Times', failed_retry_interval: 'Failed retry interval', minute: 'Minute', delay_execution_time: 'Delay execution time', state: 'State', branch_flow: 'Branch flow', cancel: 'Cancel', loading: 'Loading...', confirm: 'Confirm', success: 'Success', failed: 'Failed', backfill_tips: 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', task_instance_tips: 'The task has not been executed and cannot enter the sub-Process', branch_tips: 'Cannot select the same node for successful branch flow and failed branch flow', timeout_alarm: 'Timeout alarm', timeout_strategy: 'Timeout strategy', timeout_strategy_tips: 'Timeout strategy must be selected', timeout_failure: 'Timeout failure', timeout_period: 'Timeout period', timeout_period_tips: 'Timeout must be a positive integer', script: 'Script', script_tips: 'Please enter script(required)', resources: 'Resources', resources_tips: 'Please select resources', non_resources_tips: 'Please delete all non-existent resources', useless_resources_tips: 'Unauthorized or deleted resources', custom_parameters: 'Custom Parameters', copy_success: 'Copy success', copy_failed: 'The browser does not support automatic copying', prop_tips: 'prop(required)', prop_repeat: 'prop is repeat', value_tips: 'value(optional)', value_required_tips: 'value(required)', pre_tasks: 'Pre tasks', program_type: 'Program Type', spark_version: 'Spark Version', main_class: 'Main Class', main_class_tips: 'Please enter main class', main_package: 'Main Package', main_package_tips: 'Please enter main package', deploy_mode: 'Deploy Mode', app_name: 'App Name', app_name_tips: 'Please enter app name(optional)', driver_cores: 'Driver Cores', driver_cores_tips: 'Please enter Driver cores', driver_memory: 'Driver Memory', driver_memory_tips: 'Please enter Driver memory', executor_number: 'Executor Number', executor_number_tips: 'Please enter Executor number', executor_memory: 'Executor Memory', executor_memory_tips: 'Please enter Executor memory', executor_cores: 'Executor Cores', executor_cores_tips: 'Please enter Executor cores', main_arguments: 'Main Arguments', main_arguments_tips: 'Please enter main arguments', option_parameters: 'Option Parameters', option_parameters_tips: 'Please enter option parameters', positive_integer_tips: 'should be a positive integer', flink_version: 'Flink Version', job_manager_memory: 'JobManager Memory', job_manager_memory_tips: 'Please enter JobManager memory', task_manager_memory: 'TaskManager Memory', task_manager_memory_tips: 'Please enter TaskManager memory', slot_number: 'Slot Number', slot_number_tips: 'Please enter Slot number', parallelism: 'Parallelism', custom_parallelism: 'Configure parallelism', parallelism_tips: 'Please enter Parallelism', 
parallelism_number_tips: 'Parallelism number should be positive integer', parallelism_complement_tips: 'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' + 'set the complement task thread to a reasonable value to avoid too large impact on the server.', task_manager_number: 'TaskManager Number', task_manager_number_tips: 'Please enter TaskManager number', http_url: 'Http Url', http_url_tips: 'Please Enter Http Url', http_method: 'Http Method', http_parameters: 'Http Parameters', http_check_condition: 'Http Check Condition', http_condition: 'Http Condition', http_condition_tips: 'Please Enter Http Condition', timeout_settings: 'Timeout Settings', connect_timeout: 'Connect Timeout', ms: 'ms', socket_timeout: 'Socket Timeout', status_code_default: 'Default response code 200', status_code_custom: 'Custom response code', body_contains: 'Content includes', body_not_contains: 'Content does not contain', http_parameters_position: 'Http Parameters Position', target_task_name: 'Target Task Name', target_task_name_tips: 'Please enter the Pigeon task name', datasource_type: 'Datasource types', datasource_instances: 'Datasource instances', sql_type: 'SQL Type', sql_type_query: 'Query', sql_type_non_query: 'Non Query', sql_statement: 'SQL Statement', pre_sql_statement: 'Pre SQL Statement', post_sql_statement: 'Post SQL Statement', sql_input_placeholder: 'Please enter non-query sql.', sql_empty_tips: 'The sql can not be empty.', procedure_method: 'SQL Statement', procedure_method_tips: 'Please enter the procedure script', procedure_method_snippet: '--Please enter the procedure script \n\n--call procedure:call <procedure-name>[(<arg1>,<arg2>, ...)]\n\n--call function:?= call <procedure-name>[(<arg1>,<arg2>, ...)]', start: 'Start', edit: 'Edit', copy: 'Copy', delete: 'Delete', custom_job: 'Custom Job', custom_script: 'Custom Script', sqoop_job_name: 'Job Name', sqoop_job_name_tips: 'Please enter Job Name(required)', direct: 'Direct', hadoop_custom_params: 'Hadoop Params', sqoop_advanced_parameters: 'Sqoop Advanced Parameters', data_source: 'Data Source', type: 'Type', datasource: 'Datasource', datasource_tips: 'Please select the datasource', model_type: 'ModelType', form: 'Form', table: 'Table', table_tips: 'Please enter Mysql Table(required)', column_type: 'ColumnType', all_columns: 'All Columns', some_columns: 'Some Columns', column: 'Column', column_tips: 'Please enter Columns (Comma separated)', database: 'Database', database_tips: 'Please enter Hive Database(required)', hive_table_tips: 'Please enter Hive Table(required)', hive_partition_keys: 'Hive partition Keys', hive_partition_keys_tips: 'Please enter Hive Partition Keys', hive_partition_values: 'Hive partition Values', hive_partition_values_tips: 'Please enter Hive Partition Values', export_dir: 'Export Dir', export_dir_tips: 'Please enter Export Dir(required)', sql_statement_tips: 'SQL Statement(required)', map_column_hive: 'Map Column Hive', map_column_java: 'Map Column Java', data_target: 'Data Target', create_hive_table: 'CreateHiveTable', drop_delimiter: 'DropDelimiter', over_write_src: 'OverWriteSrc', hive_target_dir: 'Hive Target Dir', hive_target_dir_tips: 'Please enter hive target dir', replace_delimiter: 'ReplaceDelimiter', replace_delimiter_tips: 'Please enter Replace Delimiter', target_dir: 'Target Dir', target_dir_tips: 'Please enter Target Dir(required)', delete_target_dir: 'DeleteTargetDir', compression_codec: 'CompressionCodec', file_type: 'FileType', fields_terminated: 
'FieldsTerminated', fields_terminated_tips: 'Please enter Fields Terminated', lines_terminated: 'LinesTerminated', lines_terminated_tips: 'Please enter Lines Terminated', is_update: 'IsUpdate', update_key: 'UpdateKey', update_key_tips: 'Please enter Update Key', update_mode: 'UpdateMode', only_update: 'OnlyUpdate', allow_insert: 'AllowInsert', concurrency: 'Concurrency', concurrency_tips: 'Please enter Concurrency', sea_tunnel_deploy_mode: 'Deploy Mode', sea_tunnel_master: 'Master', sea_tunnel_master_url: 'Master URL', sea_tunnel_queue: 'Queue', sea_tunnel_master_url_tips: 'Please enter the master url, e.g., 127.0.0.1:7077', switch_condition: 'Condition', switch_branch_flow: 'Branch Flow', and: 'and', or: 'or', datax_custom_template: 'Custom Template Switch', datax_json_template: 'JSON', datax_target_datasource_type: 'Target Datasource Type', datax_target_database: 'Target Database', datax_target_table: 'Target Table', datax_target_table_tips: 'Please enter the name of the target table', datax_target_database_pre_sql: 'Pre SQL Statement', datax_target_database_post_sql: 'Post SQL Statement', datax_non_query_sql_tips: 'Please enter the non-query sql statement', datax_job_speed_byte: 'Speed(Byte count)', datax_job_speed_byte_info: '(0 means unlimited)', datax_job_speed_record: 'Speed(Record count)', datax_job_speed_record_info: '(0 means unlimited)', datax_job_runtime_memory: 'Runtime Memory Limits', datax_job_runtime_memory_xms: 'Low Limit Value', datax_job_runtime_memory_xmx: 'High Limit Value', datax_job_runtime_memory_unit: 'G', current_hour: 'CurrentHour', last_1_hour: 'Last1Hour', last_2_hour: 'Last2Hours', last_3_hour: 'Last3Hours', last_24_hour: 'Last24Hours', today: 'today', last_1_days: 'Last1Days', last_2_days: 'Last2Days', last_3_days: 'Last3Days', last_7_days: 'Last7Days', this_week: 'ThisWeek', last_week: 'LastWeek', last_monday: 'LastMonday', last_tuesday: 'LastTuesday', last_wednesday: 'LastWednesday', last_thursday: 'LastThursday', last_friday: 'LastFriday', last_saturday: 'LastSaturday', last_sunday: 'LastSunday', this_month: 'ThisMonth', last_month: 'LastMonth', last_month_begin: 'LastMonthBegin', last_month_end: 'LastMonthEnd', month: 'month', week: 'week', day: 'day', hour: 'hour', add_dependency: 'Add dependency', waiting_dependent_start: 'Waiting Dependent start', check_interval: 'Check interval', waiting_dependent_complete: 'Waiting Dependent complete' } } const security = { tenant: { tenant_manage: 'Tenant Manage', create_tenant: 'Create Tenant', search_tips: 'Please enter keywords', num: 'Serial number', tenant_code: 'Operating System Tenant', description: 'Description', queue_name: 'QueueName', create_time: 'Create Time', update_time: 'Update Time', actions: 'Operation', edit_tenant: 'Edit Tenant', tenant_code_tips: 'Please enter the operating system tenant', queue_name_tips: 'Please select queue', description_tips: 'Please enter a description', delete_confirm: 'Delete?', edit: 'Edit', delete: 'Delete' }, alarm_group: { create_alarm_group: 'Create Alarm Group', edit_alarm_group: 'Edit Alarm Group', search_tips: 'Please enter keywords', alert_group_name_tips: 'Please enter your alert group name', alarm_plugin_instance: 'Alarm Plugin Instance', alarm_plugin_instance_tips: 'Please select alert plugin instance', alarm_group_description_tips: 'Please enter your alarm group description', alert_group_name: 'Alert Group Name', alarm_group_description: 'Alarm Group Description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', delete_confirm: 
'Delete?', edit: 'Edit', delete: 'Delete' }, worker_group: { create_worker_group: 'Create Worker Group', edit_worker_group: 'Edit Worker Group', search_tips: 'Please enter keywords', operation: 'Operation', delete_confirm: 'Delete?', edit: 'Edit', delete: 'Delete', group_name: 'Group Name', group_name_tips: 'Please enter your group name', worker_addresses: 'Worker Addresses', worker_addresses_tips: 'Please select worker addresses', create_time: 'Create Time', update_time: 'Update Time' }, yarn_queue: { create_queue: 'Create Queue', edit_queue: 'Edit Queue', search_tips: 'Please enter keywords', queue_name: 'Queue Name', queue_value: 'Queue Value', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', queue_name_tips: 'Please enter your queue name', queue_value_tips: 'Please enter your queue value' }, environment: { create_environment: 'Create Environment', edit_environment: 'Edit Environment', search_tips: 'Please enter keywords', edit: 'Edit', delete: 'Delete', environment_name: 'Environment Name', environment_config: 'Environment Config', environment_desc: 'Environment Desc', worker_groups: 'Worker Groups', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', delete_confirm: 'Delete?', environment_name_tips: 'Please enter your environment name', environment_config_tips: 'Please enter your environment config', environment_description_tips: 'Please enter your environment description', worker_group_tips: 'Please select worker group' }, token: { create_token: 'Create Token', edit_token: 'Edit Token', search_tips: 'Please enter keywords', user: 'User', user_tips: 'Please select user', token: 'Token', token_tips: 'Please enter your token', expiration_time: 'Expiration Time', expiration_time_tips: 'Please select expiration time', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', delete_confirm: 'Delete?' 
}, user: { user_manage: 'User Manage', create_user: 'Create User', update_user: 'Update User', delete_user: 'Delete User', delete_confirm: 'Are you sure to delete?', delete_confirm_tip: 'Deleting user is a dangerous operation,please be careful', project: 'Project', resource: 'Resource', file_resource: 'File Resource', udf_resource: 'UDF Resource', datasource: 'Datasource', udf: 'UDF Function', authorize_project: 'Project Authorize', authorize_resource: 'Resource Authorize', authorize_datasource: 'Datasource Authorize', authorize_udf: 'UDF Function Authorize', index: 'Index', username: 'Username', username_exists: 'The username already exists', username_rule_msg: 'Please enter username', user_password: 'Please enter password', user_password_rule_msg: 'Please enter a password containing letters and numbers with a length between 6 and 20', user_type: 'User Type', tenant_code: 'Tenant', tenant_id_rule_msg: 'Please select tenant', queue: 'Queue', email: 'Email', email_rule_msg: 'Please enter valid email', phone: 'Phone', phone_rule_msg: 'Please enter valid phone number', state: 'State', state_enabled: 'Enabled', state_disabled: 'Disabled', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', authorize: 'Authorize', save_error_msg: 'Failed to save, please retry', delete_error_msg: 'Failed to delete, please retry', auth_error_msg: 'Failed to authorize, please retry', auth_success_msg: 'Authorize succeeded' }, alarm_instance: { search_input_tips: 'Please input the keywords', alarm_instance_manage: 'Alarm instance manage', alarm_instance: 'Alarm Instance', serial_number: '#', alarm_instance_name: 'Alarm instance name', alarm_instance_name_tips: 'Please enter alarm plugin instance name', alarm_plugin_name: 'Alarm plugin name', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', confirm: 'Confirm', cancel: 'Cancel', submit: 'Submit', create: 'Create', select_plugin: 'Select plugin', select_plugin_tips: 'Select Alarm plugin', instance_parameter_exception: 'Instance parameter exception', WebHook: 'WebHook', webHook: 'WebHook', IsEnableProxy: 'Enable Proxy', Proxy: 'Proxy', Port: 'Port', User: 'User', corpId: 'CorpId', secret: 'Secret', Secret: 'Secret', users: 'Users', userSendMsg: 'UserSendMsg', agentId: 'AgentId', showType: 'Show Type', receivers: 'Receivers', receiverCcs: 'ReceiverCcs', serverHost: 'SMTP Host', serverPort: 'SMTP Port', sender: 'Sender', enableSmtpAuth: 'SMTP Auth', Password: 'Password', starttlsEnable: 'SMTP STARTTLS Enable', sslEnable: 'SMTP SSL Enable', smtpSslTrust: 'SMTP SSL Trust', url: 'URL', requestType: 'Request Type', headerParams: 'Headers', bodyParams: 'Body', contentField: 'Content Field', Keyword: 'Keyword', userParams: 'User Params', path: 'Script Path', type: 'Type', sendType: 'Send Type', username: 'Username', botToken: 'Bot Token', chatId: 'Channel Chat Id', parseMode: 'Parse Mode' }, k8s_namespace: { create_namespace: 'Create Namespace', edit_namespace: 'Edit Namespace', search_tips: 'Please enter keywords', k8s_namespace: 'K8S Namespace', k8s_namespace_tips: 'Please enter k8s namespace', k8s_cluster: 'K8S Cluster', k8s_cluster_tips: 'Please enter k8s cluster', owner: 'Owner', owner_tips: 'Please enter owner', tag: 'Tag', tag_tips: 'Please enter tag', limit_cpu: 'Limit CPU', limit_cpu_tips: 'Please enter limit CPU', limit_memory: 'Limit Memory', limit_memory_tips: 'Please enter limit memory', create_time: 'Create Time', update_time: 'Update Time', 
operation: 'Operation', edit: 'Edit', delete: 'Delete', delete_confirm: 'Delete?' } } const datasource = { datasource: 'DataSource', create_datasource: 'Create DataSource', search_input_tips: 'Please input the keywords', serial_number: '#', datasource_name: 'Datasource Name', datasource_name_tips: 'Please enter datasource name', datasource_user_name: 'Owner', datasource_type: 'Datasource Type', datasource_parameter: 'Datasource Parameter', description: 'Description', description_tips: 'Please enter description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', click_to_view: 'Click to view', delete: 'Delete', confirm: 'Confirm', cancel: 'Cancel', create: 'Create', edit: 'Edit', success: 'Success', test_connect: 'Test Connect', ip: 'IP', ip_tips: 'Please enter IP', port: 'Port', port_tips: 'Please enter port', database_name: 'Database Name', database_name_tips: 'Please enter database name', oracle_connect_type: 'ServiceName or SID', oracle_connect_type_tips: 'Please select serviceName or SID', oracle_service_name: 'ServiceName', oracle_sid: 'SID', jdbc_connect_parameters: 'jdbc connect parameters', principal_tips: 'Please enter Principal', krb5_conf_tips: 'Please enter the kerberos authentication parameter java.security.krb5.conf', keytab_username_tips: 'Please enter the kerberos authentication parameter login.user.keytab.username', keytab_path_tips: 'Please enter the kerberos authentication parameter login.user.keytab.path', format_tips: 'Please enter format', connection_parameter: 'connection parameter', user_name: 'User Name', user_name_tips: 'Please enter your username', user_password: 'Password', user_password_tips: 'Please enter your password' } const data_quality = { task_result: { task_name: 'Task Name', workflow_instance: 'Workflow Instance', rule_type: 'Rule Type', rule_name: 'Rule Name', state: 'State', actual_value: 'Actual Value', excepted_value: 'Excepted Value', check_type: 'Check Type', operator: 'Operator', threshold: 'Threshold', failure_strategy: 'Failure Strategy', excepted_value_type: 'Excepted Value Type', error_output_path: 'Error Output Path', username: 'Username', create_time: 'Create Time', update_time: 'Update Time', undone: 'Undone', success: 'Success', failure: 'Failure', single_table: 'Single Table', single_table_custom_sql: 'Single Table Custom Sql', multi_table_accuracy: 'Multi Table Accuracy', multi_table_comparison: 'Multi Table Comparison', expected_and_actual_or_expected: '(Expected - Actual) / Expected x 100%', expected_and_actual: 'Expected - Actual', actual_and_expected: 'Actual - Expected', actual_or_expected: 'Actual / Expected x 100%' }, rule: { actions: 'Actions', name: 'Rule Name', type: 'Rule Type', username: 'User Name', create_time: 'Create Time', update_time: 'Update Time', input_item: 'Rule input item', view_input_item: 'View input items', input_item_title: 'Input item title', input_item_placeholder: 'Input item placeholder', input_item_type: 'Input item type', src_connector_type: 'SrcConnType', src_datasource_id: 'SrcSource', src_table: 'SrcTable', src_filter: 'SrcFilter', src_field: 'SrcField', statistics_name: 'ActualValName', check_type: 'CheckType', operator: 'Operator', threshold: 'Threshold', failure_strategy: 'FailureStrategy', target_connector_type: 'TargetConnType', target_datasource_id: 'TargetSourceId', target_table: 'TargetTable', target_filter: 'TargetFilter', mapping_columns: 'OnClause', statistics_execute_sql: 'ActualValExecSql', comparison_name: 'ExceptedValName', comparison_execute_sql: 
'ExceptedValExecSql', comparison_type: 'ExceptedValType', writer_connector_type: 'WriterConnType', writer_datasource_id: 'WriterSourceId', target_field: 'TargetField', field_length: 'FieldLength', logic_operator: 'LogicOperator', regexp_pattern: 'RegexpPattern', deadline: 'Deadline', datetime_format: 'DatetimeFormat', enum_list: 'EnumList', begin_time: 'BeginTime', fix_value: 'FixValue', null_check: 'NullCheck', custom_sql: 'Custom Sql', single_table: 'Single Table', single_table_custom_sql: 'Single Table Custom Sql', multi_table_accuracy: 'Multi Table Accuracy', multi_table_value_comparison: 'Multi Table Compare', field_length_check: 'FieldLengthCheck', uniqueness_check: 'UniquenessCheck', regexp_check: 'RegexpCheck', timeliness_check: 'TimelinessCheck', enumeration_check: 'EnumerationCheck', table_count_check: 'TableCountCheck', All: 'All', FixValue: 'FixValue', DailyAvg: 'DailyAvg', WeeklyAvg: 'WeeklyAvg', MonthlyAvg: 'MonthlyAvg', Last7DayAvg: 'Last7DayAvg', Last30DayAvg: 'Last30DayAvg', SrcTableTotalRows: 'SrcTableTotalRows', TargetTableTotalRows: 'TargetTableTotalRows' } } const crontab = { second: 'second', minute: 'minute', hour: 'hour', day: 'day', month: 'month', year: 'year', monday: 'Monday', tuesday: 'Tuesday', wednesday: 'Wednesday', thursday: 'Thursday', friday: 'Friday', saturday: 'Saturday', sunday: 'Sunday', every_second: 'Every second', every: 'Every', second_carried_out: 'second carried out', second_start: 'Start', specific_second: 'Specific second(multiple)', specific_second_tip: 'Please enter a specific second', cycle_from: 'Cycle from', to: 'to', every_minute: 'Every minute', minute_carried_out: 'minute carried out', minute_start: 'Start', specific_minute: 'Specific minute(multiple)', specific_minute_tip: 'Please enter a specific minute', every_hour: 'Every hour', hour_carried_out: 'hour carried out', hour_start: 'Start', specific_hour: 'Specific hour(multiple)', specific_hour_tip: 'Please enter a specific hour', every_day: 'Every day', week_carried_out: 'week carried out', start: 'Start', day_carried_out: 'day carried out', day_start: 'Start', specific_week: 'Specific day of the week(multiple)', specific_week_tip: 'Please enter a specific week', specific_day: 'Specific days(multiple)', specific_day_tip: 'Please enter a days', last_day_of_month: 'On the last day of the month', last_work_day_of_month: 'On the last working day of the month', last_of_month: 'At the last of this month', before_end_of_month: 'Before the end of this month', recent_business_day_to_month: 'The most recent business day (Monday to Friday) to this month', in_this_months: 'In this months', every_month: 'Every month', month_carried_out: 'month carried out', month_start: 'Start', specific_month: 'Specific months(multiple)', specific_month_tip: 'Please enter a months', every_year: 'Every year', year_carried_out: 'year carried out', year_start: 'Start', specific_year: 'Specific year(multiple)', specific_year_tip: 'Please enter a year', one_hour: 'hour', one_day: 'day' } export default { login, modal, theme, userDropdown, menu, home, password, profile, monitor, resource, project, security, datasource, data_quality, crontab }
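For context, the locale objects in the file above are resolved at runtime through vue-i18n's `useI18n().t` with dot-separated keys, as the components later in this dataset do (e.g. `t('resource.file.file_details')`). A minimal, self-contained sketch of that lookup; the standalone `createI18n` wiring and the import path are assumptions for illustration, not the project's actual bootstrap code:

```ts
import { createI18n } from 'vue-i18n'
// assumed path: the en_US module above exports the nested locale objects as its default export
import en_US from '@/locales/modules/en_US'

const i18n = createI18n({
  legacy: false,
  locale: 'en_US',
  messages: { en_US }
})

const { t } = i18n.global
// dot-separated keys mirror the nested objects, e.g. the tooltip text that issue #8632 is about
console.log(t('project.task.forced_success')) // -> 'Forced Success'
console.log(t('resource.file.file_details')) // -> 'File Details'
```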
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,615
[Bug][UI Next][V1.0.0-Alpha] Resource manage file editing page displays a blank page when opened
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The page is blank when I click the file edit button in resource manage. ### What you expected to happen The page displays the file editing contents. ### How to reproduce Open Resources -> File Manage in the UI and click the file edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8615
https://github.com/apache/dolphinscheduler/pull/8631
7b66b66e8ffbadeadc0b7f3081e7e218b776932c
18c384c60aa87159a761ae35d67ebcdec8a6009f
"2022-03-01T08:07:19Z"
java
"2022-03-01T14:21:07Z"
dolphinscheduler-ui-next/src/service/modules/resources/index.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { axios, downloadFile } from '@/service/service' import { ResourceTypeReq, UdfTypeReq, NameReq, FileNameReq, FullNameReq, IdReq, ContentReq, DescriptionReq, CreateReq, UserIdReq, OnlineCreateReq, ProgramTypeReq, ListReq, ViewResourceReq, ResourceIdReq, UdfFuncReq } from './types' export function queryResourceListPaging( params: ListReq & IdReq & ResourceTypeReq ): any { return axios({ url: '/resources', method: 'get', params }) } export function queryResourceById( params: ResourceTypeReq & FullNameReq & IdReq, id: number ): any { return axios({ url: `/resources/${id}`, method: 'get', params }) } export function createResource( data: CreateReq & FileNameReq & NameReq & ResourceTypeReq ): any { return axios({ url: '/resources', method: 'post', data }) } export function authorizedFile(params: UserIdReq) { return axios({ url: '/resources/authed-file', method: 'get', params }) } export function authorizeResourceTree(params: UserIdReq) { return axios({ url: '/resources/authed-resource-tree', method: 'get', params }) } export function authUDFFunc(params: UserIdReq) { return axios({ url: '/resources/authed-udf-func', method: 'get', params }) } export function createDirectory( data: CreateReq & NameReq & ResourceTypeReq ): any { return axios({ url: '/resources/directory', method: 'post', data }) } export function queryResourceList(params: ResourceTypeReq): any { return axios({ url: '/resources/list', method: 'get', params }) } export function onlineCreateResource( data: OnlineCreateReq & FileNameReq & ResourceTypeReq ): any { return axios({ url: '/resources/online-create', method: 'post', data }) } export function queryResourceByProgramType( params: ResourceTypeReq & ProgramTypeReq ): any { return axios({ url: '/resources/query-by-type', method: 'get', params }) } export function queryUdfFuncListPaging(params: ListReq): any { return axios({ url: '/resources/udf-func', method: 'get', params }) } export function queryUdfFuncList(params: IdReq & ListReq): any { return axios({ url: '/resources/udf-func/list', method: 'get', params }) } export function verifyUdfFuncName(params: NameReq): any { return axios({ url: '/resources/udf-func/verify-name', method: 'get', params }) } export function deleteUdfFunc(id: number): any { return axios({ url: `/resources/udf-func/${id}`, method: 'delete' }) } export function unAuthUDFFunc(params: UserIdReq) { return axios({ url: '/resources/unauth-udf-func', method: 'get', params }) } export function verifyResourceName(params: FullNameReq & ResourceTypeReq): any { return axios({ url: '/resources/verify-name', method: 'get', params }) } export function queryResource( params: FullNameReq & ResourceTypeReq, id: IdReq ): any { return axios({ url: `/resources/verify-name/${id}`, method: 'get', params }) 
} export function updateResource( data: NameReq & ResourceTypeReq & IdReq & DescriptionReq, id: number ): any { return axios({ url: `/resources/${id}`, method: 'put', data }) } export function deleteResource(id: number): any { return axios({ url: `/resources/${id}`, method: 'delete' }) } export function downloadResource(id: number): void { downloadFile(`resources/${id}/download`) } export function viewUIUdfFunction(id: IdReq): any { return axios({ url: `/resources/${id}/udf-func`, method: 'get' }) } export function updateResourceContent(data: ContentReq, id: number): any { return axios({ url: `/resources/${id}/update-content`, method: 'put', data }) } export function viewResource(params: ViewResourceReq, id: number): any { return axios({ url: `/resources/${id}/view`, method: 'get', params }) } export function createUdfFunc( data: UdfFuncReq, resourceId: ResourceIdReq ): any { return axios({ url: `/resources/${resourceId}/udf-func`, method: 'post', data }) } export function updateUdfFunc( data: UdfFuncReq, resourceId: ResourceIdReq, id: number ): any { return axios({ url: `/resources/${resourceId}/udf-func/${id}`, method: 'put', data }) }
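For the context of issue #8615, the module above wraps the `/resources` REST endpoints: the file-edit page reads a file with `viewResource` and writes it back with `updateResourceContent`. A minimal usage sketch under the assumption that the axios service resolves to the file view object; the helper name and the id/content values are illustrative only:

```ts
import {
  viewResource,
  updateResourceContent
} from '@/service/modules/resources'

// Hypothetical helper: load a file for editing, then persist new content.
async function editResourceContent(id: number, newContent: string) {
  // read up to 3000 lines of the file, starting at line 0
  const view = await viewResource({ skipLineNum: 0, limit: 3000 }, id)
  console.log('editing', view.alias)

  // save the edited content back to the same resource id
  await updateResourceContent({ content: newContent }, id)
}

editResourceContent(1, 'echo "hello"')
```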
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,615
[Bug][UI Next][V1.0.0-Alpha] Resource manage file editing page displays a blank page when opened
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The page is blank when I click the file edit button in resource manage. ### What you expected to happen The page displays the file editing contents. ### How to reproduce Open Resources -> File Manage in the UI and click the file edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8615
https://github.com/apache/dolphinscheduler/pull/8631
7b66b66e8ffbadeadc0b7f3081e7e218b776932c
18c384c60aa87159a761ae35d67ebcdec8a6009f
"2022-03-01T08:07:19Z"
java
"2022-03-01T14:21:07Z"
dolphinscheduler-ui-next/src/service/modules/resources/types.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ interface FileReq { file: any } interface ResourceTypeReq { type: 'FILE' | 'UDF' programType?: string } interface UdfTypeReq { type: 'HIVE' | 'SPARK' } interface NameReq { name: string } interface FileNameReq { fileName: string } interface FullNameReq { fullName: string } interface IdReq { id: number } interface ContentReq { content: string } interface DescriptionReq { description?: string } interface CreateReq extends ResourceTypeReq, DescriptionReq { currentDir: string pid: number } interface UserIdReq { userId: number } interface OnlineCreateReq extends CreateReq, ContentReq { suffix: string } interface ProgramTypeReq { programType: 'JAVA' | 'SCALA' | 'PYTHON' } interface ListReq { pageNo: number pageSize: number searchVal?: string } interface ViewResourceReq { limit: number skipLineNum: number } interface ResourceIdReq { resourceId: number } interface UdfFuncReq extends UdfTypeReq, DescriptionReq, ResourceIdReq { className: string funcName: string argTypes?: string database?: string } interface ResourceFile { id: number pid: number alias: string userId: number type: string directory: boolean fileName: string fullName: string description: string size: number updateTime: string } interface ResourceListRes { currentPage: number pageSize: number start: number total: number totalList: ResourceFile[] } export { FileReq, ResourceTypeReq, UdfTypeReq, NameReq, FileNameReq, FullNameReq, IdReq, ContentReq, DescriptionReq, CreateReq, UserIdReq, OnlineCreateReq, ProgramTypeReq, ListReq, ViewResourceReq, ResourceIdReq, UdfFuncReq, ResourceListRes }
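The request interfaces above are composed through intersections rather than one flat shape; for example, `onlineCreateResource` in the service module shown earlier accepts `OnlineCreateReq & FileNameReq & ResourceTypeReq`. A small illustrative payload; all field values here (pid, currentDir, file name, content, description) are placeholders:

```ts
import { onlineCreateResource } from '@/service/modules/resources'
import type {
  OnlineCreateReq,
  FileNameReq,
  ResourceTypeReq
} from '@/service/modules/resources/types'

// OnlineCreateReq already pulls in CreateReq (type, pid, currentDir, description?)
// and ContentReq (content), and adds suffix; FileNameReq contributes fileName.
const payload: OnlineCreateReq & FileNameReq & ResourceTypeReq = {
  type: 'FILE',
  pid: -1,
  currentDir: '/',
  fileName: 'hello',
  suffix: 'sh',
  content: 'echo "hello dolphinscheduler"',
  description: 'demo script created through the online editor'
}

onlineCreateResource(payload)
```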
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,615
[Bug][UI Next][V1.0.0-Alpha] Resource manage file editing page displays a blank page when opened
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The page is blank when I click the file edit button in resource manage. ### What you expected to happen The page displays the file editing contents. ### How to reproduce Open Resources -> File Manage in the UI and click the file edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8615
https://github.com/apache/dolphinscheduler/pull/8631
7b66b66e8ffbadeadc0b7f3081e7e218b776932c
18c384c60aa87159a761ae35d67ebcdec8a6009f
"2022-03-01T08:07:19Z"
java
"2022-03-01T14:21:07Z"
dolphinscheduler-ui-next/src/views/resource/file/create/index.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, ref, toRefs } from 'vue' import { useRouter } from 'vue-router' import { NForm, NFormItem, NInput, NSelect, NButton } from 'naive-ui' import { useI18n } from 'vue-i18n' import Card from '@/components/card' import MonacoEditor from '@/components/monaco-editor' import { useCreate } from './use-create' import { useForm } from './use-form' import { fileTypeArr } from '@/utils/common' import styles from '../index.module.scss' import type { Router } from 'vue-router' export default defineComponent({ name: 'ResourceFileCreate', setup() { const router: Router = useRouter() const { state } = useForm() const { handleCreateFile } = useCreate(state) const fileSuffixOptions = fileTypeArr.map((suffix) => ({ key: suffix, label: suffix, value: suffix })) const handleFile = () => { handleCreateFile() } const handleReturn = () => { const { id } = router.currentRoute.value.params const name = id ? 'resource-file-subdirectory' : 'file' router.push({ name, params: { id } }) } return { fileSuffixOptions, handleFile, handleReturn, ...toRefs(state) } }, render() { const { t } = useI18n() return ( <Card title={t('resource.file.file_details')}> <NForm rules={this.rules} ref='fileFormRef' label-placement='left' label-width='160' class={styles['form-content']} > <NFormItem label={t('resource.file.file_name')} path='fileName'> <NInput v-model={[this.fileForm.fileName, 'value']} placeholder={t('resource.file.enter_name_tips')} style={{ width: '300px' }} /> </NFormItem> <NFormItem label={t('resource.file.file_format')} path='suffix'> <NSelect defaultValue={[this.fileForm.suffix]} v-model={[this.fileForm.suffix, 'value']} options={this.fileSuffixOptions} style={{ width: '100px' }} /> </NFormItem> <NFormItem label={t('resource.file.description')} path='description'> <NInput type='textarea' v-model={[this.fileForm.description, 'value']} placeholder={t('resource.file.enter_description_tips')} style={{ width: '430px' }} /> </NFormItem> <NFormItem label={t('resource.file.file_content')} path='content'> <div style={{ width: '90%' }} > <MonacoEditor v-model={[this.fileForm.content, 'value']} /> </div> </NFormItem> <div class={styles['file-edit-content']}> <div class={styles.submit}> <NButton type='info' size='small' round onClick={this.handleFile}> {t('resource.file.save')} </NButton> <NButton type='info' size='small' text style={{ marginLeft: '15px' }} onClick={this.handleReturn} > {t('resource.file.return')} </NButton> </div> </div> </NForm> </Card> ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,615
[Bug][UI Next][V1.0.0-Alpha] Resource manage file editing page displays a blank page when opened
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The page is blank when I click the file edit button in resource manage. ### What you expected to happen The page displays the file editing contents. ### How to reproduce Open Resources -> File Manage in the UI and click the file edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8615
https://github.com/apache/dolphinscheduler/pull/8631
7b66b66e8ffbadeadc0b7f3081e7e218b776932c
18c384c60aa87159a761ae35d67ebcdec8a6009f
"2022-03-01T08:07:19Z"
java
"2022-03-01T14:21:07Z"
dolphinscheduler-ui-next/src/views/resource/file/edit/index.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { useRouter } from 'vue-router' import { defineComponent, onMounted, ref, toRefs } from 'vue' import { NButton, NForm, NFormItem, NSpace } from 'naive-ui' import { useI18n } from 'vue-i18n' import Card from '@/components/card' import MonacoEditor from '@/components/monaco-editor' import { useForm } from './use-form' import { useEdit } from './use-edit' import styles from '../index.module.scss' import type { Router } from 'vue-router' export default defineComponent({ name: 'ResourceFileEdit', setup() { const router: Router = useRouter() const resourceViewRef = ref() const routeNameRef = ref(router.currentRoute.value.name) const idRef = ref(Number(router.currentRoute.value.params.id)) const { state } = useForm() const { getResourceView, handleUpdateContent } = useEdit(state) const handleFileContent = () => { state.fileForm.content = resourceViewRef.value.content handleUpdateContent(idRef.value) } const handleReturn = () => { router.go(-1) } onMounted(() => { resourceViewRef.value = getResourceView(idRef.value) }) return { idRef, routeNameRef, resourceViewRef, handleReturn, handleFileContent, ...toRefs(state) } }, render() { const { t } = useI18n() return ( <Card title={t('resource.file.file_details')}> <div class={styles['file-edit-content']}> <h2> <span>{this.resourceViewRef.value?.alias}</span> </h2> <NForm rules={this.rules} ref='fileFormRef' class={styles['form-content']} > <NFormItem path='content'> <div style={{ width: '90%' }}> <MonacoEditor v-model={[this.resourceViewRef.value.content, 'value']} /> </div> </NFormItem> {this.routeNameRef === 'resource-file-edit' && ( <NSpace> <NButton type='info' size='small' text style={{ marginRight: '15px' }} onClick={this.handleReturn} > {t('resource.file.return')} </NButton> <NButton type='info' size='small' round onClick={() => this.handleFileContent()} > {t('resource.file.save')} </NButton> </NSpace> )} </NForm> </div> </Card> ) } })
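One plausible reading of the blank page reported in issue #8615: in the component above, `resourceViewRef` starts as an empty `ref()` and is only assigned in `onMounted`, yet the render function dereferences `this.resourceViewRef.value.content` unconditionally, so the very first render can throw and leave the route blank. The sketch below is a simplified, defensive variant that binds to a reactive object with safe defaults; it is illustrative only and not necessarily the change applied in PR #8631:

```tsx
import { defineComponent, onMounted, reactive } from 'vue'
import MonacoEditor from '@/components/monaco-editor'
import { viewResource } from '@/service/modules/resources'

// Hypothetical simplified edit view: the first render always has a defined
// `content` to bind to, even before the request resolves.
export default defineComponent({
  name: 'ResourceFileEditSketch',
  props: { id: { type: Number, required: true } },
  setup(props) {
    const view = reactive({ alias: '', content: '' })

    onMounted(async () => {
      const data = await viewResource({ skipLineNum: 0, limit: 3000 }, props.id)
      view.alias = data.alias
      view.content = data.content
    })

    return { view }
  },
  render() {
    return (
      <div>
        <h2>{this.view.alias}</h2>
        <MonacoEditor v-model={[this.view.content, 'value']} />
      </div>
    )
  }
})
```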
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,615
[Bug][UI Next][V1.0.0-Alpha] Resource manage file editing page displays a blank page when opened
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The page is blank when I click the file edit button in resource manage. ### What you expected to happen The page displays the file editing contents. ### How to reproduce Open Resources -> File Manage in the UI and click the file edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8615
https://github.com/apache/dolphinscheduler/pull/8631
7b66b66e8ffbadeadc0b7f3081e7e218b776932c
18c384c60aa87159a761ae35d67ebcdec8a6009f
"2022-03-01T08:07:19Z"
java
"2022-03-01T14:21:07Z"
dolphinscheduler-ui-next/src/views/resource/file/edit/use-edit.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { useI18n } from 'vue-i18n' import { useRouter } from 'vue-router' import type { Router } from 'vue-router' import { useAsyncState } from '@vueuse/core' import { viewResource, updateResourceContent } from '@/service/modules/resources' export function useEdit(state: any) { const { t } = useI18n() const router: Router = useRouter() const getResourceView = (id: number) => { const params = { skipLineNum: 0, limit: 3000 } const { state } = useAsyncState(viewResource(params, id), {}) return state } const handleUpdateContent = (id: number) => { state.fileFormRef.validate(async (valid: any) => { if (!valid) { try { await updateResourceContent( { ...state.fileForm }, id ) window.$message.success(t('resource.file.success')) router.go(-1) } catch (error: any) { window.$message.error(error.message) } } }) } return { getResourceView, handleUpdateContent } }
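Related to the same issue, `getResourceView` above seeds `useAsyncState` with an empty object, so anything reading `.content` synchronously sees `undefined` until the request resolves. A small sketch of a safer variant that keeps the `@vueuse/core` helper but provides the fields the edit page reads plus an `isReady` flag; the function name `useResourceView` is made up for illustration, and this is not necessarily the fix that PR #8631 applied:

```ts
import { useAsyncState } from '@vueuse/core'
import { viewResource } from '@/service/modules/resources'

// Hypothetical variant: safe defaults before the request resolves, and an
// isReady flag the template can use to defer rendering the editor.
export function useResourceView(id: number) {
  const { state, isReady } = useAsyncState(
    viewResource({ skipLineNum: 0, limit: 3000 }, id),
    { alias: '', content: '' }
  )
  return { state, isReady }
}
```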
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,574
[Feature][UI Next][V1.0.0-Alpha] Delete the space between the create button in `Security - Warning Instance Manage`
null
https://github.com/apache/dolphinscheduler/issues/8574
https://github.com/apache/dolphinscheduler/pull/8665
2f21c89b80ea77745ea4856097ac9cfc8c86b975
b43e5f0d7e3eb79fe53bf3c965f99af88d16da71
"2022-02-28T07:25:10Z"
java
"2022-03-02T11:59:22Z"
dolphinscheduler-ui-next/src/views/security/alarm-instance-manage/index.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, onMounted, ref, toRefs } from 'vue' import { NButton, NInput, NIcon, NDataTable, NPagination, NSpace } from 'naive-ui' import Card from '@/components/card' import DetailModal from './detail' import { SearchOutlined } from '@vicons/antd' import { useI18n } from 'vue-i18n' import { useUserInfo } from './use-userinfo' import { useColumns } from './use-columns' import { useTable } from './use-table' import styles from './index.module.scss' import type { IRecord } from './types' const AlarmInstanceManage = defineComponent({ name: 'alarm-instance-manage', setup() { const { t } = useI18n() const showDetailModal = ref(false) const currentRecord = ref() const { IS_ADMIN } = useUserInfo() const { columnsRef } = useColumns( (record: IRecord, type: 'edit' | 'delete') => { if (type === 'edit') { showDetailModal.value = true currentRecord.value = record } else { deleteRecord(record.id) } } ) const { data, changePage, changePageSize, deleteRecord, updateList } = useTable() const onCreate = () => { currentRecord.value = null showDetailModal.value = true } const onCloseModal = () => { showDetailModal.value = false currentRecord.value = {} } onMounted(() => { changePage(1) }) return { t, IS_ADMIN, showDetailModal, currentRecord: currentRecord, columnsRef, ...toRefs(data), changePage, changePageSize, onCreate, onCloseModal, onUpdatedList: updateList } }, render() { const { t, IS_ADMIN, currentRecord, showDetailModal, columnsRef, list, page, pageSize, itemCount, loading, changePage, changePageSize, onCreate, onUpdatedList, onCloseModal } = this return ( <> <Card title=''> {{ default: () => ( <div class={styles['conditions']}> {IS_ADMIN && ( <NButton onClick={onCreate} type='primary'>{`${t( 'security.alarm_instance.create' )} ${t('security.alarm_instance.alarm_instance')}`}</NButton> )} <NSpace class={styles['conditions-search']} justify='end' wrap={false} > <div class={styles['conditions-search-input']}> <NInput v-model={[this.searchVal, 'value']} placeholder={`${t( 'security.alarm_instance.search_input_tips' )}`} /> </div> <NButton type='primary' onClick={onUpdatedList}> <NIcon> <SearchOutlined /> </NIcon> </NButton> </NSpace> </div> ) }} </Card> <Card title='' class={styles['mt-8']}> <NDataTable columns={columnsRef} data={list} loading={loading} striped /> <NPagination page={page} page-size={pageSize} item-count={itemCount} show-quick-jumper class={styles['pagination']} on-update:page={changePage} on-update:page-size={changePageSize} /> </Card> {IS_ADMIN && ( <DetailModal show={showDetailModal} currentRecord={currentRecord} onCancel={onCloseModal} onUpdate={onUpdatedList} /> )} </> ) } }) export default AlarmInstanceManage
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,632
[Bug][UI Next][V1.0.0-Alpha] The task instance button prompts an error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In Project Management -> Project Instance -> Task -> Task Instance, the force-success button for a failed task shows an incorrect tooltip. <img width="1425" alt="image" src="https://user-images.githubusercontent.com/76080484/156141468-b17f0e1f-e62a-4261-8c5a-a47a0748da45.png"> ### What you expected to happen The button tooltip should read "Forced Success" (强制成功). ### How to reproduce Go to Project Management -> Project Instance -> Task -> Task Instance and hover over the force-success button of a failed task; the tooltip text is wrong. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8632
https://github.com/apache/dolphinscheduler/pull/8668
60a00490c6706228de22feedf9ea9c6b5a40e934
8096a1063bb6b23066b60da856d1349c04ee9c81
"2022-03-01T09:25:56Z"
java
"2022-03-02T13:04:47Z"
dolphinscheduler-ui-next/src/locales/modules/en_US.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ const login = { test: 'Test', userName: 'Username', userName_tips: 'Please enter your username', userPassword: 'Password', userPassword_tips: 'Please enter your password', login: 'Login' } const modal = { cancel: 'Cancel', confirm: 'Confirm' } const theme = { light: 'Light', dark: 'Dark' } const userDropdown = { profile: 'Profile', password: 'Password', logout: 'Logout' } const menu = { home: 'Home', project: 'Project', resources: 'Resources', datasource: 'Datasource', monitor: 'Monitor', security: 'Security', project_overview: 'Project Overview', workflow_relation: 'Workflow Relation', workflow: 'Workflow', workflow_definition: 'Workflow Definition', workflow_instance: 'Workflow Instance', task: 'Task', task_instance: 'Task Instance', task_definition: 'Task Definition', file_manage: 'File Manage', udf_manage: 'UDF Manage', resource_manage: 'Resource Manage', function_manage: 'Function Manage', service_manage: 'Service Manage', master: 'Master', worker: 'Worker', db: 'DB', statistical_manage: 'Statistical Manage', statistics: 'Statistics', audit_log: 'Audit Log', tenant_manage: 'Tenant Manage', user_manage: 'User Manage', alarm_group_manage: 'Alarm Group Manage', alarm_instance_manage: 'Alarm Instance Manage', worker_group_manage: 'Worker Group Manage', yarn_queue_manage: 'Yarn Queue Manage', environment_manage: 'Environment Manage', k8s_namespace_manage: 'K8S Namespace Manage', token_manage: 'Token Manage', task_group_manage: 'Task Group Manage', task_group_option: 'Task Group Option', task_group_queue: 'Task Group Queue', data_quality: 'Data Quality', task_result: 'Task Result', rule: 'Rule management' } const home = { task_state_statistics: 'Task State Statistics', process_state_statistics: 'Process State Statistics', process_definition_statistics: 'Process Definition Statistics', number: 'Number', state: 'State' } const password = { edit_password: 'Edit Password', password: 'Password', confirm_password: 'Confirm Password', password_tips: 'Please enter your password', confirm_password_tips: 'Please enter your confirm password', two_password_entries_are_inconsistent: 'Two password entries are inconsistent', submit: 'Submit' } const profile = { profile: 'Profile', edit: 'Edit', username: 'Username', email: 'Email', phone: 'Phone', state: 'State', permission: 'Permission', create_time: 'Create Time', update_time: 'Update Time', administrator: 'Administrator', ordinary_user: 'Ordinary User', edit_profile: 'Edit Profile', username_tips: 'Please enter your username', email_tips: 'Please enter your email', email_correct_tips: 'Please enter your email in the correct format', phone_tips: 'Please enter your phone', state_tips: 'Please choose your state', enable: 'Enable', disable: 'Disable' } const monitor = { master: { cpu_usage: 
'CPU Usage', memory_usage: 'Memory Usage', load_average: 'Load Average', create_time: 'Create Time', last_heartbeat_time: 'Last Heartbeat Time', directory_detail: 'Directory Detail', host: 'Host', directory: 'Directory' }, worker: { cpu_usage: 'CPU Usage', memory_usage: 'Memory Usage', load_average: 'Load Average', create_time: 'Create Time', last_heartbeat_time: 'Last Heartbeat Time', directory_detail: 'Directory Detail', host: 'Host', directory: 'Directory' }, db: { health_state: 'Health State', max_connections: 'Max Connections', threads_connections: 'Threads Connections', threads_running_connections: 'Threads Running Connections' }, statistics: { command_number_of_waiting_for_running: 'Command Number Of Waiting For Running', failure_command_number: 'Failure Command Number', tasks_number_of_waiting_running: 'Tasks Number Of Waiting Running', task_number_of_ready_to_kill: 'Task Number Of Ready To Kill' }, audit_log: { user_name: 'User Name', resource_type: 'Resource Type', project_name: 'Project Name', operation_type: 'Operation Type', create_time: 'Create Time', start_time: 'Start Time', end_time: 'End Time', user_audit: 'User Audit', project_audit: 'Project Audit', create: 'Create', update: 'Update', delete: 'Delete', read: 'Read' } } const resource = { file: { file_manage: 'File Manage', create_folder: 'Create Folder', create_file: 'Create File', upload_files: 'Upload Files', enter_keyword_tips: 'Please enter keyword', name: 'Name', user_name: 'Resource userName', whether_directory: 'Whether directory', file_name: 'File Name', description: 'Description', size: 'Size', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', rename: 'Rename', download: 'Download', delete: 'Delete', yes: 'Yes', no: 'No', folder_name: 'Folder Name', enter_name_tips: 'Please enter name', enter_description_tips: 'Please enter description', enter_content_tips: 'Please enter the resource content', file_format: 'File Format', file_content: 'File Content', delete_confirm: 'Delete?', confirm: 'Confirm', cancel: 'Cancel', success: 'Success', file_details: 'File Details', return: 'Return', save: 'Save' }, udf: { udf_resources: 'UDF resources', create_folder: 'Create Folder', upload_udf_resources: 'Upload UDF Resources', udf_source_name: 'UDF Resource Name', whether_directory: 'Whether directory', file_name: 'File Name', file_size: 'File Size', description: 'Description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', yes: 'Yes', no: 'No', edit: 'Edit', download: 'Download', delete: 'Delete', delete_confirm: 'Delete?', success: 'Success', folder_name: 'Folder Name', upload: 'Upload', upload_files: 'Upload Files', file_upload: 'File Upload', enter_keyword_tips: 'Please enter keyword', enter_name_tips: 'Please enter name', enter_description_tips: 'Please enter description' }, function: { udf_function: 'UDF Function', create_udf_function: 'Create UDF Function', edit_udf_function: 'Create UDF Function', udf_function_name: 'UDF Function Name', class_name: 'Class Name', type: 'Type', description: 'Description', jar_package: 'Jar Package', update_time: 'Update Time', operation: 'Operation', rename: 'Rename', edit: 'Edit', delete: 'Delete', success: 'Success', package_name: 'Package Name', udf_resources: 'UDF Resources', instructions: 'Instructions', upload_resources: 'Upload Resources', udf_resources_directory: 'UDF resources directory', delete_confirm: 'Delete?', enter_keyword_tips: 'Please enter keyword', enter_udf_unction_name_tips: 'Please enter a UDF function name', 
enter_package_name_tips: 'Please enter a Package name', enter_select_udf_resources_tips: 'Please select UDF resources', enter_select_udf_resources_directory_tips: 'Please select UDF resources directory', enter_instructions_tips: 'Please enter a instructions', enter_name_tips: 'Please enter name', enter_description_tips: 'Please enter description' }, task_group_option: { manage: 'Task group manage', option: 'Task group option', create: 'Create task group', edit: 'Edit task group', delete: 'Delete task group', view_queue: 'View the queue of the task group', switch_status: 'Switch status', code: 'Task group code', name: 'Task group name', project_name: 'Project name', resource_pool_size: 'Resource pool size', resource_pool_size_be_a_number: 'The size of the task group resource pool should be more than 1', resource_used_pool_size: 'Used resource', desc: 'Task group desc', status: 'Task group status', enable_status: 'Enable', disable_status: 'Disable', please_enter_name: 'Please enter task group name', please_enter_desc: 'Please enter task group description', please_enter_resource_pool_size: 'Please enter task group resource pool size', please_select_project: 'Please select a project', create_time: 'Create time', update_time: 'Update time', actions: 'Actions', please_enter_keywords: 'Please enter keywords' }, task_group_queue: { actions: 'Actions', task_name: 'Task name', task_group_name: 'Task group name', project_name: 'Project name', process_name: 'Process name', process_instance_name: 'Process instance', queue: 'Task group queue', priority: 'Priority', priority_be_a_number: 'The priority of the task group queue should be a positive number', force_starting_status: 'Starting status', in_queue: 'In queue', task_status: 'Task status', view: 'View task group queue', the_status_of_waiting: 'Waiting into the queue', the_status_of_queuing: 'Queuing', the_status_of_releasing: 'Released', modify_priority: 'Edit the priority', start_task: 'Start the task', priority_not_empty: 'The value of priority can not be empty', priority_must_be_number: 'The value of priority should be number', please_select_task_name: 'Please select a task name', create_time: 'Create time', update_time: 'Update time', edit_priority: 'Edit the task priority' } } const project = { list: { create_project: 'Create Project', edit_project: 'Edit Project', project_list: 'Project List', project_tips: 'Please enter your project', description_tips: 'Please enter your description', username_tips: 'Please enter your username', project_name: 'Project Name', project_description: 'Project Description', owned_users: 'Owned Users', workflow_define_count: 'Workflow Define Count', process_instance_running_count: 'Process Instance Running Count', description: 'Description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', confirm: 'Confirm', cancel: 'Cancel', delete_confirm: 'Delete?' 
}, workflow: { workflow_relation: 'Workflow Relation', create_workflow: 'Create Workflow', import_workflow: 'Import Workflow', workflow_name: 'Workflow Name', current_selection: 'Current Selection', online: 'Online', offline: 'Offline', refresh: 'Refresh', show_hide_label: 'Show / Hide Label', workflow_offline: 'Workflow Offline', schedule_offline: 'Schedule Offline', schedule_start_time: 'Schedule Start Time', schedule_end_time: 'Schedule End Time', crontab_expression: 'Crontab', workflow_publish_status: 'Workflow Publish Status', schedule_publish_status: 'Schedule Publish Status', workflow_definition: 'Workflow Definition', workflow_instance: 'Workflow Instance', status: 'Status', create_time: 'Create Time', update_time: 'Update Time', description: 'Description', create_user: 'Create User', modify_user: 'Modify User', operation: 'Operation', edit: 'Edit', start: 'Start', timing: 'Timing', timezone: 'Timezone', up_line: 'Online', down_line: 'Offline', copy_workflow: 'Copy Workflow', cron_manage: 'Cron manage', delete: 'Delete', tree_view: 'Tree View', export: 'Export', version_info: 'Version Info', version: 'Version', file_upload: 'File Upload', upload_file: 'Upload File', upload: 'Upload', file_name: 'File Name', success: 'Success', set_parameters_before_starting: 'Please set the parameters before starting', set_parameters_before_timing: 'Set parameters before timing', start_and_stop_time: 'Start and stop time', next_five_execution_times: 'Next five execution times', execute_time: 'Execute time', failure_strategy: 'Failure Strategy', notification_strategy: 'Notification Strategy', workflow_priority: 'Workflow Priority', worker_group: 'Worker Group', environment_name: 'Environment Name', alarm_group: 'Alarm Group', complement_data: 'Complement Data', startup_parameter: 'Startup Parameter', whether_dry_run: 'Whether Dry-Run', continue: 'Continue', end: 'End', none_send: 'None', success_send: 'Success', failure_send: 'Failure', all_send: 'All', whether_complement_data: 'Whether it is a complement process?', schedule_date: 'Schedule date', mode_of_execution: 'Mode of execution', serial_execution: 'Serial execution', parallel_execution: 'Parallel execution', parallelism: 'Parallelism', custom_parallelism: 'Custom Parallelism', please_enter_parallelism: 'Please enter Parallelism', please_choose: 'Please Choose', start_time: 'Start Time', end_time: 'End Time', crontab: 'Crontab', delete_confirm: 'Delete?', enter_name_tips: 'Please enter name', switch_version: 'Switch To This Version', confirm_switch_version: 'Confirm Switch To This Version?', current_version: 'Current Version', run_type: 'Run Type', scheduling_time: 'Scheduling Time', duration: 'Duration', run_times: 'Run Times', fault_tolerant_sign: 'Fault-tolerant Sign', dry_run_flag: 'Dry-run Flag', executor: 'Executor', host: 'Host', start_process: 'Start Process', execute_from_the_current_node: 'Execute from the current node', recover_tolerance_fault_process: 'Recover tolerance fault process', resume_the_suspension_process: 'Resume the suspension process', execute_from_the_failed_nodes: 'Execute from the failed nodes', scheduling_execution: 'Scheduling execution', rerun: 'Rerun', stop: 'Stop', pause: 'Pause', recovery_waiting_thread: 'Recovery waiting thread', recover_serial_wait: 'Recover serial wait', recovery_suspend: 'Recovery Suspend', recovery_failed: 'Recovery Failed', gantt: 'Gantt', name: 'Name', all_status: 'AllStatus', submit_success: 'Submitted successfully', running: 'Running', ready_to_pause: 'Ready to pause', ready_to_stop: 
'Ready to stop', failed: 'Failed', need_fault_tolerance: 'Need fault tolerance', kill: 'Kill', waiting_for_thread: 'Waiting for thread', waiting_for_dependence: 'Waiting for dependence', waiting_for_dependency_to_complete: 'Waiting for dependency to complete', delay_execution: 'Delay execution', forced_success: 'Forced success', serial_wait: 'Serial wait', executing: 'Executing', startup_type: 'Startup Type', complement_range: 'Complement Range', parameters_variables: 'Parameters variables', global_parameters: 'Global parameters', local_parameters: 'Local parameters', type: 'Type', retry_count: 'Retry Count', submit_time: 'Submit Time', refresh_status_succeeded: 'Refresh status succeeded', view_log: 'View log', update_log_success: 'Update log success', no_more_log: 'No more logs', no_log: 'No log', loading_log: 'Loading Log...', close: 'Close', download_log: 'Download Log', refresh_log: 'Refresh Log', enter_full_screen: 'Enter full screen', cancel_full_screen: 'Cancel full screen' }, task: { task_name: 'Task Name', task_type: 'Task Type', create_task: 'Create Task', workflow_instance: 'Workflow Instance', workflow_name: 'Workflow Name', workflow_name_tips: 'Please select workflow name', workflow_state: 'Workflow State', version: 'Version', current_version: 'Current Version', switch_version: 'Switch To This Version', confirm_switch_version: 'Confirm Switch To This Version?', description: 'Description', move: 'Move', upstream_tasks: 'Upstream Tasks', executor: 'Executor', node_type: 'Node Type', state: 'State', submit_time: 'Submit Time', start_time: 'Start Time', create_time: 'Create Time', update_time: 'Update Time', end_time: 'End Time', duration: 'Duration', retry_count: 'Retry Count', dry_run_flag: 'Dry Run Flag', host: 'Host', operation: 'Operation', edit: 'Edit', delete: 'Delete', delete_confirm: 'Delete?', submitted_success: 'Submitted Success', running_execution: 'Running Execution', ready_pause: 'Ready Pause', pause: 'Pause', ready_stop: 'Ready Stop', stop: 'Stop', failure: 'Failure', success: 'Success', need_fault_tolerance: 'Need Fault Tolerance', kill: 'Kill', waiting_thread: 'Waiting Thread', waiting_depend: 'Waiting Depend', delay_execution: 'Delay Execution', forced_success: 'Forced Success', serial_wait: 'Serial Wait', view_log: 'View Log', download_log: 'Download Log' }, dag: { create: 'Create Workflow', search: 'Search', download_png: 'Download PNG', fullscreen_open: 'Open Fullscreen', fullscreen_close: 'Close Fullscreen', save: 'Save', close: 'Close', format: 'Format', refresh_dag_status: 'Refresh DAG status', layout_type: 'Layout Type', grid_layout: 'Grid', dagre_layout: 'Dagre', rows: 'Rows', cols: 'Cols', copy_success: 'Copy Success', workflow_name: 'Workflow Name', description: 'Description', tenant: 'Tenant', timeout_alert: 'Timeout Alert', global_variables: 'Global Variables', basic_info: 'Basic Information', minute: 'Minute', key: 'Key', value: 'Value', success: 'Success', delete_cell: 'Delete selected edges and nodes', online_directly: 'Whether to go online the process definition', dag_name_empty: 'DAG graph name cannot be empty', positive_integer: 'Please enter a positive integer greater than 0', prop_empty: 'prop is empty', prop_repeat: 'prop is repeat', node_not_created: 'Failed to save node not created', copy_name: 'Copy Name', view_variables: 'View Variables', startup_parameter: 'Startup Parameter' }, node: { current_node_settings: 'Current node settings', instructions: 'Instructions', view_history: 'View history', view_log: 'View log', enter_this_child_node: 
'Enter this child node', name: 'Node name', name_tips: 'Please enter name (required)', task_type: 'Task Type', task_type_tips: 'Please select a task type (required)', process_name: 'Process Name', process_name_tips: 'Please select a process (required)', child_node: 'Child Node', enter_child_node: 'Enter child node', run_flag: 'Run flag', normal: 'Normal', prohibition_execution: 'Prohibition execution', description: 'Description', description_tips: 'Please enter description', task_priority: 'Task priority', worker_group: 'Worker group', worker_group_tips: 'The Worker group no longer exists, please select the correct Worker group!', environment_name: 'Environment Name', task_group_name: 'Task group name', task_group_queue_priority: 'Priority', number_of_failed_retries: 'Number of failed retries', times: 'Times', failed_retry_interval: 'Failed retry interval', minute: 'Minute', delay_execution_time: 'Delay execution time', state: 'State', branch_flow: 'Branch flow', cancel: 'Cancel', loading: 'Loading...', confirm: 'Confirm', success: 'Success', failed: 'Failed', backfill_tips: 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', task_instance_tips: 'The task has not been executed and cannot enter the sub-Process', branch_tips: 'Cannot select the same node for successful branch flow and failed branch flow', timeout_alarm: 'Timeout alarm', timeout_strategy: 'Timeout strategy', timeout_strategy_tips: 'Timeout strategy must be selected', timeout_failure: 'Timeout failure', timeout_period: 'Timeout period', timeout_period_tips: 'Timeout must be a positive integer', script: 'Script', script_tips: 'Please enter script(required)', resources: 'Resources', resources_tips: 'Please select resources', non_resources_tips: 'Please delete all non-existent resources', useless_resources_tips: 'Unauthorized or deleted resources', custom_parameters: 'Custom Parameters', copy_success: 'Copy success', copy_failed: 'The browser does not support automatic copying', prop_tips: 'prop(required)', prop_repeat: 'prop is repeat', value_tips: 'value(optional)', value_required_tips: 'value(required)', pre_tasks: 'Pre tasks', program_type: 'Program Type', spark_version: 'Spark Version', main_class: 'Main Class', main_class_tips: 'Please enter main class', main_package: 'Main Package', main_package_tips: 'Please enter main package', deploy_mode: 'Deploy Mode', app_name: 'App Name', app_name_tips: 'Please enter app name(optional)', driver_cores: 'Driver Cores', driver_cores_tips: 'Please enter Driver cores', driver_memory: 'Driver Memory', driver_memory_tips: 'Please enter Driver memory', executor_number: 'Executor Number', executor_number_tips: 'Please enter Executor number', executor_memory: 'Executor Memory', executor_memory_tips: 'Please enter Executor memory', executor_cores: 'Executor Cores', executor_cores_tips: 'Please enter Executor cores', main_arguments: 'Main Arguments', main_arguments_tips: 'Please enter main arguments', option_parameters: 'Option Parameters', option_parameters_tips: 'Please enter option parameters', positive_integer_tips: 'should be a positive integer', flink_version: 'Flink Version', job_manager_memory: 'JobManager Memory', job_manager_memory_tips: 'Please enter JobManager memory', task_manager_memory: 'TaskManager Memory', task_manager_memory_tips: 'Please enter TaskManager memory', slot_number: 'Slot Number', slot_number_tips: 'Please enter Slot number', parallelism: 'Parallelism', custom_parallelism: 'Configure parallelism', parallelism_tips: 'Please 
enter Parallelism', parallelism_number_tips: 'Parallelism number should be positive integer', parallelism_complement_tips: 'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' + 'set the complement task thread to a reasonable value to avoid too large impact on the server.', task_manager_number: 'TaskManager Number', task_manager_number_tips: 'Please enter TaskManager number', http_url: 'Http Url', http_url_tips: 'Please Enter Http Url', http_method: 'Http Method', http_parameters: 'Http Parameters', http_check_condition: 'Http Check Condition', http_condition: 'Http Condition', http_condition_tips: 'Please Enter Http Condition', timeout_settings: 'Timeout Settings', connect_timeout: 'Connect Timeout', ms: 'ms', socket_timeout: 'Socket Timeout', status_code_default: 'Default response code 200', status_code_custom: 'Custom response code', body_contains: 'Content includes', body_not_contains: 'Content does not contain', http_parameters_position: 'Http Parameters Position', target_task_name: 'Target Task Name', target_task_name_tips: 'Please enter the Pigeon task name', datasource_type: 'Datasource types', datasource_instances: 'Datasource instances', sql_type: 'SQL Type', sql_type_query: 'Query', sql_type_non_query: 'Non Query', sql_statement: 'SQL Statement', pre_sql_statement: 'Pre SQL Statement', post_sql_statement: 'Post SQL Statement', sql_input_placeholder: 'Please enter non-query sql.', sql_empty_tips: 'The sql can not be empty.', procedure_method: 'SQL Statement', procedure_method_tips: 'Please enter the procedure script', procedure_method_snippet: '--Please enter the procedure script \n\n--call procedure:call <procedure-name>[(<arg1>,<arg2>, ...)]\n\n--call function:?= call <procedure-name>[(<arg1>,<arg2>, ...)]', start: 'Start', edit: 'Edit', copy: 'Copy', delete: 'Delete', custom_job: 'Custom Job', custom_script: 'Custom Script', sqoop_job_name: 'Job Name', sqoop_job_name_tips: 'Please enter Job Name(required)', direct: 'Direct', hadoop_custom_params: 'Hadoop Params', sqoop_advanced_parameters: 'Sqoop Advanced Parameters', data_source: 'Data Source', type: 'Type', datasource: 'Datasource', datasource_tips: 'Please select the datasource', model_type: 'ModelType', form: 'Form', table: 'Table', table_tips: 'Please enter Mysql Table(required)', column_type: 'ColumnType', all_columns: 'All Columns', some_columns: 'Some Columns', column: 'Column', column_tips: 'Please enter Columns (Comma separated)', database: 'Database', database_tips: 'Please enter Hive Database(required)', hive_table_tips: 'Please enter Hive Table(required)', hive_partition_keys: 'Hive partition Keys', hive_partition_keys_tips: 'Please enter Hive Partition Keys', hive_partition_values: 'Hive partition Values', hive_partition_values_tips: 'Please enter Hive Partition Values', export_dir: 'Export Dir', export_dir_tips: 'Please enter Export Dir(required)', sql_statement_tips: 'SQL Statement(required)', map_column_hive: 'Map Column Hive', map_column_java: 'Map Column Java', data_target: 'Data Target', create_hive_table: 'CreateHiveTable', drop_delimiter: 'DropDelimiter', over_write_src: 'OverWriteSrc', hive_target_dir: 'Hive Target Dir', hive_target_dir_tips: 'Please enter hive target dir', replace_delimiter: 'ReplaceDelimiter', replace_delimiter_tips: 'Please enter Replace Delimiter', target_dir: 'Target Dir', target_dir_tips: 'Please enter Target Dir(required)', delete_target_dir: 'DeleteTargetDir', compression_codec: 'CompressionCodec', file_type: 'FileType', 
fields_terminated: 'FieldsTerminated', fields_terminated_tips: 'Please enter Fields Terminated', lines_terminated: 'LinesTerminated', lines_terminated_tips: 'Please enter Lines Terminated', is_update: 'IsUpdate', update_key: 'UpdateKey', update_key_tips: 'Please enter Update Key', update_mode: 'UpdateMode', only_update: 'OnlyUpdate', allow_insert: 'AllowInsert', concurrency: 'Concurrency', concurrency_tips: 'Please enter Concurrency', sea_tunnel_deploy_mode: 'Deploy Mode', sea_tunnel_master: 'Master', sea_tunnel_master_url: 'Master URL', sea_tunnel_queue: 'Queue', sea_tunnel_master_url_tips: 'Please enter the master url, e.g., 127.0.0.1:7077', switch_condition: 'Condition', switch_branch_flow: 'Branch Flow', and: 'and', or: 'or', datax_custom_template: 'Custom Template Switch', datax_json_template: 'JSON', datax_target_datasource_type: 'Target Datasource Type', datax_target_database: 'Target Database', datax_target_table: 'Target Table', datax_target_table_tips: 'Please enter the name of the target table', datax_target_database_pre_sql: 'Pre SQL Statement', datax_target_database_post_sql: 'Post SQL Statement', datax_non_query_sql_tips: 'Please enter the non-query sql statement', datax_job_speed_byte: 'Speed(Byte count)', datax_job_speed_byte_info: '(0 means unlimited)', datax_job_speed_record: 'Speed(Record count)', datax_job_speed_record_info: '(0 means unlimited)', datax_job_runtime_memory: 'Runtime Memory Limits', datax_job_runtime_memory_xms: 'Low Limit Value', datax_job_runtime_memory_xmx: 'High Limit Value', datax_job_runtime_memory_unit: 'G', current_hour: 'CurrentHour', last_1_hour: 'Last1Hour', last_2_hour: 'Last2Hours', last_3_hour: 'Last3Hours', last_24_hour: 'Last24Hours', today: 'today', last_1_days: 'Last1Days', last_2_days: 'Last2Days', last_3_days: 'Last3Days', last_7_days: 'Last7Days', this_week: 'ThisWeek', last_week: 'LastWeek', last_monday: 'LastMonday', last_tuesday: 'LastTuesday', last_wednesday: 'LastWednesday', last_thursday: 'LastThursday', last_friday: 'LastFriday', last_saturday: 'LastSaturday', last_sunday: 'LastSunday', this_month: 'ThisMonth', last_month: 'LastMonth', last_month_begin: 'LastMonthBegin', last_month_end: 'LastMonthEnd', month: 'month', week: 'week', day: 'day', hour: 'hour', add_dependency: 'Add dependency', waiting_dependent_start: 'Waiting Dependent start', check_interval: 'Check interval', waiting_dependent_complete: 'Waiting Dependent complete' } } const security = { tenant: { tenant_manage: 'Tenant Manage', create_tenant: 'Create Tenant', search_tips: 'Please enter keywords', tenant_code: 'Operating System Tenant', description: 'Description', queue_name: 'QueueName', create_time: 'Create Time', update_time: 'Update Time', actions: 'Operation', edit_tenant: 'Edit Tenant', tenant_code_tips: 'Please enter the operating system tenant', queue_name_tips: 'Please select queue', description_tips: 'Please enter a description', delete_confirm: 'Delete?', edit: 'Edit', delete: 'Delete' }, alarm_group: { create_alarm_group: 'Create Alarm Group', edit_alarm_group: 'Edit Alarm Group', search_tips: 'Please enter keywords', alert_group_name_tips: 'Please enter your alert group name', alarm_plugin_instance: 'Alarm Plugin Instance', alarm_plugin_instance_tips: 'Please select alert plugin instance', alarm_group_description_tips: 'Please enter your alarm group description', alert_group_name: 'Alert Group Name', alarm_group_description: 'Alarm Group Description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', delete_confirm: 
'Delete?', edit: 'Edit', delete: 'Delete' }, worker_group: { create_worker_group: 'Create Worker Group', edit_worker_group: 'Edit Worker Group', search_tips: 'Please enter keywords', operation: 'Operation', delete_confirm: 'Delete?', edit: 'Edit', delete: 'Delete', group_name: 'Group Name', group_name_tips: 'Please enter your group name', worker_addresses: 'Worker Addresses', worker_addresses_tips: 'Please select worker addresses', create_time: 'Create Time', update_time: 'Update Time' }, yarn_queue: { create_queue: 'Create Queue', edit_queue: 'Edit Queue', search_tips: 'Please enter keywords', queue_name: 'Queue Name', queue_value: 'Queue Value', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', queue_name_tips: 'Please enter your queue name', queue_value_tips: 'Please enter your queue value' }, environment: { create_environment: 'Create Environment', edit_environment: 'Edit Environment', search_tips: 'Please enter keywords', edit: 'Edit', delete: 'Delete', environment_name: 'Environment Name', environment_config: 'Environment Config', environment_desc: 'Environment Desc', worker_groups: 'Worker Groups', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', delete_confirm: 'Delete?', environment_name_tips: 'Please enter your environment name', environment_config_tips: 'Please enter your environment config', environment_description_tips: 'Please enter your environment description', worker_group_tips: 'Please select worker group' }, token: { create_token: 'Create Token', edit_token: 'Edit Token', search_tips: 'Please enter keywords', user: 'User', user_tips: 'Please select user', token: 'Token', token_tips: 'Please enter your token', expiration_time: 'Expiration Time', expiration_time_tips: 'Please select expiration time', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', delete_confirm: 'Delete?' 
}, user: { user_manage: 'User Manage', create_user: 'Create User', update_user: 'Update User', delete_user: 'Delete User', delete_confirm: 'Are you sure to delete?', delete_confirm_tip: 'Deleting user is a dangerous operation,please be careful', project: 'Project', resource: 'Resource', file_resource: 'File Resource', udf_resource: 'UDF Resource', datasource: 'Datasource', udf: 'UDF Function', authorize_project: 'Project Authorize', authorize_resource: 'Resource Authorize', authorize_datasource: 'Datasource Authorize', authorize_udf: 'UDF Function Authorize', username: 'Username', username_exists: 'The username already exists', username_rule_msg: 'Please enter username', user_password: 'Please enter password', user_password_rule_msg: 'Please enter a password containing letters and numbers with a length between 6 and 20', user_type: 'User Type', tenant_code: 'Tenant', tenant_id_rule_msg: 'Please select tenant', queue: 'Queue', email: 'Email', email_rule_msg: 'Please enter valid email', phone: 'Phone', phone_rule_msg: 'Please enter valid phone number', state: 'State', state_enabled: 'Enabled', state_disabled: 'Disabled', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', authorize: 'Authorize', save_error_msg: 'Failed to save, please retry', delete_error_msg: 'Failed to delete, please retry', auth_error_msg: 'Failed to authorize, please retry', auth_success_msg: 'Authorize succeeded' }, alarm_instance: { search_input_tips: 'Please input the keywords', alarm_instance_manage: 'Alarm instance manage', alarm_instance: 'Alarm Instance', alarm_instance_name: 'Alarm instance name', alarm_instance_name_tips: 'Please enter alarm plugin instance name', alarm_plugin_name: 'Alarm plugin name', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 'Edit', delete: 'Delete', confirm: 'Confirm', cancel: 'Cancel', submit: 'Submit', create: 'Create', select_plugin: 'Select plugin', select_plugin_tips: 'Select Alarm plugin', instance_parameter_exception: 'Instance parameter exception', WebHook: 'WebHook', webHook: 'WebHook', IsEnableProxy: 'Enable Proxy', Proxy: 'Proxy', Port: 'Port', User: 'User', corpId: 'CorpId', secret: 'Secret', Secret: 'Secret', users: 'Users', userSendMsg: 'UserSendMsg', agentId: 'AgentId', showType: 'Show Type', receivers: 'Receivers', receiverCcs: 'ReceiverCcs', serverHost: 'SMTP Host', serverPort: 'SMTP Port', sender: 'Sender', enableSmtpAuth: 'SMTP Auth', Password: 'Password', starttlsEnable: 'SMTP STARTTLS Enable', sslEnable: 'SMTP SSL Enable', smtpSslTrust: 'SMTP SSL Trust', url: 'URL', requestType: 'Request Type', headerParams: 'Headers', bodyParams: 'Body', contentField: 'Content Field', Keyword: 'Keyword', userParams: 'User Params', path: 'Script Path', type: 'Type', sendType: 'Send Type', username: 'Username', botToken: 'Bot Token', chatId: 'Channel Chat Id', parseMode: 'Parse Mode' }, k8s_namespace: { create_namespace: 'Create Namespace', edit_namespace: 'Edit Namespace', search_tips: 'Please enter keywords', k8s_namespace: 'K8S Namespace', k8s_namespace_tips: 'Please enter k8s namespace', k8s_cluster: 'K8S Cluster', k8s_cluster_tips: 'Please enter k8s cluster', owner: 'Owner', owner_tips: 'Please enter owner', tag: 'Tag', tag_tips: 'Please enter tag', limit_cpu: 'Limit CPU', limit_cpu_tips: 'Please enter limit CPU', limit_memory: 'Limit Memory', limit_memory_tips: 'Please enter limit memory', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', edit: 
'Edit', delete: 'Delete', delete_confirm: 'Delete?' } } const datasource = { datasource: 'DataSource', create_datasource: 'Create DataSource', search_input_tips: 'Please input the keywords', datasource_name: 'Datasource Name', datasource_name_tips: 'Please enter datasource name', datasource_user_name: 'Owner', datasource_type: 'Datasource Type', datasource_parameter: 'Datasource Parameter', description: 'Description', description_tips: 'Please enter description', create_time: 'Create Time', update_time: 'Update Time', operation: 'Operation', click_to_view: 'Click to view', delete: 'Delete', confirm: 'Confirm', cancel: 'Cancel', create: 'Create', edit: 'Edit', success: 'Success', test_connect: 'Test Connect', ip: 'IP', ip_tips: 'Please enter IP', port: 'Port', port_tips: 'Please enter port', database_name: 'Database Name', database_name_tips: 'Please enter database name', oracle_connect_type: 'ServiceName or SID', oracle_connect_type_tips: 'Please select serviceName or SID', oracle_service_name: 'ServiceName', oracle_sid: 'SID', jdbc_connect_parameters: 'jdbc connect parameters', principal_tips: 'Please enter Principal', krb5_conf_tips: 'Please enter the kerberos authentication parameter java.security.krb5.conf', keytab_username_tips: 'Please enter the kerberos authentication parameter login.user.keytab.username', keytab_path_tips: 'Please enter the kerberos authentication parameter login.user.keytab.path', format_tips: 'Please enter format', connection_parameter: 'connection parameter', user_name: 'User Name', user_name_tips: 'Please enter your username', user_password: 'Password', user_password_tips: 'Please enter your password' } const data_quality = { task_result: { task_name: 'Task Name', workflow_instance: 'Workflow Instance', rule_type: 'Rule Type', rule_name: 'Rule Name', state: 'State', actual_value: 'Actual Value', excepted_value: 'Excepted Value', check_type: 'Check Type', operator: 'Operator', threshold: 'Threshold', failure_strategy: 'Failure Strategy', excepted_value_type: 'Excepted Value Type', error_output_path: 'Error Output Path', username: 'Username', create_time: 'Create Time', update_time: 'Update Time', undone: 'Undone', success: 'Success', failure: 'Failure', single_table: 'Single Table', single_table_custom_sql: 'Single Table Custom Sql', multi_table_accuracy: 'Multi Table Accuracy', multi_table_comparison: 'Multi Table Comparison', expected_and_actual_or_expected: '(Expected - Actual) / Expected x 100%', expected_and_actual: 'Expected - Actual', actual_and_expected: 'Actual - Expected', actual_or_expected: 'Actual / Expected x 100%' }, rule: { actions: 'Actions', name: 'Rule Name', type: 'Rule Type', username: 'User Name', create_time: 'Create Time', update_time: 'Update Time', input_item: 'Rule input item', view_input_item: 'View input items', input_item_title: 'Input item title', input_item_placeholder: 'Input item placeholder', input_item_type: 'Input item type', src_connector_type: 'SrcConnType', src_datasource_id: 'SrcSource', src_table: 'SrcTable', src_filter: 'SrcFilter', src_field: 'SrcField', statistics_name: 'ActualValName', check_type: 'CheckType', operator: 'Operator', threshold: 'Threshold', failure_strategy: 'FailureStrategy', target_connector_type: 'TargetConnType', target_datasource_id: 'TargetSourceId', target_table: 'TargetTable', target_filter: 'TargetFilter', mapping_columns: 'OnClause', statistics_execute_sql: 'ActualValExecSql', comparison_name: 'ExceptedValName', comparison_execute_sql: 'ExceptedValExecSql', comparison_type: 'ExceptedValType', 
writer_connector_type: 'WriterConnType', writer_datasource_id: 'WriterSourceId', target_field: 'TargetField', field_length: 'FieldLength', logic_operator: 'LogicOperator', regexp_pattern: 'RegexpPattern', deadline: 'Deadline', datetime_format: 'DatetimeFormat', enum_list: 'EnumList', begin_time: 'BeginTime', fix_value: 'FixValue', null_check: 'NullCheck', custom_sql: 'Custom Sql', single_table: 'Single Table', single_table_custom_sql: 'Single Table Custom Sql', multi_table_accuracy: 'Multi Table Accuracy', multi_table_value_comparison: 'Multi Table Compare', field_length_check: 'FieldLengthCheck', uniqueness_check: 'UniquenessCheck', regexp_check: 'RegexpCheck', timeliness_check: 'TimelinessCheck', enumeration_check: 'EnumerationCheck', table_count_check: 'TableCountCheck', All: 'All', FixValue: 'FixValue', DailyAvg: 'DailyAvg', WeeklyAvg: 'WeeklyAvg', MonthlyAvg: 'MonthlyAvg', Last7DayAvg: 'Last7DayAvg', Last30DayAvg: 'Last30DayAvg', SrcTableTotalRows: 'SrcTableTotalRows', TargetTableTotalRows: 'TargetTableTotalRows' } } const crontab = { second: 'second', minute: 'minute', hour: 'hour', day: 'day', month: 'month', year: 'year', monday: 'Monday', tuesday: 'Tuesday', wednesday: 'Wednesday', thursday: 'Thursday', friday: 'Friday', saturday: 'Saturday', sunday: 'Sunday', every_second: 'Every second', every: 'Every', second_carried_out: 'second carried out', second_start: 'Start', specific_second: 'Specific second(multiple)', specific_second_tip: 'Please enter a specific second', cycle_from: 'Cycle from', to: 'to', every_minute: 'Every minute', minute_carried_out: 'minute carried out', minute_start: 'Start', specific_minute: 'Specific minute(multiple)', specific_minute_tip: 'Please enter a specific minute', every_hour: 'Every hour', hour_carried_out: 'hour carried out', hour_start: 'Start', specific_hour: 'Specific hour(multiple)', specific_hour_tip: 'Please enter a specific hour', every_day: 'Every day', week_carried_out: 'week carried out', start: 'Start', day_carried_out: 'day carried out', day_start: 'Start', specific_week: 'Specific day of the week(multiple)', specific_week_tip: 'Please enter a specific week', specific_day: 'Specific days(multiple)', specific_day_tip: 'Please enter a days', last_day_of_month: 'On the last day of the month', last_work_day_of_month: 'On the last working day of the month', last_of_month: 'At the last of this month', before_end_of_month: 'Before the end of this month', recent_business_day_to_month: 'The most recent business day (Monday to Friday) to this month', in_this_months: 'In this months', every_month: 'Every month', month_carried_out: 'month carried out', month_start: 'Start', specific_month: 'Specific months(multiple)', specific_month_tip: 'Please enter a months', every_year: 'Every year', year_carried_out: 'year carried out', year_start: 'Start', specific_year: 'Specific year(multiple)', specific_year_tip: 'Please enter a year', one_hour: 'hour', one_day: 'day' } export default { login, modal, theme, userDropdown, menu, home, password, profile, monitor, resource, project, security, datasource, data_quality, crontab }
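The PR referenced above touches the locale modules, and the `project.task` section of en_US.ts carries a `forced_success` key. Below is a hedged sketch of how a task-instance table button would typically consume that key through vue-i18n's composition API, mirroring the `useI18n()` / `t(...)` pattern used by the alarm-instance page earlier in this record set; the component and the key path are illustrative, not the exact file changed by the PR:

```typescript
// Illustrative only: shows the t('project.task.forced_success') lookup pattern.
// The actual fix lives in the task-instance table column definitions.
import { defineComponent } from 'vue'
import { NButton, NTooltip } from 'naive-ui'
import { useI18n } from 'vue-i18n'

export default defineComponent({
  name: 'forced-success-button',
  setup() {
    const { t } = useI18n()
    return { t }
  },
  render() {
    return (
      <NTooltip trigger='hover'>
        {{
          // The tooltip should resolve to 'Forced Success' / '强制成功',
          // which the bug report says was displayed incorrectly.
          default: () => this.t('project.task.forced_success'),
          trigger: () => <NButton size='small' circle />
        }}
      </NTooltip>
    )
  }
})
```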
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,632
[Bug][UI Next][V1.0.0-Alpha] The task instance button prompts an error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In Project Management -> Project Instance -> Task -> Task Instance, the force-success button for a failed task shows an incorrect tooltip. <img width="1425" alt="image" src="https://user-images.githubusercontent.com/76080484/156141468-b17f0e1f-e62a-4261-8c5a-a47a0748da45.png"> ### What you expected to happen The button tooltip should read "Forced Success" (强制成功). ### How to reproduce Go to Project Management -> Project Instance -> Task -> Task Instance and hover over the force-success button of a failed task; the tooltip text is wrong. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8632
https://github.com/apache/dolphinscheduler/pull/8668
60a00490c6706228de22feedf9ea9c6b5a40e934
8096a1063bb6b23066b60da856d1349c04ee9c81
"2022-03-01T09:25:56Z"
java
"2022-03-02T13:04:47Z"
dolphinscheduler-ui-next/src/locales/modules/zh_CN.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ const login = { test: '测试', userName: '用户名', userName_tips: '请输入用户名', userPassword: '密码', userPassword_tips: '请输入密码', login: '登录' } const modal = { cancel: '取消', confirm: '确定' } const theme = { light: '浅色', dark: '深色' } const userDropdown = { profile: '用户信息', password: '密码管理', logout: '退出登录' } const menu = { home: '首页', project: '项目管理', resources: '资源中心', datasource: '数据源中心', monitor: '监控中心', security: '安全中心', project_overview: '项目概览', workflow_relation: '工作流关系', workflow: '工作流', workflow_definition: '工作流定义', workflow_instance: '工作流实例', task: '任务', task_instance: '任务实例', task_definition: '任务定义', file_manage: '文件管理', udf_manage: 'UDF管理', resource_manage: '资源管理', function_manage: '函数管理', service_manage: '服务管理', master: 'Master', worker: 'Worker', db: 'DB', statistical_manage: '统计管理', statistics: 'Statistics', audit_log: '审计日志', tenant_manage: '租户管理', user_manage: '用户管理', alarm_group_manage: '告警组管理', alarm_instance_manage: '告警实例管理', worker_group_manage: 'Worker分组管理', yarn_queue_manage: 'Yarn队列管理', environment_manage: '环境管理', k8s_namespace_manage: 'K8S命名空间管理', token_manage: '令牌管理', task_group_manage: '任务组管理', task_group_option: '任务组配置', task_group_queue: '任务组队列', data_quality: '数据质量', task_result: '任务结果', rule: '规则管理' } const home = { task_state_statistics: '任务状态统计', process_state_statistics: '流程状态统计', process_definition_statistics: '流程定义统计', number: '数量', state: '状态' } const password = { edit_password: '修改密码', password: '密码', confirm_password: '确认密码', password_tips: '请输入密码', confirm_password_tips: '请输入确认密码', two_password_entries_are_inconsistent: '两次密码输入不一致', submit: '提交' } const profile = { profile: '用户信息', edit: '编辑', username: '用户名', email: '邮箱', phone: '手机', state: '状态', permission: '权限', create_time: '创建时间', update_time: '更新时间', administrator: '管理员', ordinary_user: '普通用户', edit_profile: '编辑用户', username_tips: '请输入用户名', email_tips: '请输入邮箱', email_correct_tips: '请输入正确格式的邮箱', phone_tips: '请输入手机号', state_tips: '请选择状态', enable: '启用', disable: '禁用' } const monitor = { master: { cpu_usage: '处理器使用量', memory_usage: '内存使用量', load_average: '平均负载量', create_time: '创建时间', last_heartbeat_time: '最后心跳时间', directory_detail: '目录详情', host: '主机', directory: '注册目录' }, worker: { cpu_usage: '处理器使用量', memory_usage: '内存使用量', load_average: '平均负载量', create_time: '创建时间', last_heartbeat_time: '最后心跳时间', directory_detail: '目录详情', host: '主机', directory: '注册目录' }, db: { health_state: '健康状态', max_connections: '最大连接数', threads_connections: '当前连接数', threads_running_connections: '数据库当前活跃连接数' }, statistics: { command_number_of_waiting_for_running: '待执行的命令数', failure_command_number: '执行失败的命令数', tasks_number_of_waiting_running: '待运行任务数', task_number_of_ready_to_kill: '待杀死任务数' }, audit_log: { user_name: '用户名称', resource_type: '资源类型', project_name: '项目名称', 
operation_type: '操作类型', create_time: '创建时间', start_time: '开始时间', end_time: '结束时间', user_audit: '用户管理审计', project_audit: '项目管理审计', create: '创建', update: '更新', delete: '删除', read: '读取' } } const resource = { file: { file_manage: '文件管理', create_folder: '创建文件夹', create_file: '创建文件', upload_files: '上传文件', enter_keyword_tips: '请输入关键词', name: '名称', user_name: '所属用户', whether_directory: '是否文件夹', file_name: '文件名称', description: '描述', size: '大小', update_time: '更新时间', operation: '操作', edit: '编辑', rename: '重命名', download: '下载', delete: '删除', yes: '是', no: '否', folder_name: '文件夹名称', enter_name_tips: '请输入名称', enter_description_tips: '请输入描述', enter_content_tips: '请输入资源内容', enter_suffix_tips: '请输入文件后缀', file_format: '文件格式', file_content: '文件内容', delete_confirm: '确定删除吗?', confirm: '确定', cancel: '取消', success: '成功', file_details: '文件详情', return: '返回', save: '保存' }, udf: { udf_resources: 'UDF资源', create_folder: '创建文件夹', upload_udf_resources: '上传UDF资源', udf_source_name: 'UDF资源名称', whether_directory: '是否文件夹', file_name: '文件名称', file_size: '文件大小', description: '描述', create_time: '创建时间', update_time: '更新时间', operation: '操作', yes: '是', no: '否', edit: '编辑', download: '下载', delete: '删除', success: '成功', folder_name: '文件夹名称', upload: '上传', upload_files: '上传文件', file_upload: '文件上传', delete_confirm: '确定删除吗?', enter_keyword_tips: '请输入关键词', enter_name_tips: '请输入名称', enter_description_tips: '请输入描述' }, function: { udf_function: 'UDF函数', create_udf_function: '创建UDF函数', edit_udf_function: '编辑UDF函数', udf_function_name: 'UDF函数名称', class_name: '类名', type: '类型', description: '描述', jar_package: 'jar包', update_time: '更新时间', operation: '操作', rename: '重命名', edit: '编辑', delete: '删除', success: '成功', package_name: '包名类名', udf_resources: 'UDF资源', instructions: '使用说明', upload_resources: '上传资源', udf_resources_directory: 'UDF资源目录', delete_confirm: '确定删除吗?', enter_keyword_tips: '请输入关键词', enter_udf_unction_name_tips: '请输入UDF函数名称', enter_package_name_tips: '请输入包名类名', enter_select_udf_resources_tips: '请选择UDF资源', enter_select_udf_resources_directory_tips: '请选择UDF资源目录', enter_instructions_tips: '请输入使用说明', enter_name_tips: '请输入名称', enter_description_tips: '请输入描述' }, task_group_option: { manage: '任务组管理', option: '任务组配置', create: '创建任务组', edit: '编辑任务组', delete: '删除任务组', view_queue: '查看任务组队列', switch_status: '切换任务组状态', code: '任务组编号', name: '任务组名称', project_name: '项目名称', resource_pool_size: '资源容量', resource_used_pool_size: '已用资源', desc: '描述信息', status: '任务组状态', enable_status: '启用', disable_status: '不可用', please_enter_name: '请输入任务组名称', please_enter_desc: '请输入任务组描述', please_enter_resource_pool_size: '请输入资源容量大小', resource_pool_size_be_a_number: '资源容量大小必须大于等于1的数值', please_select_project: '请选择项目', create_time: '创建时间', update_time: '更新时间', actions: '操作', please_enter_keywords: '请输入搜索关键词' }, task_group_queue: { actions: '操作', task_name: '任务名称', task_group_name: '任务组名称', project_name: '项目名称', process_name: '工作流名称', process_instance_name: '工作流实例', queue: '任务组队列', priority: '组内优先级', priority_be_a_number: '优先级必须是大于等于0的数值', force_starting_status: '是否强制启动', in_queue: '是否排队中', task_status: '任务状态', view_task_group_queue: '查看任务组队列', the_status_of_waiting: '等待入队', the_status_of_queuing: '排队中', the_status_of_releasing: '已释放', modify_priority: '修改优先级', start_task: '强制启动', priority_not_empty: '优先级不能为空', priority_must_be_number: '优先级必须是数值', please_select_task_name: '请选择节点名称', create_time: '创建时间', update_time: '更新时间', edit_priority: '修改优先级' } } const project = { list: { create_project: '创建项目', edit_project: '编辑项目', project_list: '项目列表', project_tips: '请输入项目名称', 
description_tips: '请输入项目描述', username_tips: '请输入所属用户', project_name: '项目名称', project_description: '项目描述', owned_users: '所属用户', workflow_define_count: '工作流定义数', process_instance_running_count: '正在运行的流程数', description: '描述', create_time: '创建时间', update_time: '更新时间', operation: '操作', edit: '编辑', delete: '删除', confirm: '确定', cancel: '取消', delete_confirm: '确定删除吗?' }, workflow: { workflow_relation: '工作流关系', create_workflow: '创建工作流', import_workflow: '导入工作流', workflow_name: '工作流名称', current_selection: '当前选择', online: '已上线', offline: '已下线', refresh: '刷新', show_hide_label: '显示 / 隐藏标签', workflow_offline: '工作流下线', schedule_offline: '调度下线', schedule_start_time: '定时开始时间', schedule_end_time: '定时结束时间', crontab_expression: 'Crontab', workflow_publish_status: '工作流上线状态', schedule_publish_status: '定时状态', workflow_definition: '工作流定义', workflow_instance: '工作流实例', status: '状态', create_time: '创建时间', update_time: '更新时间', description: '描述', create_user: '创建用户', modify_user: '修改用户', operation: '操作', edit: '编辑', confirm: '确定', cancel: '取消', start: '运行', timing: '定时', timezone: '时区', up_line: '上线', down_line: '下线', copy_workflow: '复制工作流', cron_manage: '定时管理', delete: '删除', tree_view: '树形图', export: '导出', version_info: '版本信息', version: '版本', file_upload: '文件上传', upload_file: '上传文件', upload: '上传', file_name: '文件名称', success: '成功', set_parameters_before_starting: '启动前请先设置参数', set_parameters_before_timing: '定时前请先设置参数', start_and_stop_time: '起止时间', next_five_execution_times: '接下来五次执行时间', execute_time: '执行时间', failure_strategy: '失败策略', notification_strategy: '通知策略', workflow_priority: '流程优先级', worker_group: 'Worker分组', environment_name: '环境名称', alarm_group: '告警组', complement_data: '补数', startup_parameter: '启动参数', whether_dry_run: '是否空跑', continue: '继续', end: '结束', none_send: '都不发', success_send: '成功发', failure_send: '失败发', all_send: '成功或失败都发', whether_complement_data: '是否是补数', schedule_date: '调度日期', mode_of_execution: '执行方式', serial_execution: '串行执行', parallel_execution: '并行执行', parallelism: '并行度', custom_parallelism: '自定义并行度', please_enter_parallelism: '请输入并行度', please_choose: '请选择', start_time: '开始时间', end_time: '结束时间', crontab: 'Crontab', delete_confirm: '确定删除吗?', enter_name_tips: '请输入名称', switch_version: '切换到该版本', confirm_switch_version: '确定切换到该版本吗?', current_version: '当前版本', run_type: '运行类型', scheduling_time: '调度时间', duration: '运行时长', run_times: '运行次数', fault_tolerant_sign: '容错标识', dry_run_flag: '空跑标识', executor: '执行用户', host: 'Host', start_process: '启动工作流', execute_from_the_current_node: '从当前节点开始执行', recover_tolerance_fault_process: '恢复被容错的工作流', resume_the_suspension_process: '恢复运行流程', execute_from_the_failed_nodes: '从失败节点开始执行', scheduling_execution: '调度执行', rerun: '重跑', stop: '停止', pause: '暂停', recovery_waiting_thread: '恢复等待线程', recover_serial_wait: '串行恢复', recovery_suspend: '恢复运行', recovery_failed: '恢复失败', gantt: '甘特图', name: '名称', all_status: '全部状态', submit_success: '提交成功', running: '正在运行', ready_to_pause: '准备暂停', ready_to_stop: '准备停止', failed: '失败', need_fault_tolerance: '需要容错', kill: 'Kill', waiting_for_thread: '等待线程', waiting_for_dependence: '等待依赖', waiting_for_dependency_to_complete: '等待依赖完成', delay_execution: '延时执行', forced_success: '强制成功', serial_wait: '串行等待', executing: '正在执行', startup_type: '启动类型', complement_range: '补数范围', parameters_variables: '参数变量', global_parameters: '全局参数', local_parameters: '局部参数', type: '类型', retry_count: '重试次数', submit_time: '提交时间', refresh_status_succeeded: '刷新状态成功', view_log: '查看日志', update_log_success: '更新日志成功', no_more_log: '暂无更多日志', no_log: '暂无日志', loading_log: 
'正在努力请求日志中...', close: '关闭', download_log: '下载日志', refresh_log: '刷新日志', enter_full_screen: '进入全屏', cancel_full_screen: '取消全屏' }, task: { task_name: '任务名称', task_type: '任务类型', create_task: '创建任务', workflow_instance: '工作流实例', workflow_name: '工作流名称', workflow_name_tips: '请选择工作流名称', workflow_state: '工作流状态', version: '版本', current_version: '当前版本', switch_version: '切换到该版本', confirm_switch_version: '确定切换到该版本吗?', description: '描述', move: '移动', upstream_tasks: '上游任务', executor: '执行用户', node_type: '节点类型', state: '状态', submit_time: '提交时间', start_time: '开始时间', create_time: '创建时间', update_time: '更新时间', end_time: '结束时间', duration: '运行时间', retry_count: '重试次数', dry_run_flag: '空跑标识', host: '主机', operation: '操作', edit: '编辑', delete: '删除', delete_confirm: '确定删除吗?', submitted_success: '提交成功', running_execution: '正在运行', ready_pause: '准备暂停', pause: '暂停', ready_stop: '准备停止', stop: '停止', failure: '失败', success: '成功', need_fault_tolerance: '需要容错', kill: '已被杀', waiting_thread: '等待线程', waiting_depend: '等待依赖完成', delay_execution: '延时执行', forced_success: '强制成功', serial_wait: '串行等待', view_log: '查看日志', download_log: '下载日志' }, dag: { create: '创建工作流', search: '搜索', download_png: '下载工作流图片', fullscreen_open: '全屏', fullscreen_close: '退出全屏', save: '保存', close: '关闭', format: '格式化', refresh_dag_status: '刷新DAG状态', layout_type: '布局类型', grid_layout: '网格布局', dagre_layout: '层次布局', rows: '行数', cols: '列数', copy_success: '复制成功', workflow_name: '工作流名称', description: '描述', tenant: '租户', timeout_alert: '超时告警', global_variables: '全局变量', basic_info: '基本信息', minute: '分', key: '键', value: '值', success: '成功', delete_cell: '删除选中的线或节点', online_directly: '是否上线流程定义', dag_name_empty: 'DAG图名称不能为空', positive_integer: '请输入大于 0 的正整数', prop_empty: '自定义参数prop不能为空', prop_repeat: 'prop中有重复', node_not_created: '未创建节点保存失败', copy_name: '复制名称', view_variables: '查看变量', startup_parameter: '启动参数' }, node: { current_node_settings: '当前节点设置', instructions: '使用说明', view_history: '查看历史', view_log: '查看日志', enter_this_child_node: '进入该子节点', name: '节点名称', name_tips: '请输入名称(必填)', task_type: '任务类型', task_type_tips: '请选择任务类型(必选)', process_name: '工作流名称', process_name_tips: '请选择工作流(必选)', child_node: '子节点', enter_child_node: '进入该子节点', run_flag: '运行标志', normal: '正常', prohibition_execution: '禁止执行', description: '描述', description_tips: '请输入描述', task_priority: '任务优先级', worker_group: 'Worker分组', worker_group_tips: '该Worker分组已经不存在,请选择正确的Worker分组!', environment_name: '环境名称', task_group_name: '任务组名称', task_group_queue_priority: '组内优先级', number_of_failed_retries: '失败重试次数', times: '次', failed_retry_interval: '失败重试间隔', minute: '分', delay_execution_time: '延时执行时间', state: '状态', branch_flow: '分支流转', cancel: '取消', loading: '正在努力加载中...', confirm: '确定', success: '成功', failed: '失败', backfill_tips: '新创建子工作流还未执行,不能进入子工作流', task_instance_tips: '该任务还未执行,不能进入子工作流', branch_tips: '成功分支流转和失败分支流转不能选择同一个节点', timeout_alarm: '超时告警', timeout_strategy: '超时策略', timeout_strategy_tips: '超时策略必须选一个', timeout_failure: '超时失败', timeout_period: '超时时长', timeout_period_tips: '超时时长必须为正整数', script: '脚本', script_tips: '请输入脚本(必填)', resources: '资源', resources_tips: '请选择资源', no_resources_tips: '请删除所有未授权或已删除资源', useless_resources_tips: '未授权或已删除资源', custom_parameters: '自定义参数', copy_failed: '该浏览器不支持自动复制', prop_tips: 'prop(必填)', prop_repeat: 'prop中有重复', value_tips: 'value(选填)', value_required_tips: 'value(必填)', pre_tasks: '前置任务', program_type: '程序类型', spark_version: 'Spark版本', main_class: '主函数的Class', main_class_tips: '请填写主函数的Class', main_package: '主程序包', main_package_tips: '请选择主程序包', deploy_mode: '部署方式', app_name: '任务名称', 
app_name_tips: '请输入任务名称(选填)', driver_cores: 'Driver核心数', driver_cores_tips: '请输入Driver核心数', driver_memory: 'Driver内存数', driver_memory_tips: '请输入Driver内存数', executor_number: 'Executor数量', executor_number_tips: '请输入Executor数量', executor_memory: 'Executor内存数', executor_memory_tips: '请输入Executor内存数', executor_cores: 'Executor核心数', executor_cores_tips: '请输入Executor核心数', main_arguments: '主程序参数', main_arguments_tips: '请输入主程序参数', option_parameters: '选项参数', option_parameters_tips: '请输入选项参数', positive_integer_tips: '应为正整数', flink_version: 'Flink版本', job_manager_memory: 'JobManager内存数', job_manager_memory_tips: '请输入JobManager内存数', task_manager_memory: 'TaskManager内存数', task_manager_memory_tips: '请输入TaskManager内存数', slot_number: 'Slot数量', slot_number_tips: '请输入Slot数量', parallelism: '并行度', custom_parallelism: '自定义并行度', parallelism_tips: '请输入并行度', parallelism_number_tips: '并行度必须为正整数', parallelism_complement_tips: '如果存在大量任务需要补数时,可以利用自定义并行度将补数的任务线程设置成合理的数值,避免对服务器造成过大的影响', task_manager_number: 'TaskManager数量', task_manager_number_tips: '请输入TaskManager数量', http_url: '请求地址', http_url_tips: '请填写请求地址(必填)', http_method: '请求类型', http_parameters: '请求参数', http_check_condition: '校验条件', http_condition: '校验内容', http_condition_tips: '请填写校验内容', timeout_settings: '超时设置', connect_timeout: '连接超时', ms: '毫秒', socket_timeout: 'Socket超时', status_code_default: '默认响应码200', status_code_custom: '自定义响应码', body_contains: '内容包含', body_not_contains: '内容不包含', http_parameters_position: '参数位置', target_task_name: '目标任务名', target_task_name_tips: '请输入Pigeon任务名', datasource_type: '数据源类型', datasource_instances: '数据源实例', sql_type: 'SQL类型', sql_type_query: '查询', sql_type_non_query: '非查询', sql_statement: 'SQL语句', pre_sql_statement: '前置SQL语句', post_sql_statement: '后置SQL语句', sql_input_placeholder: '请输入非查询SQL语句', sql_empty_tips: '语句不能为空', procedure_method: 'SQL语句', procedure_method_tips: '请输入存储脚本', procedure_method_snippet: '--请输入存储脚本 \n\n--调用存储过程: call <procedure-name>[(<arg1>,<arg2>, ...)] \n\n--调用存储函数:?= call <procedure-name>[(<arg1>,<arg2>, ...)]', start: '运行', edit: '编辑', copy: '复制节点', delete: '删除', custom_job: '自定义任务', custom_script: '自定义脚本', sqoop_job_name: '任务名称', sqoop_job_name_tips: '请输入任务名称(必填)', direct: '流向', hadoop_custom_params: 'Hadoop参数', sqoop_advanced_parameters: 'Sqoop参数', data_source: '数据来源', type: '类型', datasource: '数据源', datasource_tips: '请选择数据源', model_type: '模式', form: '表单', table: '表名', table_tips: '请输入Mysql表名(必填)', column_type: '列类型', all_columns: '全表导入', some_columns: '选择列', column: '列', column_tips: '请输入列名,用 , 隔开', database: '数据库', database_tips: '请输入Hive数据库(必填)', hive_table_tips: '请输入Hive表名(必填)', hive_partition_keys: 'Hive 分区键', hive_partition_keys_tips: '请输入分区键', hive_partition_values: 'Hive 分区值', hive_partition_values_tips: '请输入分区值', export_dir: '数据源路径', export_dir_tips: '请输入数据源路径(必填)', sql_statement_tips: 'SQL语句(必填)', map_column_hive: 'Hive类型映射', map_column_java: 'Java类型映射', data_target: '数据目的', create_hive_table: '是否创建新表', drop_delimiter: '是否删除分隔符', over_write_src: '是否覆盖数据源', hive_target_dir: 'Hive目标路径', hive_target_dir_tips: '请输入Hive临时目录', replace_delimiter: '替换分隔符', replace_delimiter_tips: '请输入替换分隔符', target_dir: '目标路径', target_dir_tips: '请输入目标路径(必填)', delete_target_dir: '是否删除目录', compression_codec: '压缩类型', file_type: '保存格式', fields_terminated: '列分隔符', fields_terminated_tips: '请输入列分隔符', lines_terminated: '行分隔符', lines_terminated_tips: '请输入行分隔符', is_update: '是否更新', update_key: '更新列', update_key_tips: '请输入更新列', update_mode: '更新类型', only_update: '只更新', allow_insert: '无更新便插入', concurrency: '并发度', concurrency_tips: 
'请输入并发度', sea_tunnel_deploy_mode: '部署方式', sea_tunnel_master: 'Master', sea_tunnel_master_url: 'Master URL', sea_tunnel_queue: '队列', sea_tunnel_master_url_tips: '请直接填写地址,例如:127.0.0.1:7077', switch_condition: '条件', switch_branch_flow: '分支流转', and: '且', or: '或', datax_custom_template: '自定义模板', datax_json_template: 'JSON', datax_target_datasource_type: '目标源类型', datax_target_database: '目标源实例', datax_target_table: '目标表', datax_target_table_tips: '请输入目标表名', datax_target_database_pre_sql: '目标库前置SQL', datax_target_database_post_sql: '目标库后置SQL', datax_non_query_sql_tips: '请输入非查询SQL语句', datax_job_speed_byte: '限流(字节数)', datax_job_speed_byte_info: '(KB,0代表不限制)', datax_job_speed_record: '限流(记录数)', datax_job_speed_record_info: '(0代表不限制)', datax_job_runtime_memory: '运行内存', datax_job_runtime_memory_xms: '最小内存', datax_job_runtime_memory_xmx: '最大内存', datax_job_runtime_memory_unit: 'G', current_hour: '当前小时', last_1_hour: '前1小时', last_2_hour: '前2小时', last_3_hour: '前3小时', last_24_hour: '前24小时', today: '今天', last_1_days: '昨天', last_2_days: '前两天', last_3_days: '前三天', last_7_days: '前七天', this_week: '本周', last_week: '上周', last_monday: '上周一', last_tuesday: '上周二', last_wednesday: '上周三', last_thursday: '上周四', last_friday: '上周五', last_saturday: '上周六', last_sunday: '上周日', this_month: '本月', last_month: '上月', last_month_begin: '上月初', last_month_end: '上月末', month: '月', week: '周', day: '日', hour: '时', add_dependency: '添加依赖', waiting_dependent_start: '等待依赖启动', check_interval: '检查间隔', waiting_dependent_complete: '等待依赖完成' } } const security = { tenant: { tenant_manage: '租户管理', create_tenant: '创建租户', search_tips: '请输入关键词', tenant_code: '操作系统租户', description: '描述', queue_name: '队列', create_time: '创建时间', update_time: '更新时间', actions: '操作', edit_tenant: '编辑租户', tenant_code_tips: '请输入操作系统租户', queue_name_tips: '请选择队列', description_tips: '请输入描述', delete_confirm: '确定删除吗?', edit: '编辑', delete: '删除' }, alarm_group: { create_alarm_group: '创建告警组', edit_alarm_group: '编辑告警组', search_tips: '请输入关键词', alert_group_name_tips: '请输入告警组名称', alarm_plugin_instance: '告警组实例', alarm_plugin_instance_tips: '请选择告警组实例', alarm_group_description_tips: '请输入告警组描述', alert_group_name: '告警组名称', alarm_group_description: '告警组描述', create_time: '创建时间', update_time: '更新时间', operation: '操作', delete_confirm: '确定删除吗?', edit: '编辑', delete: '删除' }, worker_group: { create_worker_group: '创建Worker分组', edit_worker_group: '编辑Worker分组', search_tips: '请输入关键词', operation: '操作', delete_confirm: '确定删除吗?', edit: '编辑', delete: '删除', group_name: '分组名称', group_name_tips: '请输入分组名称', worker_addresses: 'Worker地址', worker_addresses_tips: '请选择Worker地址', create_time: '创建时间', update_time: '更新时间' }, yarn_queue: { create_queue: '创建队列', edit_queue: '编辑队列', search_tips: '请输入关键词', queue_name: '队列名', queue_value: '队列值', create_time: '创建时间', update_time: '更新时间', operation: '操作', edit: '编辑', queue_name_tips: '请输入队列名', queue_value_tips: '请输入队列值' }, environment: { create_environment: '创建环境', edit_environment: '编辑环境', search_tips: '请输入关键词', edit: '编辑', delete: '删除', environment_name: '环境名称', environment_config: '环境配置', environment_desc: '环境描述', worker_groups: 'Worker分组', create_time: '创建时间', update_time: '更新时间', operation: '操作', delete_confirm: '确定删除吗?', environment_name_tips: '请输入环境名', environment_config_tips: '请输入环境配置', environment_description_tips: '请输入环境描述', worker_group_tips: '请选择Worker分组' }, token: { create_token: '创建令牌', edit_token: '编辑令牌', search_tips: '请输入关键词', user: '用户', user_tips: '请选择用户', token: '令牌', token_tips: '请输入令牌', expiration_time: '失效时间', expiration_time_tips: '请选择失效时间', create_time: 
'创建时间', update_time: '更新时间', operation: '操作', edit: '编辑', delete: '删除', delete_confirm: '确定删除吗?' }, user: { user_manage: '用户管理', create_user: '创建用户', update_user: '更新用户', delete_user: '删除用户', delete_confirm: '确定删除吗?', delete_confirm_tip: '删除用户属于危险操作,请谨慎操作!', project: '项目', resource: '资源', file_resource: '文件资源', udf_resource: 'UDF资源', datasource: '数据源', udf: 'UDF函数', authorize_project: '项目授权', authorize_resource: '资源授权', authorize_datasource: '数据源授权', authorize_udf: 'UDF函数授权', username: '用户名', username_exists: '用户名已存在', username_rule_msg: '请输入用户名', user_password: '密码', user_password_rule_msg: '请输入包含字母和数字,长度在6~20之间的密码', user_type: '用户类型', tenant_code: '租户', tenant_id_rule_msg: '请选择租户', queue: '队列', email: '邮件', email_rule_msg: '请输入正确的邮箱', phone: '手机', phone_rule_msg: '请输入正确的手机号', state: '状态', state_enabled: '启用', state_disabled: '停用', create_time: '创建时间', update_time: '更新时间', operation: '操作', edit: '编辑', delete: '删除', authorize: '授权', save_error_msg: '保存失败,请重试', delete_error_msg: '删除失败,请重试', auth_error_msg: '授权失败,请重试', auth_success_msg: '授权成功' }, alarm_instance: { search_input_tips: '请输入关键字', alarm_instance_manage: '告警实例管理', alarm_instance: '告警实例', alarm_instance_name: '告警实例名称', alarm_instance_name_tips: '请输入告警实例名称', alarm_plugin_name: '告警插件名称', create_time: '创建时间', update_time: '更新时间', operation: '操作', edit: '编辑', delete: '删除', confirm: '确定', cancel: '取消', submit: '提交', create: '创建', select_plugin: '选择插件', select_plugin_tips: '请选择告警插件', instance_parameter_exception: '实例参数异常', WebHook: 'Web钩子', webHook: 'Web钩子', IsEnableProxy: '启用代理', Proxy: '代理', Port: '端口', User: '用户', corpId: '企业ID', secret: '密钥', Secret: '密钥', users: '群员', userSendMsg: '群员信息', agentId: '应用ID', showType: '内容展示类型', receivers: '收件人', receiverCcs: '抄送人', serverHost: 'SMTP服务器', serverPort: 'SMTP端口', sender: '发件人', enableSmtpAuth: '请求认证', Password: '密码', starttlsEnable: 'STARTTLS连接', sslEnable: 'SSL连接', smtpSslTrust: 'SSL证书信任', url: 'URL', requestType: '请求方式', headerParams: '请求头', bodyParams: '请求体', contentField: '内容字段', Keyword: '关键词', userParams: '自定义参数', path: '脚本路径', type: '类型', sendType: '发送类型', username: '用户名', botToken: '机器人Token', chatId: '频道ID', parseMode: '解析类型' }, k8s_namespace: { create_namespace: '创建命名空间', edit_namespace: '编辑命名空间', search_tips: '请输入关键词', k8s_namespace: 'K8S命名空间', k8s_namespace_tips: '请输入k8s命名空间', k8s_cluster: 'K8S集群', k8s_cluster_tips: '请输入k8s集群', owner: '负责人', owner_tips: '请输入负责人', tag: '标签', tag_tips: '请输入标签', limit_cpu: '最大CPU', limit_cpu_tips: '请输入最大CPU', limit_memory: '最大内存', limit_memory_tips: '请输入最大内存', create_time: '创建时间', update_time: '更新时间', operation: '操作', edit: '编辑', delete: '删除', delete_confirm: '确定删除吗?' 
} } const datasource = { datasource: '数据源', create_datasource: '创建数据源', search_input_tips: '请输入关键字', datasource_name: '数据源名称', datasource_name_tips: '请输入数据源名称', datasource_user_name: '所属用户', datasource_type: '数据源类型', datasource_parameter: '数据源参数', description: '描述', description_tips: '请输入描述', create_time: '创建时间', update_time: '更新时间', operation: '操作', click_to_view: '点击查看', delete: '删除', confirm: '确定', cancel: '取消', create: '创建', edit: '编辑', success: '成功', test_connect: '测试连接', ip: 'IP主机名', ip_tips: '请输入IP主机名', port: '端口', port_tips: '请输入端口', database_name: '数据库名', database_name_tips: '请输入数据库名', oracle_connect_type: '服务名或SID', oracle_connect_type_tips: '请选择服务名或SID', oracle_service_name: '服务名', oracle_sid: 'SID', jdbc_connect_parameters: 'jdbc连接参数', principal_tips: '请输入Principal', krb5_conf_tips: '请输入kerberos认证参数 java.security.krb5.conf', keytab_username_tips: '请输入kerberos认证参数 login.user.keytab.username', keytab_path_tips: '请输入kerberos认证参数 login.user.keytab.path', format_tips: '请输入格式为', connection_parameter: '连接参数', user_name: '用户名', user_name_tips: '请输入用户名', user_password: '密码', user_password_tips: '请输入密码' } const data_quality = { task_result: { task_name: '任务名称', workflow_instance: '工作流实例', rule_type: '规则类型', rule_name: '规则名称', state: '状态', actual_value: '实际值', excepted_value: '期望值', check_type: '检测类型', operator: '操作符', threshold: '阈值', failure_strategy: '失败策略', excepted_value_type: '期望值类型', error_output_path: '错误数据路径', username: '用户名', create_time: '创建时间', update_time: '更新时间', undone: '未完成', success: '成功', failure: '失败', single_table: '单表检测', single_table_custom_sql: '自定义SQL', multi_table_accuracy: '多表准确性', multi_table_comparison: '两表值对比', expected_and_actual_or_expected: '(期望值-实际值)/实际值 x 100%', expected_and_actual: '期望值-实际值', actual_and_expected: '实际值-期望值', actual_or_expected: '实际值/期望值 x 100%' }, rule: { actions: '操作', name: '规则名称', type: '规则类型', username: '用户名', create_time: '创建时间', update_time: '更新时间', input_item: '规则输入项', view_input_item: '查看规则输入项信息', input_item_title: '输入项标题', input_item_placeholder: '输入项占位符', input_item_type: '输入项类型', src_connector_type: '源数据类型', src_datasource_id: '源数据源', src_table: '源数据表', src_filter: '源表过滤条件', src_field: '源表检测列', statistics_name: '实际值名', check_type: '校验方式', operator: '校验操作符', threshold: '阈值', failure_strategy: '失败策略', target_connector_type: '目标数据类型', target_datasource_id: '目标数据源', target_table: '目标数据表', target_filter: '目标表过滤条件', mapping_columns: 'ON语句', statistics_execute_sql: '实际值计算SQL', comparison_name: '期望值名', comparison_execute_sql: '期望值计算SQL', comparison_type: '期望值类型', writer_connector_type: '输出数据类型', writer_datasource_id: '输出数据源', target_field: '目标表检测列', field_length: '字段长度限制', logic_operator: '逻辑操作符', regexp_pattern: '正则表达式', deadline: '截止时间', datetime_format: '时间格式', enum_list: '枚举值列表', begin_time: '起始时间', fix_value: '固定值', null_check: '空值检测', custom_sql: '自定义SQL', single_table: '单表检测', multi_table_accuracy: '多表准确性', multi_table_value_comparison: '两表值比对', field_length_check: '字段长度校验', uniqueness_check: '唯一性校验', regexp_check: '正则表达式', timeliness_check: '及时性校验', enumeration_check: '枚举值校验', table_count_check: '表行数校验', all: '全部', FixValue: '固定值', DailyAvg: '日均值', WeeklyAvg: '周均值', MonthlyAvg: '月均值', Last7DayAvg: '最近7天均值', Last30DayAvg: '最近30天均值', SrcTableTotalRows: '源表总行数', TargetTableTotalRows: '目标表总行数' } } const crontab = { second: '秒', minute: '分', hour: '时', day: '天', month: '月', year: '年', monday: '星期一', tuesday: '星期二', wednesday: '星期三', thursday: '星期四', friday: '星期五', saturday: '星期六', sunday: '星期天', every_second: '每一秒钟', every: '每隔', 
second_carried_out: '秒执行 从', second_start: '秒开始', specific_second: '具体秒数(可多选)', specific_second_tip: '请选择具体秒数', cycle_from: '周期从', to: '到', every_minute: '每一分钟', minute_carried_out: '分执行 从', minute_start: '分开始', specific_minute: '具体分钟数(可多选)', specific_minute_tip: '请选择具体分钟数', every_hour: '每一小时', hour_carried_out: '小时执行 从', hour_start: '小时开始', specific_hour: '具体小时数(可多选)', specific_hour_tip: '请选择具体小时数', every_day: '每一天', week_carried_out: '周执行 从', start: '开始', day_carried_out: '天执行 从', day_start: '天开始', specific_week: '具体星期几(可多选)', specific_week_tip: '请选择具体周几', specific_day: '具体天数(可多选)', specific_day_tip: '请选择具体天数', last_day_of_month: '在这个月的最后一天', last_work_day_of_month: '在这个月的最后一个工作日', last_of_month: '在这个月的最后一个', before_end_of_month: '在本月底前', recent_business_day_to_month: '最近的工作日(周一至周五)至本月', in_this_months: '在这个月的第', every_month: '每一月', month_carried_out: '月执行 从', month_start: '月开始', specific_month: '具体月数(可多选)', specific_month_tip: '请选择具体月数', every_year: '每一年', year_carried_out: '年执行 从', year_start: '年开始', specific_year: '具体年数(可多选)', specific_year_tip: '请选择具体年数', one_hour: '小时', one_day: '日' } export default { login, modal, theme, userDropdown, menu, home, password, profile, monitor, resource, project, security, datasource, data_quality, crontab }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,632
[Bug][UI Next][V1.0.0-Alpha] The task instance button prompts an error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In Project Management - Project Instance - Task - Task Instance, the tooltip on the force-success button of a failed task is wrong. <img width="1425" alt="image" src="https://user-images.githubusercontent.com/76080484/156141468-b17f0e1f-e62a-4261-8c5a-a47a0748da45.png"> ### What you expected to happen The button tooltip should read "Forced Success" (强制成功). ### How to reproduce In Project Management - Project Instance - Task - Task Instance, the force-success button of a failed task shows the wrong tooltip. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8632
https://github.com/apache/dolphinscheduler/pull/8668
60a00490c6706228de22feedf9ea9c6b5a40e934
8096a1063bb6b23066b60da856d1349c04ee9c81
"2022-03-01T09:25:56Z"
java
"2022-03-02T13:04:47Z"
dolphinscheduler-ui-next/src/views/projects/task/instance/use-table.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { useI18n } from 'vue-i18n' import { h, reactive, ref } from 'vue' import { useAsyncState } from '@vueuse/core' import { queryTaskListPaging, forceSuccess, downloadLog } from '@/service/modules/task-instances' import { NButton, NIcon, NSpace, NTooltip } from 'naive-ui' import { AlignLeftOutlined, CheckCircleOutlined, DownloadOutlined } from '@vicons/antd' import { parseISO, format } from 'date-fns' import { useRoute } from 'vue-router' import type { TaskInstancesRes } from '@/service/modules/task-instances/types' export function useTable() { const { t } = useI18n() const route = useRoute() const projectCode = Number(route.params.projectCode) const variables = reactive({ columns: [], tableData: [], page: ref(1), pageSize: ref(10), searchVal: ref(null), processInstanceId: ref(null), host: ref(null), stateType: ref(null), datePickerRange: ref(null), executorName: ref(null), processInstanceName: ref(null), totalPage: ref(1), showModalRef: ref(false), row: {} }) const createColumns = (variables: any) => { variables.columns = [ { title: '#', key: 'index', render: (row: any, index: number) => index + 1 }, { title: t('project.task.task_name'), key: 'name' }, { title: t('project.task.workflow_instance'), key: 'processInstanceName', width: 250 }, { title: t('project.task.executor'), key: 'executorName' }, { title: t('project.task.node_type'), key: 'taskType' }, { title: t('project.task.state'), key: 'state' }, { title: t('project.task.submit_time'), key: 'submitTime', width: 170 }, { title: t('project.task.start_time'), key: 'startTime', width: 170 }, { title: t('project.task.end_time'), key: 'endTime', width: 170 }, { title: t('project.task.duration'), key: 'duration', render: (row: any) => h('span', null, row.duration ? 
row.duration : '-') }, { title: t('project.task.retry_count'), key: 'retryTimes' }, { title: t('project.task.dry_run_flag'), key: 'dryRun' }, { title: t('project.task.host'), key: 'host', width: 160 }, { title: t('project.task.operation'), key: 'operation', width: 150, render(row: any) { return h(NSpace, null, { default: () => [ h( NTooltip, {}, { trigger: () => h( NButton, { circle: true, type: 'info', size: 'small', disabled: !( row.state === 'FAILURE' || row.state === 'NEED_FAULT_TOLERANCE' || row.state === 'KILL' ), onClick: () => { handleForcedSuccess(row) } }, { icon: () => h(NIcon, null, { default: () => h(CheckCircleOutlined) }) } ), default: () => t('project.task.serial_wait') } ), h( NTooltip, {}, { trigger: () => h( NButton, { circle: true, type: 'info', size: 'small', onClick: () => handleLog(row) }, { icon: () => h(NIcon, null, { default: () => h(AlignLeftOutlined) }) } ), default: () => t('project.task.view_log') } ), h( NTooltip, {}, { trigger: () => h( NButton, { circle: true, type: 'info', size: 'small', onClick: () => downloadLog(row.id) }, { icon: () => h(NIcon, null, { default: () => h(DownloadOutlined) }) } ), default: () => t('project.task.download_log') } ) ] }) } } ] } const handleLog = (row: any) => { variables.showModalRef = true variables.row = row } const handleForcedSuccess = (row: any) => { forceSuccess({ id: row.id }, { projectCode }).then(() => { getTableData({ pageSize: variables.pageSize, pageNo: variables.tableData.length === 1 && variables.page > 1 ? variables.page - 1 : variables.page, searchVal: variables.searchVal, processInstanceId: variables.processInstanceId, host: variables.host, stateType: variables.stateType, datePickerRange: variables.datePickerRange, executorName: variables.executorName, processInstanceName: variables.processInstanceName }) }) } const getTableData = (params: any) => { const data = { pageSize: params.pageSize, pageNo: params.pageNo, searchVal: params.searchVal, processInstanceId: params.processInstanceId, host: params.host, stateType: params.stateType, startDate: params.datePickerRange ? format(parseISO(params.datePickerRange[0]), 'yyyy-MM-dd HH:mm:ss') : '', endDate: params.datePickerRange ? format(parseISO(params.datePickerRange[1]), 'yyyy-MM-dd HH:mm:ss') : '', executorName: params.executorName, processInstanceName: params.processInstanceName } const { state } = useAsyncState( queryTaskListPaging(data, { projectCode }).then( (res: TaskInstancesRes) => { variables.tableData = res.totalList.map((item, unused) => { item.submitTime = format( parseISO(item.submitTime), 'yyyy-MM-dd HH:mm:ss' ) item.startTime = format( parseISO(item.startTime), 'yyyy-MM-dd HH:mm:ss' ) item.endTime = format(parseISO(item.endTime), 'yyyy-MM-dd HH:mm:ss') return { ...item } }) as any } ), {} ) return state } return { t, variables, getTableData, createColumns } }
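In the record above, the force-success button built in `createColumns` uses `t('project.task.serial_wait')` as its tooltip text, which is exactly the wrong prompt reported in issue 8632. A minimal sketch of the likely correction, assuming the `project.task.forced_success` key (defined as '强制成功' in the zh_CN locale shown earlier) exists in every locale bundle; the actual change in PR 8668 may differ:

```typescript
// Sketch: the force-success action cell inside createColumns() in use-table.ts.
// It reuses the imports already present in the file (h, NTooltip, NButton, NIcon,
// CheckCircleOutlined, t, handleForcedSuccess); only the tooltip text changes.
h(
  NTooltip,
  {},
  {
    trigger: () =>
      h(
        NButton,
        {
          circle: true,
          type: 'info',
          size: 'small',
          disabled: !(
            row.state === 'FAILURE' ||
            row.state === 'NEED_FAULT_TOLERANCE' ||
            row.state === 'KILL'
          ),
          onClick: () => handleForcedSuccess(row)
        },
        { icon: () => h(NIcon, null, { default: () => h(CheckCircleOutlined) }) }
      ),
    // was: t('project.task.serial_wait') — rendered as "串行等待" (serial wait)
    default: () => t('project.task.forced_success') // "强制成功" (forced success)
  }
)
```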
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,610
[Bug][UI Next][V1.0.0-Alpha] The display of the project name is too long, causing UI confusion
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened An over-long project name breaks the UI layout. <img width="1429" alt="image" src="https://user-images.githubusercontent.com/76080484/156125508-41cc27d3-0303-4a53-810b-a7ebff15b940.png"> ### What you expected to happen Cap the number of characters shown for the project name, replace the overflow with an ellipsis, and show the full name in a tooltip when hovering over it. ### How to reproduce Create a new project. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8610
https://github.com/apache/dolphinscheduler/pull/8669
8096a1063bb6b23066b60da856d1349c04ee9c81
43fc7ec968e11568b5c07ab21b6d849646022fb3
"2022-03-01T07:40:54Z"
java
"2022-03-02T13:52:58Z"
dolphinscheduler-ui-next/src/views/projects/list/index.module.scss
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .search-card { display: flex; justify-content: space-between; align-items: center; .box { display: flex; justify-content: flex-end; align-items: center; width: 300px; button { margin-left: 10px; } } } .table-card { margin-top: 8px; .pagination { margin-top: 20px; display: flex; justify-content: center; } } .links { color: dodgerblue; text-decoration: none; cursor: pointer; &:hover { text-decoration: underline; } }
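Issue 8610 above asks for over-long project names to be truncated with an ellipsis and for the full name to appear on hover. A minimal SCSS sketch of a truncation helper that could sit next to `.links` in this stylesheet; the class name and width are illustrative assumptions, not the values of the actual fix in PR 8669:

```scss
// Hypothetical helper for index.module.scss: single-line truncation of long names.
.ellipsis {
  display: inline-block;
  max-width: 180px; /* assumed width budget for the name column */
  overflow: hidden;
  white-space: nowrap;
  text-overflow: ellipsis;
  vertical-align: bottom;
}
```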
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,610
[Bug][UI Next][V1.0.0-Alpha] The display of the project name is too long, causing UI confusion
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened An over-long project name breaks the UI layout. <img width="1429" alt="image" src="https://user-images.githubusercontent.com/76080484/156125508-41cc27d3-0303-4a53-810b-a7ebff15b940.png"> ### What you expected to happen Cap the number of characters shown for the project name, replace the overflow with an ellipsis, and show the full name in a tooltip when hovering over it. ### How to reproduce Create a new project. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8610
https://github.com/apache/dolphinscheduler/pull/8669
8096a1063bb6b23066b60da856d1349c04ee9c81
43fc7ec968e11568b5c07ab21b6d849646022fb3
"2022-03-01T07:40:54Z"
java
"2022-03-02T13:52:58Z"
dolphinscheduler-ui-next/src/views/projects/list/use-table.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { h, reactive, ref } from 'vue' import { useI18n } from 'vue-i18n' import { useAsyncState } from '@vueuse/core' import { queryProjectListPaging } from '@/service/modules/projects' import { parseISO, format } from 'date-fns' import { useRouter } from 'vue-router' import TableAction from './components/table-action' import styles from './index.module.scss' import type { Router } from 'vue-router' import type { TableColumns } from 'naive-ui/es/data-table/src/interface' import type { ProjectRes } from '@/service/modules/projects/types' import { useMenuStore } from '@/store/menu/menu' export function useTable( updateProjectItem = ( unusedCode: number, unusedName: string, unusedDescription: string ): void => {}, resetTableData = () => {} ) { const { t } = useI18n() const router: Router = useRouter() const menuStore = useMenuStore() const columns: TableColumns<any> = [ { title: '#', key: 'index', render: (row, index) => index + 1 }, { title: t('project.list.project_name'), key: 'name', render: (row) => h( 'a', { class: styles.links, onClick: () => { menuStore.setProjectCode(row.code) router.push({ path: `/projects/${row.code}` }) } }, { default: () => { return row.name } } ) }, { title: t('project.list.owned_users'), key: 'userName' }, { title: t('project.list.workflow_define_count'), key: 'defCount' }, { title: t('project.list.process_instance_running_count'), key: 'instRunningCount' }, { title: t('project.list.description'), key: 'description' }, { title: t('project.list.create_time'), key: 'createTime' }, { title: t('project.list.update_time'), key: 'updateTime' }, { title: t('project.list.operation'), key: 'actions', render: (row: any) => h(TableAction, { row, onResetTableData: () => { if (variables.page > 1 && variables.tableData.length === 1) { variables.page -= 1 } resetTableData() }, onUpdateProjectItem: (code, name, description) => updateProjectItem(code, name, description) }) } ] const variables = reactive({ tableData: [], page: ref(1), pageSize: ref(10), searchVal: ref(null), totalPage: ref(1) }) const getTableData = (params: any) => { const { state } = useAsyncState( queryProjectListPaging(params).then((res: ProjectRes) => { variables.totalPage = res.totalPage variables.tableData = res.totalList.map((item, unused) => { item.createTime = format( parseISO(item.createTime), 'yyyy-MM-dd HH:mm:ss' ) item.updateTime = format( parseISO(item.updateTime), 'yyyy-MM-dd HH:mm:ss' ) return { ...item } }) as any }), {} ) return state } return { getTableData, variables, columns } }
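On the render side of the same issue, the name column in `use-table.ts` above prints `row.name` unbounded. A sketch under the assumption that the `.ellipsis` helper sketched earlier exists and that a native `title` attribute is an acceptable hover hint; PR 8669 may have implemented it differently (for example with a naive-ui ellipsis component):

```typescript
// Sketch: project-name column render with truncation and a hover hint.
// Relies on the existing imports in the file (h, styles, router, menuStore, t).
{
  title: t('project.list.project_name'),
  key: 'name',
  render: (row) =>
    h(
      'a',
      {
        class: [styles.links, styles.ellipsis], // styles.ellipsis is hypothetical
        title: row.name, // full, untruncated name shown by the browser on hover
        onClick: () => {
          menuStore.setProjectCode(row.code)
          router.push({ path: `/projects/${row.code}` })
        }
      },
      row.name
    )
}
```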
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,686
[Bug][UI Next][V1.0.0-Alpha] The problem of incomplete Chinese support for all date pickers in the project.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/19239641/156563819-9ea997f8-cf56-424c-bd31-543d2f9ee980.png) ### What you expected to happen Chinese support is not perfect ### How to reproduce Support Chinese ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8686
https://github.com/apache/dolphinscheduler/pull/8688
67a6813e9c5831b44037f10ecd532cc44e3caf7d
403c672e6d03ebc1282da2dbf1889c4db38fee95
"2022-03-03T12:23:31Z"
java
"2022-03-03T14:19:10Z"
dolphinscheduler-ui-next/src/App.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, computed, ref, nextTick, provide } from 'vue' import { zhCN, enUS, NConfigProvider, darkTheme, GlobalThemeOverrides, NMessageProvider } from 'naive-ui' import { useThemeStore } from '@/store/theme/theme' import { useLocalesStore } from '@/store/locales/locales' import themeList from '@/themes' const App = defineComponent({ name: 'App', setup() { const isRouterAlive = ref(true) const themeStore = useThemeStore() const currentTheme = computed(() => themeStore.darkTheme ? darkTheme : undefined ) const localesStore = useLocalesStore() /*refresh page when router params change*/ const reload = () => { isRouterAlive.value = false nextTick(() => { isRouterAlive.value = true }) } provide('reload', reload) return { reload, isRouterAlive, currentTheme, localesStore } }, render() { const themeOverrides: GlobalThemeOverrides = themeList[this.currentTheme ? 'dark' : 'light'] return ( <NConfigProvider theme={this.currentTheme} themeOverrides={themeOverrides} style={{ width: '100%', height: '100vh' }} locale={String(this.localesStore.getLocales) === 'zh_CN' ? zhCN : enUS} > <NMessageProvider> {this.isRouterAlive ? <router-view /> : ''} </NMessageProvider> </NConfigProvider> ) } }) export default App
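`App.tsx` above switches `locale` on `NConfigProvider` but never switches the date locale, which matches the symptom in issue 8686 (date-picker panels staying in English under zh_CN). A plausible sketch of the fix, assuming naive-ui's `dateZhCN`/`dateEnUS` exports and its `dateLocale` prop; the actual change in PR 8688 may differ in detail:

```tsx
// Sketch of the render() fragment in App.tsx with a date locale added.
// Everything except the dateLocale line is unchanged from the file above.
import { zhCN, enUS, dateZhCN, dateEnUS, NConfigProvider, NMessageProvider } from 'naive-ui'

// inside render():
const isZhCN = String(this.localesStore.getLocales) === 'zh_CN'
return (
  <NConfigProvider
    theme={this.currentTheme}
    themeOverrides={themeOverrides}
    style={{ width: '100%', height: '100vh' }}
    locale={isZhCN ? zhCN : enUS}
    dateLocale={isZhCN ? dateZhCN : dateEnUS} // the missing piece for date pickers
  >
    <NMessageProvider>{this.isRouterAlive ? <router-view /> : ''}</NMessageProvider>
  </NConfigProvider>
)
```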
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,616
[Bug] [WorkFlowLineage] work flow lineage search result missing data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/03/01/Tx7jRAFYJMp2irC.png) ![](https://vip1.loli.io/2022/03/01/gQmCFqZud38hjHi.png) ### What you expected to happen work flow lineage's result is right. ### How to reproduce above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8616
https://github.com/apache/dolphinscheduler/pull/8684
403c672e6d03ebc1282da2dbf1889c4db38fee95
2ab8c1ca7d820c8921fa062b0757c9b70b00048c
"2022-03-01T08:11:13Z"
java
"2022-03-04T06:05:59Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/TaskGroupServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.TaskGroupQueueService; import org.apache.dolphinscheduler.api.service.TaskGroupService; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.dao.entity.TaskGroup; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.TaskGroupMapper; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.spi.utils.StringUtils; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; /** * task Group Service */ @Service public class TaskGroupServiceImpl extends BaseServiceImpl implements TaskGroupService { @Autowired private TaskGroupMapper taskGroupMapper; @Autowired private TaskGroupQueueService taskGroupQueueService; @Autowired private ProcessService processService; private static final Logger logger = LoggerFactory.getLogger(TaskGroupServiceImpl.class); /** * create a Task group * * @param loginUser login user * @param name task group name * @param description task group description * @param groupSize task group total size * @return the result code and msg */ @Override public Map<String, Object> createTaskGroup(User loginUser, Long projectCode, String name, String description, int groupSize) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } if (name == null) { putMsg(result, Status.NAME_NULL); return result; } if (groupSize <= 0) { putMsg(result, Status.TASK_GROUP_SIZE_ERROR); return result; } TaskGroup taskGroup1 = taskGroupMapper.queryByName(loginUser.getId(), name); if (taskGroup1 != null) { putMsg(result, Status.TASK_GROUP_NAME_EXSIT); return result; } TaskGroup taskGroup = new TaskGroup(name, projectCode, description, groupSize, loginUser.getId(), Flag.YES.getCode()); taskGroup.setCreateTime(new Date()); taskGroup.setUpdateTime(new Date()); if (taskGroupMapper.insert(taskGroup) > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.CREATE_TASK_GROUP_ERROR); return result; } return result; } /** * update the task group * * @param loginUser login user * @param name task group 
name * @param description task group description * @param groupSize task group total size * @return the result code and msg */ @Override public Map<String, Object> updateTaskGroup(User loginUser, int id, String name, String description, int groupSize) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } if (name == null) { putMsg(result, Status.NAME_NULL); return result; } if (groupSize <= 0) { putMsg(result, Status.TASK_GROUP_SIZE_ERROR); return result; } Integer exists = taskGroupMapper.selectCount(new QueryWrapper<TaskGroup>().lambda().eq(TaskGroup::getName, name).ne(TaskGroup::getId, id)); if (exists > 0) { putMsg(result, Status.TASK_GROUP_NAME_EXSIT); return result; } TaskGroup taskGroup = taskGroupMapper.selectById(id); if (taskGroup.getStatus() != Flag.YES.getCode()) { putMsg(result, Status.TASK_GROUP_STATUS_ERROR); return result; } taskGroup.setGroupSize(groupSize); taskGroup.setDescription(description); taskGroup.setUpdateTime(new Date()); if (StringUtils.isNotEmpty(name)) { taskGroup.setName(name); } int i = taskGroupMapper.updateById(taskGroup); logger.info("update result:{}", i); putMsg(result, Status.SUCCESS); return result; } /** * get task group status * * @param id task group id * @return is the task group available */ @Override public boolean isTheTaskGroupAvailable(int id) { return taskGroupMapper.selectCountByIdStatus(id, Flag.YES.getCode()) == 1; } /** * query all task group by user id * * @param loginUser login user * @param pageNo page no * @param pageSize page size * @return the result code and msg */ @Override public Map<String, Object> queryAllTaskGroup(User loginUser, String name, Integer status, int pageNo, int pageSize) { return this.doQuery(loginUser, pageNo, pageSize, loginUser.getId(), name, status); } /** * query all task group by status * * @param loginUser login user * @param pageNo page no * @param pageSize page size * @param status status * @return the result code and msg */ @Override public Map<String, Object> queryTaskGroupByStatus(User loginUser, int pageNo, int pageSize, int status) { return this.doQuery(loginUser, pageNo, pageSize, loginUser.getId(), null, status); } /** * query all task group by name * * @param loginUser login user * @param pageNo page no * @param pageSize page size * @param projectCode project code * @return the result code and msg */ @Override public Map<String, Object> queryTaskGroupByProjectCode(User loginUser, int pageNo, int pageSize, Long projectCode) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } Page<TaskGroup> page = new Page<>(pageNo, pageSize); IPage<TaskGroup> taskGroupPaging = taskGroupMapper.queryTaskGroupPagingByProjectCode(page, projectCode); return getStringObjectMap(pageNo, pageSize, result, taskGroupPaging); } private Map<String, Object> getStringObjectMap(int pageNo, int pageSize, Map<String, Object> result, IPage<TaskGroup> taskGroupPaging) { PageInfo<TaskGroup> pageInfo = new PageInfo<>(pageNo, pageSize); int total = taskGroupPaging == null ? 0 : (int) taskGroupPaging.getTotal(); List<TaskGroup> list = taskGroupPaging == null ? 
new ArrayList<TaskGroup>() : taskGroupPaging.getRecords(); pageInfo.setTotal(total); pageInfo.setTotalList(list); result.put(Constants.DATA_LIST, pageInfo); logger.info("select result:{}", taskGroupPaging); putMsg(result, Status.SUCCESS); return result; } /** * query all task group by id * * @param loginUser login user * @param id id * @return the result code and msg */ @Override public Map<String, Object> queryTaskGroupById(User loginUser, int id) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } TaskGroup taskGroup = taskGroupMapper.selectById(id); result.put(Constants.DATA_LIST, taskGroup); putMsg(result, Status.SUCCESS); return result; } /** * query * * @param pageNo page no * @param pageSize page size * @param userId user id * @param name name * @param status status * @return the result code and msg */ @Override public Map<String, Object> doQuery(User loginUser, int pageNo, int pageSize, int userId, String name, Integer status) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } Page<TaskGroup> page = new Page<>(pageNo, pageSize); IPage<TaskGroup> taskGroupPaging = taskGroupMapper.queryTaskGroupPaging(page, userId, name, status); return getStringObjectMap(pageNo, pageSize, result, taskGroupPaging); } /** * close a task group * * @param loginUser login user * @param id task group id * @return the result code and msg */ @Override public Map<String, Object> closeTaskGroup(User loginUser, int id) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } TaskGroup taskGroup = taskGroupMapper.selectById(id); if (taskGroup.getStatus() == Flag.NO.getCode()) { putMsg(result, Status.TASK_GROUP_STATUS_CLOSED); return result; } taskGroup.setStatus(Flag.NO.getCode()); taskGroupMapper.updateById(taskGroup); putMsg(result, Status.SUCCESS); return result; } /** * start a task group * * @param loginUser login user * @param id task group id * @return the result code and msg */ @Override public Map<String, Object> startTaskGroup(User loginUser, int id) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } TaskGroup taskGroup = taskGroupMapper.selectById(id); if (taskGroup.getStatus() == Flag.YES.getCode()) { putMsg(result, Status.TASK_GROUP_STATUS_OPENED); return result; } taskGroup.setStatus(Flag.YES.getCode()); taskGroup.setUpdateTime(new Date(System.currentTimeMillis())); int update = taskGroupMapper.updateById(taskGroup); putMsg(result, Status.SUCCESS); return result; } /** * wake a task manually * * @param loginUser * @param queueId task group queue id * @return result */ @Override public Map<String, Object> forceStartTask(User loginUser, int queueId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } taskGroupQueueService.forceStartTask(queueId, Flag.YES.getCode()); putMsg(result, Status.SUCCESS); return result; } @Override public Map<String, Object> modifyPriority(User loginUser, Integer queueId, Integer priority) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } taskGroupQueueService.modifyPriority(queueId, priority); putMsg(result, Status.SUCCESS); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,616
[Bug] [WorkFlowLineage] work flow lineage search result missing data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/03/01/Tx7jRAFYJMp2irC.png) ![](https://vip1.loli.io/2022/03/01/gQmCFqZud38hjHi.png) ### What you expected to happen work flow lineage's result is right. ### How to reproduce above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8616
https://github.com/apache/dolphinscheduler/pull/8684
403c672e6d03ebc1282da2dbf1889c4db38fee95
2ab8c1ca7d820c8921fa062b0757c9b70b00048c
"2022-03-01T08:11:13Z"
java
"2022-03-04T06:05:59Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/WorkFlowLineageServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.WorkFlowLineageService; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.model.DependentItem; import org.apache.dolphinscheduler.common.model.DependentTaskModel; import org.apache.dolphinscheduler.common.task.dependent.DependentParameters; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.ProcessLineage; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.WorkFlowLineage; import org.apache.dolphinscheduler.dao.entity.WorkFlowRelation; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.WorkFlowLineageMapper; import org.apache.dolphinscheduler.spi.utils.StringUtils; import org.apache.curator.shaded.com.google.common.collect.Sets; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.stream.Collectors; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * work flow lineage service impl */ @Service public class WorkFlowLineageServiceImpl extends BaseServiceImpl implements WorkFlowLineageService { @Autowired private WorkFlowLineageMapper workFlowLineageMapper; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Override public Map<String, Object> queryWorkFlowLineageByName(long projectCode, String workFlowName) { Map<String, Object> result = new HashMap<>(); Project project = projectMapper.queryByCode(projectCode); if (project == null) { putMsg(result, Status.PROJECT_NOT_FOUND, projectCode); return result; } List<WorkFlowLineage> workFlowLineageList = workFlowLineageMapper.queryWorkFlowLineageByName(projectCode, workFlowName); result.put(Constants.DATA_LIST, workFlowLineageList); putMsg(result, Status.SUCCESS); return result; } @Override public Map<String, Object> queryWorkFlowLineageByCode(long projectCode, long workFlowCode) { Map<String, Object> result = new HashMap<>(); Project project = projectMapper.queryByCode(projectCode); if (project == null) { putMsg(result, Status.PROJECT_NOT_FOUND, projectCode); return result; } Map<Long, WorkFlowLineage> 
workFlowLineagesMap = new HashMap<>(); Set<WorkFlowRelation> workFlowRelations = new HashSet<>(); Set<Long> sourceWorkFlowCodes = Sets.newHashSet(workFlowCode); recursiveWorkFlow(projectCode, workFlowLineagesMap, workFlowRelations, sourceWorkFlowCodes); Map<String, Object> workFlowLists = new HashMap<>(); workFlowLists.put(Constants.WORKFLOW_LIST, workFlowLineagesMap.values()); workFlowLists.put(Constants.WORKFLOW_RELATION_LIST, workFlowRelations); result.put(Constants.DATA_LIST, workFlowLists); putMsg(result, Status.SUCCESS); return result; } private void recursiveWorkFlow(long projectCode, Map<Long, WorkFlowLineage> workFlowLineagesMap, Set<WorkFlowRelation> workFlowRelations, Set<Long> sourceWorkFlowCodes) { for (Long workFlowCode : sourceWorkFlowCodes) { WorkFlowLineage workFlowLineage = workFlowLineageMapper.queryWorkFlowLineageByCode(projectCode, workFlowCode); workFlowLineagesMap.put(workFlowCode, workFlowLineage); List<ProcessLineage> processLineages = workFlowLineageMapper.queryProcessLineageByCode(projectCode, workFlowCode); List<TaskDefinition> taskDefinitionList = new ArrayList<>(); for (ProcessLineage processLineage : processLineages) { if (processLineage.getPreTaskCode() > 0) { taskDefinitionList.add(new TaskDefinition(processLineage.getPreTaskCode(), processLineage.getPreTaskVersion())); } if (processLineage.getPostTaskCode() > 0) { taskDefinitionList.add(new TaskDefinition(processLineage.getPostTaskCode(), processLineage.getPostTaskVersion())); } } sourceWorkFlowCodes = querySourceWorkFlowCodes(projectCode, workFlowCode, taskDefinitionList); if (sourceWorkFlowCodes.isEmpty()) { workFlowRelations.add(new WorkFlowRelation(0L, workFlowCode)); return; } else { workFlowLineagesMap.get(workFlowCode).setSourceWorkFlowCode(StringUtils.join(sourceWorkFlowCodes, Constants.COMMA)); sourceWorkFlowCodes.forEach(code -> workFlowRelations.add(new WorkFlowRelation(code, workFlowCode))); recursiveWorkFlow(projectCode, workFlowLineagesMap, workFlowRelations, sourceWorkFlowCodes); } } } @Override public Map<String, Object> queryWorkFlowLineage(long projectCode) { Map<String, Object> result = new HashMap<>(); Project project = projectMapper.queryByCode(projectCode); if (project == null) { putMsg(result, Status.PROJECT_NOT_FOUND, projectCode); return result; } List<ProcessLineage> processLineages = workFlowLineageMapper.queryProcessLineage(projectCode); Map<Long, WorkFlowLineage> workFlowLineagesMap = new HashMap<>(); Set<WorkFlowRelation> workFlowRelations = new HashSet<>(); if (!processLineages.isEmpty()) { List<WorkFlowLineage> workFlowLineages = workFlowLineageMapper.queryWorkFlowLineageByLineage(processLineages); workFlowLineagesMap = workFlowLineages.stream().collect(Collectors.toMap(WorkFlowLineage::getWorkFlowCode, workFlowLineage -> workFlowLineage)); Map<Long, List<TaskDefinition>> workFlowMap = new HashMap<>(); for (ProcessLineage processLineage : processLineages) { workFlowMap.compute(processLineage.getProcessDefinitionCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processLineage.getPreTaskCode() > 0) { v.add(new TaskDefinition(processLineage.getPreTaskCode(), processLineage.getPreTaskVersion())); } if (processLineage.getPostTaskCode() > 0) { v.add(new TaskDefinition(processLineage.getPostTaskCode(), processLineage.getPostTaskVersion())); } return v; }); } for (Entry<Long, List<TaskDefinition>> workFlow : workFlowMap.entrySet()) { Set<Long> sourceWorkFlowCodes = querySourceWorkFlowCodes(projectCode, workFlow.getKey(), workFlow.getValue()); if 
(sourceWorkFlowCodes.isEmpty()) { workFlowRelations.add(new WorkFlowRelation(0L, workFlow.getKey())); } else { workFlowLineagesMap.get(workFlow.getKey()).setSourceWorkFlowCode(StringUtils.join(sourceWorkFlowCodes, Constants.COMMA)); sourceWorkFlowCodes.forEach(code -> workFlowRelations.add(new WorkFlowRelation(code, workFlow.getKey()))); } } } Map<String, Object> workFlowLists = new HashMap<>(); workFlowLists.put(Constants.WORKFLOW_LIST, workFlowLineagesMap.values()); workFlowLists.put(Constants.WORKFLOW_RELATION_LIST, workFlowRelations); result.put(Constants.DATA_LIST, workFlowLists); putMsg(result, Status.SUCCESS); return result; } private Set<Long> querySourceWorkFlowCodes(long projectCode, long workFlowCode, List<TaskDefinition> taskDefinitionList) { Set<Long> sourceWorkFlowCodes = new HashSet<>(); if (taskDefinitionList == null || taskDefinitionList.isEmpty()) { return sourceWorkFlowCodes; } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionList); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (taskDefinitionLog.getProjectCode() == projectCode) { if (taskDefinitionLog.getTaskType().equals(TaskType.DEPENDENT.getDesc())) { DependentParameters dependentParameters = JSONUtils.parseObject(taskDefinitionLog.getDependence(), DependentParameters.class); if (dependentParameters != null) { List<DependentTaskModel> dependTaskList = dependentParameters.getDependTaskList(); for (DependentTaskModel taskModel : dependTaskList) { List<DependentItem> dependItemList = taskModel.getDependItemList(); for (DependentItem dependentItem : dependItemList) { if (dependentItem.getProjectCode() == projectCode && dependentItem.getDefinitionCode() != workFlowCode) { sourceWorkFlowCodes.add(dependentItem.getDefinitionCode()); } } } } } } } return sourceWorkFlowCodes; } }
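The missing-data symptom in issue 8616 lines up with `recursiveWorkFlow` above: inside the loop it overwrites `sourceWorkFlowCodes` (the very set being iterated) and `return`s as soon as one workflow has no upstream dependency, so any remaining codes in the set are never visited. A sketch of the suspected correction, using a local set and `continue`; the real fix in PR 8684 may be structured differently:

```java
// Sketch: recursiveWorkFlow without the early return / loop-variable clobbering.
private void recursiveWorkFlow(long projectCode,
                               Map<Long, WorkFlowLineage> workFlowLineagesMap,
                               Set<WorkFlowRelation> workFlowRelations,
                               Set<Long> sourceWorkFlowCodes) {
    for (Long workFlowCode : sourceWorkFlowCodes) {
        WorkFlowLineage workFlowLineage =
                workFlowLineageMapper.queryWorkFlowLineageByCode(projectCode, workFlowCode);
        workFlowLineagesMap.put(workFlowCode, workFlowLineage);

        List<ProcessLineage> processLineages =
                workFlowLineageMapper.queryProcessLineageByCode(projectCode, workFlowCode);
        List<TaskDefinition> taskDefinitionList = new ArrayList<>();
        for (ProcessLineage processLineage : processLineages) {
            if (processLineage.getPreTaskCode() > 0) {
                taskDefinitionList.add(new TaskDefinition(
                        processLineage.getPreTaskCode(), processLineage.getPreTaskVersion()));
            }
            if (processLineage.getPostTaskCode() > 0) {
                taskDefinitionList.add(new TaskDefinition(
                        processLineage.getPostTaskCode(), processLineage.getPostTaskVersion()));
            }
        }

        // Keep the upstream codes in a local variable instead of reassigning the
        // collection that the outer for-each is still iterating over.
        Set<Long> upstreamCodes =
                querySourceWorkFlowCodes(projectCode, workFlowCode, taskDefinitionList);
        if (upstreamCodes.isEmpty()) {
            workFlowRelations.add(new WorkFlowRelation(0L, workFlowCode));
            continue; // was: return — which silently dropped the remaining workflows
        }
        workFlowLineagesMap.get(workFlowCode)
                .setSourceWorkFlowCode(StringUtils.join(upstreamCodes, Constants.COMMA));
        upstreamCodes.forEach(code ->
                workFlowRelations.add(new WorkFlowRelation(code, workFlowCode)));
        recursiveWorkFlow(projectCode, workFlowLineagesMap, workFlowRelations, upstreamCodes);
    }
}
```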
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,616
[Bug] [WorkFlowLineage] work flow lineage search result missing data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/03/01/Tx7jRAFYJMp2irC.png) ![](https://vip1.loli.io/2022/03/01/gQmCFqZud38hjHi.png) ### What you expected to happen work flow lineage's result is right. ### How to reproduce above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8616
https://github.com/apache/dolphinscheduler/pull/8684
403c672e6d03ebc1282da2dbf1889c4db38fee95
2ab8c1ca7d820c8921fa062b0757c9b70b00048c
"2022-03-01T08:11:13Z"
java
"2022-03-04T06:05:59Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/WorkFlowLineageMapper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.dao.entity.DependentProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessLineage; import org.apache.dolphinscheduler.dao.entity.WorkFlowLineage; import org.apache.ibatis.annotations.Param; import java.util.List; public interface WorkFlowLineageMapper { /** * queryByName * * @param projectCode projectCode * @param workFlowName workFlowName * @return WorkFlowLineage list */ List<WorkFlowLineage> queryWorkFlowLineageByName(@Param("projectCode") long projectCode, @Param("workFlowName") String workFlowName); /** * queryWorkFlowLineageByCode * * @param projectCode projectCode * @param workFlowCode workFlowCode * @return WorkFlowLineage */ WorkFlowLineage queryWorkFlowLineageByCode(@Param("projectCode") long projectCode, @Param("workFlowCode") long workFlowCode); /** * queryWorkFlowLineageByCode * * @param processLineages processLineages * @return WorkFlowLineage list */ List<WorkFlowLineage> queryWorkFlowLineageByLineage(@Param("processLineages") List<ProcessLineage> processLineages); /** * queryProcessLineage * * @param projectCode projectCode * @return ProcessLineage list */ List<ProcessLineage> queryProcessLineage(@Param("projectCode") long projectCode); /** * queryCodeRelation * * @param projectCode projectCode * @param processDefinitionCode processDefinitionCode * @return ProcessLineage list */ List<ProcessLineage> queryProcessLineageByCode(@Param("projectCode") long projectCode, @Param("processDefinitionCode") long processDefinitionCode); /** * query process definition by name * * @return dependent process definition */ List<DependentProcessDefinition> queryDependentProcessDefinitionByProcessDefinitionCode(@Param("code") long code); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,616
[Bug] [WorkFlowLineage] work flow lineage search result missing data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/03/01/Tx7jRAFYJMp2irC.png) ![](https://vip1.loli.io/2022/03/01/gQmCFqZud38hjHi.png) ### What you expected to happen work flow lineage's result is right. ### How to reproduce above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8616
https://github.com/apache/dolphinscheduler/pull/8684
403c672e6d03ebc1282da2dbf1889c4db38fee95
2ab8c1ca7d820c8921fa062b0757c9b70b00048c
"2022-03-01T08:11:13Z"
java
"2022-03-04T06:05:59Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/WorkFlowLineageMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.WorkFlowLineageMapper"> <select id="queryWorkFlowLineageByName" resultType="org.apache.dolphinscheduler.dao.entity.WorkFlowLineage"> select tepd.code as work_flow_code,tepd.name as work_flow_name from t_ds_process_definition tepd left join t_ds_schedules tes on tepd.code = tes.process_definition_code where tepd.project_code = #{projectCode} <if test="workFlowName != null and workFlowName != ''"> and tepd.name like concat('%', #{workFlowName}, '%') </if> </select> <select id="queryWorkFlowLineageByCode" resultType="org.apache.dolphinscheduler.dao.entity.WorkFlowLineage"> select tepd.code as work_flow_code,tepd.name as work_flow_name, '' as source_work_flow_code, tepd.release_state as work_flow_publish_status, tes.start_time as schedule_start_time, tes.end_time as schedule_end_time, tes.crontab as crontab, tes.release_state as schedule_publish_status from t_ds_process_definition tepd left join t_ds_schedules tes on tepd.code = tes.process_definition_code where tepd.project_code = #{projectCode} and tepd.code = #{workFlowCode} </select> <select id="queryWorkFlowLineageByLineage" resultType="org.apache.dolphinscheduler.dao.entity.WorkFlowLineage"> select tepd.code as work_flow_code,tepd.name as work_flow_name, '' as source_work_flow_code, tepd.release_state as work_flow_publish_status, tes.start_time as schedule_start_time, tes.end_time as schedule_end_time, tes.crontab as crontab, tes.release_state as schedule_publish_status from t_ds_process_definition tepd left join t_ds_schedules tes on tepd.code = tes.process_definition_code where 1=1 <if test="processLineages != null and processLineages.size != 0"> and <foreach collection="processLineages" index="index" item="item" open="(" separator=" or " close=")"> (tepd.project_code = #{item.projectCode} and tepd.code = #{item.processDefinitionCode}) </foreach> </if> </select> <select id="queryProcessLineage" resultType="org.apache.dolphinscheduler.dao.entity.ProcessLineage"> select ptr.project_code, ptr.post_task_code, ptr.post_task_version, ptr.pre_task_code, ptr.pre_task_version, ptr.process_definition_code, ptr.process_definition_version from t_ds_process_definition pd join t_ds_process_task_relation ptr on pd.code = ptr.process_definition_code and pd.version = ptr.process_definition_version where pd.project_code = #{projectCode} </select> <select id="queryProcessLineageByCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessLineage"> select project_code, post_task_code, post_task_version, pre_task_code, pre_task_version, process_definition_code, 
process_definition_version from t_ds_process_task_relation where project_code = #{projectCode} and process_definition_code = #{processDefinitionCode} </select> <select id="queryDependentProcessDefinitionByProcessDefinitionCode" resultType="DependentProcessDefinition"> SELECT c.code AS process_definition_code ,c.name AS process_definition_name ,a.code AS task_definition_code ,a.task_params FROM t_ds_task_definition a JOIN t_ds_process_task_relation b ON a.code = b.pre_task_code and a.version = b.pre_task_version JOIN t_ds_process_definition c ON c.code = b.process_definition_code AND c.version = b.process_definition_version AND c.project_code = b.project_code WHERE 1=1 <if test="code != null and code != ''"> AND a.task_params LIKE concat('%', #{code}, '%') </if> AND a.task_type = 'DEPENDENT' </select> </mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,623
[Bug][UI-Next][V1.0.0-Alpha] process definition state prompt message wrong
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/03/01/HoMe6y1vkaVYXx5.png) ### What you expected to happen When the workflow definition is in the online state, the release button should prompt to take it offline. ### How to reproduce See the screenshot above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8623
https://github.com/apache/dolphinscheduler/pull/8695
79c9d7dd36b0677fdb6e6d68e9cb796ae4a06044
e1862a81ba8407a2af0ed8cc7209b3e663171d59
"2022-03-01T08:36:19Z"
java
"2022-03-04T07:57:03Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/table-action.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs } from 'vue' import { NSpace, NTooltip, NButton, NIcon, NPopconfirm } from 'naive-ui' import { DeleteOutlined, DownloadOutlined, FormOutlined, InfoCircleFilled, PlayCircleOutlined, ClockCircleOutlined, CopyOutlined, FieldTimeOutlined, ExportOutlined, ApartmentOutlined, UploadOutlined } from '@vicons/antd' import { useI18n } from 'vue-i18n' import { IDefinitionData } from '../types' const props = { row: { type: Object as PropType<IDefinitionData> } } export default defineComponent({ name: 'TableAction', props, emits: [ 'updateList', 'startWorkflow', 'timingWorkflow', 'versionWorkflow', 'deleteWorkflow', 'releaseWorkflow', 'copyWorkflow', 'exportWorkflow', 'gotoTimingManage', 'gotoWorkflowTree' ], setup(props, ctx) { const handleStartWorkflow = () => { ctx.emit('startWorkflow') } const handleTimingWorkflow = () => { ctx.emit('timingWorkflow') } const handleVersionWorkflow = () => { ctx.emit('versionWorkflow') } const handleDeleteWorkflow = () => { ctx.emit('deleteWorkflow') } const handleReleaseWorkflow = () => { ctx.emit('releaseWorkflow') } const handleCopyWorkflow = () => { ctx.emit('copyWorkflow') } const handleExportWorkflow = () => { ctx.emit('exportWorkflow') } const handleGotoTimingManage = () => { ctx.emit('gotoTimingManage') } const handleGotoWorkflowTree = () => { ctx.emit('gotoWorkflowTree') } return { handleStartWorkflow, handleTimingWorkflow, handleVersionWorkflow, handleDeleteWorkflow, handleReleaseWorkflow, handleCopyWorkflow, handleExportWorkflow, handleGotoTimingManage, handleGotoWorkflowTree, ...toRefs(props) } }, render() { const { t } = useI18n() const releaseState = this.row?.releaseState const scheduleReleaseState = this.row?.scheduleReleaseState return ( <NSpace> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.edit'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} /* TODO: Edit workflow */ > <NIcon> <FormOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.start'), trigger: () => ( <NButton size='tiny' type='primary' tag='div' circle onClick={this.handleStartWorkflow} disabled={releaseState === 'OFFLINE'} > <NIcon> <PlayCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.timing'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleTimingWorkflow} disabled={releaseState !== 'ONLINE' || !!scheduleReleaseState} > <NIcon> <ClockCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.up_line'), trigger: () => ( <NButton size='tiny' type={releaseState === 
'ONLINE' ? 'warning' : 'error'} tag='div' circle onClick={this.handleReleaseWorkflow} > <NIcon> {releaseState === 'ONLINE' ? ( <DownloadOutlined /> ) : ( <UploadOutlined /> )} </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.copy_workflow'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} onClick={this.handleCopyWorkflow} > <NIcon> <CopyOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.cron_manage'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle disabled={releaseState === 'OFFLINE'} onClick={this.handleGotoTimingManage} > <NIcon> <FieldTimeOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.delete'), trigger: () => ( <NButton size='tiny' type='error' tag='div' circle disabled={releaseState === 'ONLINE'} > <NPopconfirm onPositiveClick={this.handleDeleteWorkflow}> {{ default: () => t('project.workflow.delete_confirm'), icon: () => ( <NIcon> <InfoCircleFilled /> </NIcon> ), trigger: () => ( <NIcon> <DeleteOutlined /> </NIcon> ) }} </NPopconfirm> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.tree_view'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleGotoWorkflowTree} > <NIcon> <ApartmentOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.export'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleExportWorkflow} > <NIcon> <ExportOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.version_info'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleVersionWorkflow} > <NIcon> <InfoCircleFilled /> </NIcon> </NButton> ) }} </NTooltip> </NSpace> ) } })
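In the before-fix table-action.tsx above, the release-toggle tooltip is hard-coded to t('project.workflow.up_line'), which is the wrong prompt reported in issue 8623; the fixed version of the same file later in this dump derives the label from releaseState instead. Below is only a minimal sketch of that idea — the `releaseTooltip` helper and `Translate` type are illustrative names, not part of the project:

```ts
// Illustrative sketch: pick the release-toggle tooltip from the current
// releaseState, so an ONLINE workflow prompts "down_line" (take offline).
type Translate = (key: string) => string

export const releaseTooltip = (t: Translate, releaseState?: string): string =>
  releaseState === 'ONLINE'
    ? t('project.workflow.down_line')
    : t('project.workflow.up_line')
```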
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/router/modules/projects.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import type { Component } from 'vue' import utils from '@/utils' // All TSX files under the views folder automatically generate mapping relationship const modules = import.meta.glob('/src/views/**/**.tsx') const components: { [key: string]: Component } = utils.mapping(modules) export default { path: '/projects', name: 'projects', meta: { title: '项目管理' }, redirect: { name: 'projects-list' }, component: () => import('@/layouts/content'), children: [ { path: '/projects/list', name: 'projects-list', component: components['projects-list'], meta: { title: '项目', showSide: false, auth: [] } }, { path: '/projects/:projectCode', name: 'projects-overview', component: components['projects-overview'], meta: { title: '项目概览', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow/relation', name: 'workflow-relation', component: components['projects-workflow-relation'], meta: { title: '工作流关系', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow-definition', name: 'workflow-definition-list', component: components['projects-workflow-definition'], meta: { title: '工作流定义', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow-definition/timing/:definitionCode', name: 'workflow-definition-timing', component: components['projects-workflow-definition-timing'], meta: { title: '定时管理', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow/definitions/create', name: 'workflow-definition-create', component: components['projects-workflow-definition-create'], meta: { title: '创建工作流定义', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow/definitions/:code', name: 'workflow-definition-detail', component: components['projects-workflow-definition-detail'], meta: { title: '工作流定义详情', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow/instances', name: 'workflow-instance-list', component: components['projects-workflow-instance'], meta: { title: '工作流实例', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow/instances/:id', name: 'workflow-instance-detail', component: components['projects-workflow-instance-detail'], meta: { title: '工作流实例详情', showSide: true, auth: [] } }, { path: '/projects/:projectCode/task/definitions', name: 'task-definition', component: components['projects-task-definition'], meta: { title: '任务定义', showSide: true, auth: [] } }, { path: '/projects/:projectCode/task/instances', name: 'task-instance', component: components['projects-task-instance'], meta: { title: '任务实例', showSide: true, auth: [] } }, { path: '/projects/:projectCode/workflow-definition/tree/:definitionCode', name: 'workflow-definition-tree', component: components['projects-workflow-definition-tree'], meta: { title: '工作流定义树形图', showSide: true, auth: [] } } ] }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/service/modules/process-instances/index.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { axios } from '@/service/service' import { CodeReq, ProcessInstanceListReq, BatchDeleteReq, SubIdReq, TaskReq, LongestReq, IdReq, ProcessInstanceReq } from './types' export function queryProcessInstanceListPaging( params: ProcessInstanceListReq, code: number ): any { return axios({ url: `/projects/${code}/process-instances`, method: 'get', params }) } export function batchDeleteProcessInstanceByIds( data: BatchDeleteReq, code: number ): any { return axios({ url: `/projects/${code}/process-instances/batch-delete`, method: 'post', data }) } export function queryParentInstanceBySubId( params: SubIdReq, code: CodeReq ): any { return axios({ url: `/projects/${code}/process-instances/query-parent-by-sub`, method: 'get', params }) } export function querySubProcessInstanceByTaskCode( params: TaskReq, code: CodeReq ): any { return axios({ url: `/projects/${code}/process-instances/query-sub-by-parent`, method: 'get', params }) } export function queryTopNLongestRunningProcessInstance( params: LongestReq, code: CodeReq ): any { return axios({ url: `/projects/${code}/process-instances/top-n`, method: 'get', params }) } export function queryProcessInstanceById( instanceId: number, projectCode: number ): any { return axios({ url: `/projects/${projectCode}/process-instances/${instanceId}`, method: 'get' }) } export function updateProcessInstance( data: ProcessInstanceReq, id: IdReq, code: CodeReq ): any { return axios({ url: `/projects/${code}/process-instances/${id}`, method: 'put', data }) } export function deleteProcessInstanceById(id: number, code: number): any { return axios({ url: `/projects/${code}/process-instances/${id}`, method: 'delete' }) } export function queryTaskListByProcessId(id: number, code: number): any { return axios({ url: `/projects/${code}/process-instances/${id}/tasks`, method: 'get' }) } export function vieGanttTree(id: IdReq, code: CodeReq): any { return axios({ url: `/projects/${code}/process-instances/${id}/view-gantt`, method: 'get' }) } export function viewVariables(id: number, code: number): any { return axios({ url: `/projects/${code}/process-instances/${id}/view-variables`, method: 'get' }) }
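The service module above is one of the before-fix files recorded for issue 8571 (Gantt chart of workflow instances); it already exports `vieGanttTree`, which the new Gantt view presumably calls. A small hedged sketch of such a call follows — the wrapper name is made up, the argument and response shapes are taken to be whatever the module's own types define:

```ts
// Sketch only: fetch Gantt data for a workflow instance via the existing service.
import { vieGanttTree } from '@/service/modules/process-instances'

export async function loadGanttData(
  instanceId: Parameters<typeof vieGanttTree>[0],
  projectCode: Parameters<typeof vieGanttTree>[1]
) {
  // The response carries the task list and time ranges used to draw the Gantt
  // bars; its exact shape depends on the backend, so it is treated as opaque here.
  return vieGanttTree(instanceId, projectCode)
}
```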
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/utils/common.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { SettingFilled, SettingOutlined, CloseCircleOutlined, PauseCircleOutlined, CheckCircleOutlined, EditOutlined, MinusCircleOutlined, CheckCircleFilled, LoadingOutlined, PauseCircleFilled, ClockCircleOutlined, StopFilled, StopOutlined, GlobalOutlined, IssuesCloseOutlined } from '@vicons/antd' import { parseISO } from 'date-fns' import _ from 'lodash' import { ITaskState } from './types' /** * Intelligent display kb m */ export const bytesToSize = (bytes: number) => { if (bytes === 0) return '0 B' const k = 1024 // or 1024 const sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] const i = Math.floor(Math.log(bytes) / Math.log(k)) return parseInt((bytes / Math.pow(k, i)).toPrecision(3)) + ' ' + sizes[i] } export const fileTypeArr = [ 'txt', 'log', 'sh', 'bat', 'conf', 'cfg', 'py', 'java', 'sql', 'xml', 'hql', 'properties', 'json', 'yml', 'yaml', 'ini', 'js' ] /** * Operation type * @desc tooltip * @code identifier */ export const runningType = (t: any) => [ { desc: `${t('project.workflow.start_process')}`, code: 'START_PROCESS' }, { desc: `${t('project.workflow.execute_from_the_current_node')}`, code: 'START_CURRENT_TASK_PROCESS' }, { desc: `${t('project.workflow.recover_tolerance_fault_process')}`, code: 'RECOVER_TOLERANCE_FAULT_PROCESS' }, { desc: `${t('project.workflow.resume_the_suspension_process')}`, code: 'RECOVER_SUSPENDED_PROCESS' }, { desc: `${t('project.workflow.execute_from_the_failed_nodes')}`, code: 'START_FAILURE_TASK_PROCESS' }, { desc: `${t('project.workflow.complement_data')}`, code: 'COMPLEMENT_DATA' }, { desc: `${t('project.workflow.scheduling_execution')}`, code: 'SCHEDULER' }, { desc: `${t('project.workflow.rerun')}`, code: 'REPEAT_RUNNING' }, { desc: `${t('project.workflow.pause')}`, code: 'PAUSE' }, { desc: `${t('project.workflow.stop')}`, code: 'STOP' }, { desc: `${t('project.workflow.recovery_waiting_thread')}`, code: 'RECOVER_WAITING_THREAD' }, { desc: `${t('project.workflow.recover_serial_wait')}`, code: 'RECOVER_SERIAL_WAIT' } ] /** * State code table */ export const stateType = (t: any) => [ { value: '', label: `${t('project.workflow.all_status')}` }, { value: 'SUBMITTED_SUCCESS', label: `${t('project.workflow.submit_success')}` }, { value: 'RUNNING_EXECUTION', label: `${t('project.workflow.running')}` }, { value: 'READY_PAUSE', label: `${t('project.workflow.ready_to_pause')}` }, { value: 'PAUSE', label: `${t('project.workflow.pause')}` }, { value: 'READY_STOP', label: `${t('project.workflow.ready_to_stop')}` }, { value: 'STOP', label: `${t('project.workflow.stop')}` }, { value: 'FAILURE', label: `${t('project.workflow.failed')}` }, { value: 'SUCCESS', label: `${t('project.workflow.success')}` }, { value: 'NEED_FAULT_TOLERANCE', label: 
`${t('project.workflow.need_fault_tolerance')}` }, { value: 'KILL', label: `${t('project.workflow.kill')}` }, { value: 'WAITING_THREAD', label: `${t('project.workflow.waiting_for_thread')}` }, { value: 'WAITING_DEPEND', label: `${t('project.workflow.waiting_for_dependency_to_complete')}` }, { value: 'DELAY_EXECUTION', label: `${t('project.workflow.delay_execution')}` }, { value: 'FORCED_SUCCESS', label: `${t('project.workflow.forced_success')}` }, { value: 'SERIAL_WAIT', label: `${t('project.workflow.serial_wait')}` } ] /** * Task status * @id id * @desc tooltip * @color color * @icon icon * @isSpin is loading (Need to execute the code block to write if judgment) */ // TODO: Looking for a more suitable icon export const tasksState = (t: any): ITaskState => ({ SUBMITTED_SUCCESS: { id: 0, desc: `${t('project.workflow.submit_success')}`, color: '#A9A9A9', icon: IssuesCloseOutlined, isSpin: false, classNames: 'submitted' }, RUNNING_EXECUTION: { id: 1, desc: `${t('project.workflow.executing')}`, color: '#0097e0', icon: SettingFilled, isSpin: true, classNames: 'executing' }, READY_PAUSE: { id: 2, desc: `${t('project.workflow.ready_to_pause')}`, color: '#07b1a3', icon: SettingOutlined, isSpin: false, classNames: 'submitted' }, PAUSE: { id: 3, desc: `${t('project.workflow.pause')}`, color: '#057c72', icon: PauseCircleOutlined, isSpin: false, classNames: 'pause' }, READY_STOP: { id: 4, desc: `${t('project.workflow.ready_to_stop')}`, color: '#FE0402', icon: StopFilled, isSpin: false }, STOP: { id: 5, desc: `${t('project.workflow.stop')}`, color: '#e90101', icon: StopOutlined, isSpin: false }, FAILURE: { id: 6, desc: `${t('project.workflow.failed')}`, color: '#000000', icon: CloseCircleOutlined, isSpin: false, classNames: 'failed' }, SUCCESS: { id: 7, desc: `${t('project.workflow.success')}`, color: '#33cc00', icon: CheckCircleOutlined, isSpin: false, classNames: 'success' }, NEED_FAULT_TOLERANCE: { id: 8, desc: `${t('project.workflow.need_fault_tolerance')}`, color: '#FF8C00', icon: EditOutlined, isSpin: false }, KILL: { id: 9, desc: `${t('project.workflow.kill')}`, color: '#a70202', icon: MinusCircleOutlined, isSpin: false }, WAITING_THREAD: { id: 10, desc: `${t('project.workflow.waiting_for_thread')}`, color: '#912eed', icon: ClockCircleOutlined, isSpin: false }, WAITING_DEPEND: { id: 11, desc: `${t('project.workflow.waiting_for_dependence')}`, color: '#5101be', icon: GlobalOutlined, isSpin: false }, DELAY_EXECUTION: { id: 12, desc: `${t('project.workflow.delay_execution')}`, color: '#5102ce', icon: PauseCircleFilled, isSpin: false }, FORCED_SUCCESS: { id: 13, desc: `${t('project.workflow.forced_success')}`, color: '#5102ce', icon: CheckCircleFilled, isSpin: false }, SERIAL_WAIT: { id: 14, desc: `${t('project.workflow.serial_wait')}`, color: '#5102ce', icon: LoadingOutlined, isSpin: false } }) /** * A simple uuid generator, support prefix and template pattern. * * @example * * uuid('v-') // -> v-xxx * uuid('v-ani-%{s}-translate') // -> v-ani-xxx */ export function uuid(prefix: string) { const id = Math.floor(Math.random() * 10000).toString(36) return prefix ? ~prefix.indexOf('%{s}') ? 
prefix.replace(/%\{s\}/g, id) : prefix + id : id } export const warningTypeList = [ { id: 'NONE', code: 'project.workflow.none_send' }, { id: 'SUCCESS', code: 'project.workflow.success_send' }, { id: 'FAILURE', code: 'project.workflow.failure_send' }, { id: 'ALL', code: 'project.workflow.all_send' } ] export const parseTime = (dateTime: string | number) => { if (_.isString(dateTime) === true) { return parseISO(dateTime as string) } else { return new Date(dateTime) } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/views/projects/workflow/instance/components/table-action.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs } from 'vue' import { NSpace, NTooltip, NButton, NIcon, NPopconfirm } from 'naive-ui' import { DeleteOutlined, FormOutlined, InfoCircleFilled, SyncOutlined, CloseOutlined, CloseCircleOutlined, PauseCircleOutlined, ControlOutlined, PlayCircleOutlined } from '@vicons/antd' import { useI18n } from 'vue-i18n' import { useRouter } from 'vue-router' import type { Router } from 'vue-router' import { IWorkflowInstance } from '@/service/modules/process-instances/types' const props = { row: { type: Object as PropType<IWorkflowInstance>, required: true } } export default defineComponent({ name: 'TableAction', props, emits: [ 'updateList', 'reRun', 'reStore', 'stop', 'suspend', 'deleteInstance' ], setup(props, ctx) { const router: Router = useRouter() const handleEdit = () => { router.push({ name: 'workflow-instance-detail', params: { id: props.row!.id }, query: { code: props.row!.processDefinitionCode } }) } const handleReRun = () => { ctx.emit('reRun') } const handleReStore = () => { ctx.emit('reStore') } const handleStop = () => { ctx.emit('stop') } const handleSuspend = () => { ctx.emit('suspend') } const handleDeleteInstance = () => { ctx.emit('deleteInstance') } return { handleEdit, handleReRun, handleReStore, handleStop, handleSuspend, handleDeleteInstance, ...toRefs(props) } }, render() { const { t } = useI18n() const state = this.row?.state return ( <NSpace> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.edit'), trigger: () => ( <NButton tag='div' size='tiny' type='info' circle disabled={ (state !== 'SUCCESS' && state !== 'PAUSE' && state !== 'FAILURE' && state !== 'STOP') || this.row?.disabled } onClick={this.handleEdit} > <NIcon> <FormOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.rerun'), trigger: () => { return ( <NButton tag='div' size='tiny' type='info' circle onClick={this.handleReRun} disabled={ (state !== 'SUCCESS' && state !== 'PAUSE' && state !== 'FAILURE' && state !== 'STOP') || this.row?.disabled } > {this.row?.buttonType === 'run' ? ( <span>{this.row?.count}</span> ) : ( <NIcon> <SyncOutlined /> </NIcon> )} </NButton> ) } }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.recovery_failed'), trigger: () => ( <NButton tag='div' size='tiny' type='primary' circle onClick={this.handleReStore} disabled={state !== 'FAILURE' || this.row?.disabled} > {this.row?.buttonType === 'store' ? ( <span>{this.row?.count}</span> ) : ( <NIcon> <CloseCircleOutlined /> </NIcon> )} </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => state === 'PAUSE' ? 
t('project.workflow.recovery_failed') : t('project.workflow.stop'), trigger: () => ( <NButton tag='div' size='tiny' type='error' circle onClick={this.handleStop} disabled={ (state !== 'RUNNING_EXECUTION' && state !== 'PAUSE') || this.row?.disabled } > <NIcon> {state === 'STOP' ? ( <PlayCircleOutlined /> ) : ( <CloseOutlined /> )} </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => state === 'PAUSE' ? t('project.workflow.recovery_suspend') : t('project.workflow.pause'), trigger: () => ( <NButton tag='div' size='tiny' type='warning' circle disabled={ (state !== 'RUNNING_EXECUTION' && state !== 'PAUSE') || this.row?.disabled } onClick={this.handleSuspend} > <NIcon> {state === 'PAUSE' ? ( <PlayCircleOutlined /> ) : ( <PauseCircleOutlined /> )} </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.delete'), trigger: () => ( <NButton tag='div' size='tiny' type='error' circle disabled={ (state !== 'SUCCESS' && state !== 'FAILURE' && state !== 'STOP' && state !== 'PAUSE') || this.row?.disabled } > <NPopconfirm onPositiveClick={this.handleDeleteInstance}> {{ default: () => t('project.workflow.delete_confirm'), icon: () => ( <NIcon> <InfoCircleFilled /> </NIcon> ), trigger: () => ( <NIcon> <DeleteOutlined /> </NIcon> ) }} </NPopconfirm> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.gantt'), trigger: () => ( <NButton tag='div' size='tiny' type='info' circle /* TODO: Goto gantt*/ disabled={this.row?.disabled} > <NIcon> <ControlOutlined /> </NIcon> </NButton> ) }} </NTooltip> </NSpace> ) } })
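The instance table-action.tsx above still carries a literal `/* TODO: Goto gantt*/` on the Gantt button, so the fix for issue 8571 presumably wires that button to a new Gantt route. The composable below is only a hedged sketch of such a handler; the route name 'workflow-instance-gantt' is hypothetical and not taken from the project:

```ts
// Sketch only: a click handler that navigates to a Gantt view for one instance.
// Must be called inside a component's setup(), like any composable using useRouter.
import { useRouter } from 'vue-router'

export const useGotoGantt = (instanceId: number, projectCode: number) => {
  const router = useRouter()
  return () =>
    router.push({
      name: 'workflow-instance-gantt', // hypothetical route name
      params: { id: instanceId, projectCode }
    })
}
```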
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/views/projects/workflow/instance/gantt/components/gantt-chart.tsx
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/views/projects/workflow/instance/gantt/index.tsx
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/views/projects/workflow/instance/gantt/type.ts
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,571
[Feature][UI Next][V1.0.0-Alpha] Gantt chart of Project Manage - Workflow Instance
null
https://github.com/apache/dolphinscheduler/issues/8571
https://github.com/apache/dolphinscheduler/pull/8697
e1862a81ba8407a2af0ed8cc7209b3e663171d59
c8b5d1e5088241faaf29f61a5625d33fffb27a8e
"2022-02-28T06:55:09Z"
java
"2022-03-04T09:37:40Z"
dolphinscheduler-ui-next/src/views/projects/workflow/instance/gantt/use-gantt.ts
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,680
[Bug][UI Next][V1.0.0-Alpha] If the drop-down box is selected, it cannot be deleted, and there is no clear button
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Once an option is selected in the drop-down box, it cannot be removed, and there is no clear button ![image](https://user-images.githubusercontent.com/76080484/156514799-5b6137db-ffde-40a5-80af-e29325daceb3.png) ### What you expected to happen The selection box should be clearable ### How to reproduce Open the dialog for running a workflow and select a value in one of the drop-down boxes ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8680
https://github.com/apache/dolphinscheduler/pull/8700
79733643aab64af78c13f59fcaf2f9b3a4e3a068
63c476ac413597a30dd0f5dd92797c2873449825
"2022-03-03T07:16:09Z"
java
"2022-03-04T13:15:36Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/start-modal.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs, h, onMounted, ref } from 'vue' import { useI18n } from 'vue-i18n' import Modal from '@/components/modal' import { useForm } from './use-form' import { useModal } from './use-modal' import { NForm, NFormItem, NButton, NIcon, NInput, NSpace, NRadio, NRadioGroup, NSelect, NSwitch, NCheckbox, NDatePicker } from 'naive-ui' import { ArrowDownOutlined, ArrowUpOutlined, DeleteOutlined, PlusCircleOutlined } from '@vicons/antd' import { IDefinitionData } from '../types' import styles from '../index.module.scss' const props = { row: { type: Object as PropType<IDefinitionData>, default: {} }, show: { type: Boolean as PropType<boolean>, default: false } } export default defineComponent({ name: 'workflowDefinitionStart', props, emits: ['update:show', 'update:row', 'updateList'], setup(props, ctx) { const parallelismRef = ref(false) const { t } = useI18n() const { startState } = useForm() const { variables, handleStartDefinition, getWorkerGroups, getAlertGroups, getEnvironmentList } = useModal(startState, ctx) const hideModal = () => { ctx.emit('update:show') } const handleStart = () => { handleStartDefinition(props.row.code) } const generalWarningTypeListOptions = () => [ { value: 'NONE', label: t('project.workflow.none_send') }, { value: 'SUCCESS', label: t('project.workflow.success_send') }, { value: 'FAILURE', label: t('project.workflow.failure_send') }, { value: 'ALL', label: t('project.workflow.all_send') } ] const generalPriorityList = () => [ { value: 'HIGHEST', label: 'HIGHEST', color: '#ff0000', icon: ArrowUpOutlined }, { value: 'HIGH', label: 'HIGH', color: '#ff0000', icon: ArrowUpOutlined }, { value: 'MEDIUM', label: 'MEDIUM', color: '#EA7D24', icon: ArrowUpOutlined }, { value: 'LOW', label: 'LOW', color: '#2A8734', icon: ArrowDownOutlined }, { value: 'LOWEST', label: 'LOWEST', color: '#2A8734', icon: ArrowDownOutlined } ] const renderLabel = (option: any) => { return [ h( NIcon, { style: { verticalAlign: 'middle', marginRight: '4px', marginBottom: '3px' }, color: option.color }, { default: () => h(option.icon) } ), option.label ] } const updateWorkerGroup = () => { startState.startForm.environmentCode = null } const addStartParams = () => { variables.startParamsList.push({ prop: '', value: '' }) } const updateParamsList = (index: number, param: Array<string>) => { variables.startParamsList[index].prop = param[0] variables.startParamsList[index].value = param[1] } const removeStartParams = (index: number) => { variables.startParamsList.splice(index, 1) } onMounted(() => { getWorkerGroups() getAlertGroups() getEnvironmentList() }) return { t, parallelismRef, hideModal, handleStart, generalWarningTypeListOptions, generalPriorityList, renderLabel, updateWorkerGroup, removeStartParams, 
addStartParams, updateParamsList, ...toRefs(variables), ...toRefs(startState), ...toRefs(props) } }, render() { const { t } = this return ( <Modal show={this.show} title={t('project.workflow.set_parameters_before_starting')} onCancel={this.hideModal} onConfirm={this.handleStart} > <NForm ref='startFormRef' label-placement='left' label-width='160'> <NFormItem label={t('project.workflow.workflow_name')} path='workflow_name' > {this.row.name} </NFormItem> <NFormItem label={t('project.workflow.failure_strategy')} path='failureStrategy' > <NRadioGroup v-model:value={this.startForm.failureStrategy}> <NSpace> <NRadio value='CONTINUE'> {t('project.workflow.continue')} </NRadio> <NRadio value='END'>{t('project.workflow.end')}</NRadio> </NSpace> </NRadioGroup> </NFormItem> <NFormItem label={t('project.workflow.notification_strategy')} path='warningType' > <NSelect options={this.generalWarningTypeListOptions()} v-model:value={this.startForm.warningType} /> </NFormItem> <NFormItem label={t('project.workflow.workflow_priority')} path='processInstancePriority' > <NSelect options={this.generalPriorityList()} renderLabel={this.renderLabel} v-model:value={this.startForm.processInstancePriority} /> </NFormItem> <NFormItem label={t('project.workflow.worker_group')} path='workerGroup' > <NSelect options={this.workerGroups} onUpdateValue={this.updateWorkerGroup} v-model:value={this.startForm.workerGroup} /> </NFormItem> <NFormItem label={t('project.workflow.environment_name')} path='environmentCode' > <NSelect options={this.environmentList.filter((item: any) => item.workerGroups?.includes(this.startForm.workerGroup) )} v-model:value={this.startForm.environmentCode} /> </NFormItem> <NFormItem label={t('project.workflow.alarm_group')} path='warningGroupId' > <NSelect options={this.alertGroups} placeholder={t('project.workflow.please_choose')} v-model:value={this.startForm.warningGroupId} /> </NFormItem> <NFormItem label={t('project.workflow.complement_data')} path='complement_data' > <NCheckbox checkedValue={'COMPLEMENT_DATA'} uncheckedValue={undefined} v-model:checked={this.startForm.execType} > {t('project.workflow.whether_complement_data')} </NCheckbox> </NFormItem> {this.startForm.execType && ( <NSpace> <NFormItem label={t('project.workflow.mode_of_execution')} path='runMode' > <NRadioGroup v-model:value={this.startForm.runMode}> <NSpace> <NRadio value={'RUN_MODE_SERIAL'}> {t('project.workflow.serial_execution')} </NRadio> <NRadio value={'RUN_MODE_PARALLEL'}> {t('project.workflow.parallel_execution')} </NRadio> </NSpace> </NRadioGroup> </NFormItem> {this.startForm.runMode === 'RUN_MODE_PARALLEL' && ( <NFormItem label={t('project.workflow.parallelism')} path='expectedParallelismNumber' > <NCheckbox v-model:checked={this.parallelismRef}> {t('project.workflow.custom_parallelism')} </NCheckbox> <NInput disabled={!this.parallelismRef} placeholder={t('project.workflow.please_enter_parallelism')} v-model:value={this.startForm.expectedParallelismNumber} /> </NFormItem> )} <NFormItem label={t('project.workflow.schedule_date')} path='startEndTime' > <NDatePicker type='datetimerange' clearable v-model:value={this.startForm.startEndTime} /> </NFormItem> </NSpace> )} <NFormItem label={t('project.workflow.startup_parameter')} path='startup_parameter' > {this.startParamsList.length === 0 ? 
( <NButton text type='primary' onClick={this.addStartParams}> <NIcon> <PlusCircleOutlined /> </NIcon> </NButton> ) : ( <NSpace vertical> {this.startParamsList.map((item, index) => ( <NSpace class={styles.startup} key={index}> <NInput pair separator=':' placeholder={['prop', 'value']} onUpdateValue={(param) => this.updateParamsList(index, param) } /> <NButton text type='error' onClick={() => this.removeStartParams(index)} > <NIcon> <DeleteOutlined /> </NIcon> </NButton> <NButton text type='primary' onClick={this.addStartParams}> <NIcon> <PlusCircleOutlined /> </NIcon> </NButton> </NSpace> ))} </NSpace> )} </NFormItem> <NFormItem label={t('project.workflow.whether_dry_run')} path='dryRun' > <NSwitch checkedValue={1} uncheckedValue={0} v-model:value={this.startForm.dryRun} /> </NFormItem> </NForm> </Modal> ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,680
[Bug][UI Next][V1.0.0-Alpha] If the drop-down box is selected, it cannot be deleted, and there is no clear button
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Once an option is selected in the drop-down box, it cannot be removed, and there is no clear button ![image](https://user-images.githubusercontent.com/76080484/156514799-5b6137db-ffde-40a5-80af-e29325daceb3.png) ### What you expected to happen The selection box should be clearable ### How to reproduce Open the dialog for running a workflow and select a value in one of the drop-down boxes ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8680
https://github.com/apache/dolphinscheduler/pull/8700
79733643aab64af78c13f59fcaf2f9b3a4e3a068
63c476ac413597a30dd0f5dd92797c2873449825
"2022-03-03T07:16:09Z"
java
"2022-03-04T13:15:36Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/timing-modal.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs, h, onMounted, ref, watch } from 'vue' import { useI18n } from 'vue-i18n' import Modal from '@/components/modal' import { useForm } from './use-form' import { useModal } from './use-modal' import { NForm, NFormItem, NButton, NIcon, NInput, NSpace, NRadio, NRadioGroup, NSelect, NDatePicker, NInputGroup, NList, NListItem, NThing, NPopover } from 'naive-ui' import { ArrowDownOutlined, ArrowUpOutlined } from '@vicons/antd' import { timezoneList } from '@/utils/timezone' import Crontab from '@/components/crontab' const props = { row: { type: Object, default: {} }, show: { type: Boolean as PropType<boolean>, default: false }, type: { type: String as PropType<String>, default: 'create' } } export default defineComponent({ name: 'workflowDefinitionStart', props, emits: ['update:show', 'update:row', 'updateList'], setup(props, ctx) { const crontabRef = ref() const parallelismRef = ref(false) const { t } = useI18n() const { timingState } = useForm() const { variables, handleCreateTiming, handleUpdateTiming, getWorkerGroups, getAlertGroups, getEnvironmentList, getPreviewSchedule } = useModal(timingState, ctx) const hideModal = () => { ctx.emit('update:show') } const handleTiming = () => { if (props.type === 'create') { handleCreateTiming(props.row.code as number) } else { handleUpdateTiming(props.row.id) } } const generalWarningTypeListOptions = () => [ { value: 'NONE', label: t('project.workflow.none_send') }, { value: 'SUCCESS', label: t('project.workflow.success_send') }, { value: 'FAILURE', label: t('project.workflow.failure_send') }, { value: 'ALL', label: t('project.workflow.all_send') } ] const generalPriorityList = () => [ { value: 'HIGHEST', label: 'HIGHEST', color: '#ff0000', icon: ArrowUpOutlined }, { value: 'HIGH', label: 'HIGH', color: '#ff0000', icon: ArrowUpOutlined }, { value: 'MEDIUM', label: 'MEDIUM', color: '#EA7D24', icon: ArrowUpOutlined }, { value: 'LOW', label: 'LOW', color: '#2A8734', icon: ArrowDownOutlined }, { value: 'LOWEST', label: 'LOWEST', color: '#2A8734', icon: ArrowDownOutlined } ] const timezoneOptions = () => timezoneList.map((item) => ({ label: item, value: item })) const renderLabel = (option: any) => { return [ h( NIcon, { style: { verticalAlign: 'middle', marginRight: '4px', marginBottom: '3px' }, color: option.color }, { default: () => h(option.icon) } ), option.label ] } const updateWorkerGroup = () => { timingState.timingForm.environmentCode = null } const handlePreview = () => { getPreviewSchedule() } onMounted(() => { getWorkerGroups() getAlertGroups() getEnvironmentList() }) watch( () => props.row, () => { timingState.timingForm.startEndTime = [ new Date(props.row.startTime), new Date(props.row.endTime) ] timingState.timingForm.crontab = 
props.row.crontab timingState.timingForm.timezoneId = props.row.timezoneId timingState.timingForm.failureStrategy = props.row.failureStrategy timingState.timingForm.warningType = props.row.warningType timingState.timingForm.processInstancePriority = props.row.processInstancePriority timingState.timingForm.warningGroupId = props.row.warningGroupId timingState.timingForm.workerGroup = props.row.workerGroup timingState.timingForm.environmentCode = props.row.environmentCode } ) return { t, crontabRef, parallelismRef, hideModal, handleTiming, generalWarningTypeListOptions, generalPriorityList, timezoneOptions, renderLabel, updateWorkerGroup, handlePreview, ...toRefs(variables), ...toRefs(timingState), ...toRefs(props) } }, render() { const { t } = this if (Number(this.timingForm.warningGroupId) === 0) { this.timingForm.warningGroupId = '' } return ( <Modal show={this.show} title={t('project.workflow.set_parameters_before_timing')} onCancel={this.hideModal} onConfirm={this.handleTiming} > <NForm ref='timingFormRef' label-placement='left' label-width='160'> <NFormItem label={t('project.workflow.start_and_stop_time')} path='startEndTime' > <NDatePicker type='datetimerange' clearable v-model:value={this.timingForm.startEndTime} /> </NFormItem> <NFormItem label={t('project.workflow.timing')} path='crontab'> <NInputGroup> <NPopover trigger='click' showArrow={false} placement='bottom' style={{ width: '500px' }} > {{ trigger: () => ( <NInput style={{ width: '80%' }} readonly={true} v-model:value={this.timingForm.crontab} ></NInput> ), default: () => ( <Crontab v-model:value={this.timingForm.crontab} /> ) }} </NPopover> <NButton type='primary' ghost onClick={this.handlePreview}> {t('project.workflow.execute_time')} </NButton> </NInputGroup> </NFormItem> <NFormItem label={t('project.workflow.timezone')} path='timezoneId' showFeedback={false} > <NSelect v-model:value={this.timingForm.timezoneId} options={this.timezoneOptions()} /> </NFormItem> <NFormItem label=' ' showFeedback={false}> <NList> <NListItem> <NThing description={t('project.workflow.next_five_execution_times')} > {this.schedulePreviewList.map((item: string) => ( <NSpace> {item} <br /> </NSpace> ))} </NThing> </NListItem> </NList> </NFormItem> <NFormItem label={t('project.workflow.failure_strategy')} path='failureStrategy' > <NRadioGroup v-model:value={this.timingForm.failureStrategy}> <NSpace> <NRadio value='CONTINUE'> {t('project.workflow.continue')} </NRadio> <NRadio value='END'>{t('project.workflow.end')}</NRadio> </NSpace> </NRadioGroup> </NFormItem> <NFormItem label={t('project.workflow.notification_strategy')} path='warningType' > <NSelect options={this.generalWarningTypeListOptions()} v-model:value={this.timingForm.warningType} /> </NFormItem> <NFormItem label={t('project.workflow.workflow_priority')} path='processInstancePriority' > <NSelect options={this.generalPriorityList()} renderLabel={this.renderLabel} v-model:value={this.timingForm.processInstancePriority} /> </NFormItem> <NFormItem label={t('project.workflow.worker_group')} path='workerGroup' > <NSelect options={this.workerGroups} onUpdateValue={this.updateWorkerGroup} v-model:value={this.timingForm.workerGroup} /> </NFormItem> <NFormItem label={t('project.workflow.environment_name')} path='environmentCode' > <NSelect options={this.environmentList.filter((item: any) => item.workerGroups?.includes(this.timingForm.workerGroup) )} v-model:value={this.timingForm.environmentCode} /> </NFormItem> <NFormItem label={t('project.workflow.alarm_group')} path='warningGroupId' > <NSelect 
options={this.alertGroups} placeholder={t('project.workflow.please_choose')} v-model:value={this.timingForm.warningGroupId} /> </NFormItem> </NForm> </Modal> ) } })
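The two modal files above (start-modal.tsx and timing-modal.tsx) are the before-fix sources for issue 8680: once a value is chosen in one of their NSelect fields, it cannot be cleared. naive-ui's NSelect exposes a `clearable` prop, so one plausible direction for the fix is simply to enable it. The component below is only a hedged sketch with made-up option data, not the committed change:

```tsx
// Sketch only: enable naive-ui's built-in clear button on a select so the
// chosen option can be removed again.
import { defineComponent, ref } from 'vue'
import { NSelect } from 'naive-ui'

export default defineComponent({
  name: 'ClearableSelectSketch',
  setup() {
    const value = ref<string | null>(null)
    const options = [{ label: 'default', value: 'default' }]
    // `clearable` renders a small clear icon inside the select.
    return () => (
      <NSelect
        clearable
        value={value.value}
        options={options}
        onUpdateValue={(v: string | null) => (value.value = v)}
      />
    )
  }
})
```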
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,622
[Feature][UI-Next][V1.0.0-Alpha] process definition/instance operation button too small to click
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ![](https://vip1.loli.io/2022/03/01/7PDMXIA8U4wtWrf.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8622
https://github.com/apache/dolphinscheduler/pull/8702
63c476ac413597a30dd0f5dd92797c2873449825
6dc4be54e0b4f71c2ed2c4505bcaa0cb4d46cc73
"2022-03-01T08:32:48Z"
java
"2022-03-05T03:58:16Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/table-action.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs } from 'vue' import { NSpace, NTooltip, NButton, NIcon, NPopconfirm } from 'naive-ui' import { DeleteOutlined, DownloadOutlined, FormOutlined, InfoCircleFilled, PlayCircleOutlined, ClockCircleOutlined, CopyOutlined, FieldTimeOutlined, ExportOutlined, ApartmentOutlined, UploadOutlined } from '@vicons/antd' import { useI18n } from 'vue-i18n' import { IDefinitionData } from '../types' const props = { row: { type: Object as PropType<IDefinitionData> } } export default defineComponent({ name: 'TableAction', props, emits: [ 'updateList', 'startWorkflow', 'timingWorkflow', 'versionWorkflow', 'deleteWorkflow', 'releaseWorkflow', 'copyWorkflow', 'exportWorkflow', 'gotoTimingManage', 'gotoWorkflowTree' ], setup(props, ctx) { const handleStartWorkflow = () => { ctx.emit('startWorkflow') } const handleTimingWorkflow = () => { ctx.emit('timingWorkflow') } const handleVersionWorkflow = () => { ctx.emit('versionWorkflow') } const handleDeleteWorkflow = () => { ctx.emit('deleteWorkflow') } const handleReleaseWorkflow = () => { ctx.emit('releaseWorkflow') } const handleCopyWorkflow = () => { ctx.emit('copyWorkflow') } const handleExportWorkflow = () => { ctx.emit('exportWorkflow') } const handleGotoTimingManage = () => { ctx.emit('gotoTimingManage') } const handleGotoWorkflowTree = () => { ctx.emit('gotoWorkflowTree') } return { handleStartWorkflow, handleTimingWorkflow, handleVersionWorkflow, handleDeleteWorkflow, handleReleaseWorkflow, handleCopyWorkflow, handleExportWorkflow, handleGotoTimingManage, handleGotoWorkflowTree, ...toRefs(props) } }, render() { const { t } = useI18n() const releaseState = this.row?.releaseState const scheduleReleaseState = this.row?.scheduleReleaseState return ( <NSpace> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.edit'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} /* TODO: Edit workflow */ > <NIcon> <FormOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.start'), trigger: () => ( <NButton size='tiny' type='primary' tag='div' circle onClick={this.handleStartWorkflow} disabled={releaseState === 'OFFLINE'} > <NIcon> <PlayCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.timing'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleTimingWorkflow} disabled={releaseState !== 'ONLINE' || !!scheduleReleaseState} > <NIcon> <ClockCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => releaseState === 'ONLINE'? 
t('project.workflow.down_line'):t('project.workflow.up_line'), trigger: () => ( <NButton size='tiny' type={releaseState === 'ONLINE' ? 'warning' : 'error'} tag='div' circle onClick={this.handleReleaseWorkflow} > <NIcon> {releaseState === 'ONLINE' ? ( <DownloadOutlined /> ) : ( <UploadOutlined /> )} </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.copy_workflow'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} onClick={this.handleCopyWorkflow} > <NIcon> <CopyOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.cron_manage'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle disabled={releaseState === 'OFFLINE'} onClick={this.handleGotoTimingManage} > <NIcon> <FieldTimeOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.delete'), trigger: () => ( <NButton size='tiny' type='error' tag='div' circle disabled={releaseState === 'ONLINE'} > <NPopconfirm onPositiveClick={this.handleDeleteWorkflow}> {{ default: () => t('project.workflow.delete_confirm'), icon: () => ( <NIcon> <InfoCircleFilled /> </NIcon> ), trigger: () => ( <NIcon> <DeleteOutlined /> </NIcon> ) }} </NPopconfirm> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.tree_view'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleGotoWorkflowTree} > <NIcon> <ApartmentOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.export'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleExportWorkflow} > <NIcon> <ExportOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.version_info'), trigger: () => ( <NButton size='tiny' type='info' tag='div' circle onClick={this.handleVersionWorkflow} > <NIcon> <InfoCircleFilled /> </NIcon> </NButton> ) }} </NTooltip> </NSpace> ) } })
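The table-action.tsx above is the before-fix source for issue 8622, where every row-action NButton uses size='tiny' and is hard to click. One plausible direction for the fix is simply a larger button size; the fragment below is a hedged sketch of that idea, not the committed change:

```tsx
// Sketch only: a row-action button rendered at a larger, easier-to-click size.
import { NButton, NIcon } from 'naive-ui'
import { FormOutlined } from '@vicons/antd'

export const EditActionSketch = () => (
  <NButton size='small' type='info' circle>
    <NIcon>
      <FormOutlined />
    </NIcon>
  </NButton>
)
```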
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,622
[Feature][UI-Next][V1.0.0-Alpha] process definition/instance operation button too small to click
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ![](https://vip1.loli.io/2022/03/01/7PDMXIA8U4wtWrf.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8622
https://github.com/apache/dolphinscheduler/pull/8702
63c476ac413597a30dd0f5dd92797c2873449825
6dc4be54e0b4f71c2ed2c4505bcaa0cb4d46cc73
"2022-03-01T08:32:48Z"
java
"2022-03-05T03:58:16Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/use-table.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { h, ref, reactive } from 'vue' import { useI18n } from 'vue-i18n' import { useRouter } from 'vue-router' import type { Router } from 'vue-router' import type { TableColumns } from 'naive-ui/es/data-table/src/interface' import { useAsyncState } from '@vueuse/core' import { batchCopyByCodes, batchExportByCodes, deleteByCode, queryListPaging, release } from '@/service/modules/process-definition' import TableAction from './components/table-action' import { IDefinitionParam } from './types' import styles from './index.module.scss' import { NEllipsis, NTag } from 'naive-ui' export function useTable() { const { t } = useI18n() const router: Router = useRouter() const variables = reactive({ columns: [], row: {}, tableData: [], projectCode: ref(Number(router.currentRoute.value.params.projectCode)), page: ref(1), pageSize: ref(10), searchVal: ref(), totalPage: ref(1), showRef: ref(false), startShowRef: ref(false), timingShowRef: ref(false), versionShowRef: ref(false) }) const createColumns = (variables: any) => { variables.columns = [ { title: '#', key: 'id', width: 50, render: (row, index) => index + 1 }, { title: t('project.workflow.workflow_name'), key: 'name', width: 200, render: (row) => h( NEllipsis, { style: 'max-width: 200px; color: #2080f0' }, { default: () => h( 'a', { href: 'javascript:', class: styles.links, onClick: () => router.push({ name: 'workflow-definition-detail', params: { code: row.code } }) }, row.name ), tooltip: () => row.name } ) }, { title: t('project.workflow.status'), key: 'releaseState', render: (row) => row.releaseState === 'ONLINE' ? 
t('project.workflow.up_line') : t('project.workflow.down_line') }, { title: t('project.workflow.create_time'), key: 'createTime', width: 150 }, { title: t('project.workflow.update_time'), key: 'updateTime', width: 150 }, { title: t('project.workflow.description'), key: 'description' }, { title: t('project.workflow.create_user'), key: 'userName' }, { title: t('project.workflow.modify_user'), key: 'modifyBy' }, { title: t('project.workflow.schedule_publish_status'), key: 'scheduleReleaseState', render: (row) => { if (row.scheduleReleaseState === 'ONLINE') { return h( NTag, { type: 'success', size: 'small' }, { default: () => t('project.workflow.up_line') } ) } else if (row.scheduleReleaseState === 'OFFLINE') { return h( NTag, { type: 'warning', size: 'small' }, { default: () => t('project.workflow.down_line') } ) } else { return '-' } } }, { title: t('project.workflow.operation'), key: 'operation', width: 300, fixed: 'right', className: styles.operation, render: (row) => h(TableAction, { row, onStartWorkflow: () => startWorkflow(row), onTimingWorkflow: () => timingWorkflow(row), onVersionWorkflow: () => versionWorkflow(row), onDeleteWorkflow: () => deleteWorkflow(row), onReleaseWorkflow: () => releaseWorkflow(row), onCopyWorkflow: () => copyWorkflow(row), onExportWorkflow: () => exportWorkflow(row), onGotoTimingManage: () => gotoTimingManage(row), onGotoWorkflowTree: () => gotoWorkflowTree(row) }) } ] as TableColumns<any> } const startWorkflow = (row: any) => { variables.startShowRef = true variables.row = row } const timingWorkflow = (row: any) => { variables.timingShowRef = true variables.row = row } const versionWorkflow = (row: any) => { variables.versionShowRef = true variables.row = row } const deleteWorkflow = (row: any) => { deleteByCode(variables.projectCode, row.code) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } const releaseWorkflow = (row: any) => { const data = { name: row.name, releaseState: (row.releaseState === 'ONLINE' ? 'OFFLINE' : 'ONLINE') as | 'OFFLINE' | 'ONLINE' } release(data, variables.projectCode, row.code) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } const copyWorkflow = (row: any) => { const data = { codes: String(row.code), targetProjectCode: variables.projectCode } batchCopyByCodes(data, variables.projectCode) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } const downloadBlob = (data: any, fileNameS = 'json') => { if (!data) { return } const blob = new Blob([data]) const fileName = `${fileNameS}.json` if ('download' in document.createElement('a')) { // Not IE const url = window.URL.createObjectURL(blob) const link = document.createElement('a') link.style.display = 'none' link.href = url link.setAttribute('download', fileName) document.body.appendChild(link) link.click() document.body.removeChild(link) // remove element after downloading is complete. 
window.URL.revokeObjectURL(url) // release blob object } else { // IE 10+ if (window.navigator.msSaveBlob) { window.navigator.msSaveBlob(blob, fileName) } } } const exportWorkflow = (row: any) => { const fileName = 'workflow_' + new Date().getTime() const data = { codes: String(row.code) } batchExportByCodes(data, variables.projectCode) .then((res: any) => { downloadBlob(res, fileName) }) .catch((error: any) => { window.$message.error(error.message) }) } const gotoTimingManage = (row: any) => { router.push({ name: 'workflow-definition-timing', params: { projectCode: variables.projectCode, definitionCode: row.code } }) } const gotoWorkflowTree = (row: any) => { router.push({ name: 'workflow-definition-tree', params: { projectCode: variables.projectCode, definitionCode: row.code } }) } const getTableData = (params: IDefinitionParam) => { const { state } = useAsyncState( queryListPaging({ ...params }, variables.projectCode).then((res: any) => { variables.totalPage = res.totalPage variables.tableData = res.totalList.map((item: any) => { return { ...item } }) }), { total: 0, table: [] } ) return state } return { variables, createColumns, getTableData } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,633
[Bug][UI Next][V1.0.0-Alpha] Workflow definition click to modify does not get the expected response
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Clicking edit on a workflow definition does not get the expected response. <img width="1429" alt="image" src="https://user-images.githubusercontent.com/76080484/156142995-927b79fd-ab03-4a2a-9c8a-84a9edf24851.png"> ### What you expected to happen Successfully jump to the workflow editing page. ### How to reproduce Click the edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8633
https://github.com/apache/dolphinscheduler/pull/8705
6dc4be54e0b4f71c2ed2c4505bcaa0cb4d46cc73
304d7a76587ccff45ae65bdc96d895b697c4a88f
"2022-03-01T09:37:56Z"
java
"2022-03-05T05:35:26Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/table-action.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs } from 'vue' import { NSpace, NTooltip, NButton, NIcon, NPopconfirm } from 'naive-ui' import { DeleteOutlined, DownloadOutlined, FormOutlined, InfoCircleFilled, PlayCircleOutlined, ClockCircleOutlined, CopyOutlined, FieldTimeOutlined, ExportOutlined, ApartmentOutlined, UploadOutlined } from '@vicons/antd' import { useI18n } from 'vue-i18n' import { IDefinitionData } from '../types' const props = { row: { type: Object as PropType<IDefinitionData> } } export default defineComponent({ name: 'TableAction', props, emits: [ 'updateList', 'startWorkflow', 'timingWorkflow', 'versionWorkflow', 'deleteWorkflow', 'releaseWorkflow', 'copyWorkflow', 'exportWorkflow', 'gotoTimingManage', 'gotoWorkflowTree' ], setup(props, ctx) { const handleStartWorkflow = () => { ctx.emit('startWorkflow') } const handleTimingWorkflow = () => { ctx.emit('timingWorkflow') } const handleVersionWorkflow = () => { ctx.emit('versionWorkflow') } const handleDeleteWorkflow = () => { ctx.emit('deleteWorkflow') } const handleReleaseWorkflow = () => { ctx.emit('releaseWorkflow') } const handleCopyWorkflow = () => { ctx.emit('copyWorkflow') } const handleExportWorkflow = () => { ctx.emit('exportWorkflow') } const handleGotoTimingManage = () => { ctx.emit('gotoTimingManage') } const handleGotoWorkflowTree = () => { ctx.emit('gotoWorkflowTree') } return { handleStartWorkflow, handleTimingWorkflow, handleVersionWorkflow, handleDeleteWorkflow, handleReleaseWorkflow, handleCopyWorkflow, handleExportWorkflow, handleGotoTimingManage, handleGotoWorkflowTree, ...toRefs(props) } }, render() { const { t } = useI18n() const releaseState = this.row?.releaseState const scheduleReleaseState = this.row?.scheduleReleaseState return ( <NSpace> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.edit'), trigger: () => ( <NButton size='small' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} /* TODO: Edit workflow */ > <NIcon> <FormOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.start'), trigger: () => ( <NButton size='small' type='primary' tag='div' circle onClick={this.handleStartWorkflow} disabled={releaseState === 'OFFLINE'} > <NIcon> <PlayCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.timing'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleTimingWorkflow} disabled={releaseState !== 'ONLINE' || !!scheduleReleaseState} > <NIcon> <ClockCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => releaseState === 'ONLINE'? 
t('project.workflow.down_line'):t('project.workflow.up_line'), trigger: () => ( <NButton size='small' type={releaseState === 'ONLINE' ? 'warning' : 'error'} tag='div' circle onClick={this.handleReleaseWorkflow} > <NIcon> {releaseState === 'ONLINE' ? ( <DownloadOutlined /> ) : ( <UploadOutlined /> )} </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.copy_workflow'), trigger: () => ( <NButton size='small' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} onClick={this.handleCopyWorkflow} > <NIcon> <CopyOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.cron_manage'), trigger: () => ( <NButton size='small' type='info' tag='div' circle disabled={releaseState === 'OFFLINE'} onClick={this.handleGotoTimingManage} > <NIcon> <FieldTimeOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.delete'), trigger: () => ( <NButton size='small' type='error' tag='div' circle disabled={releaseState === 'ONLINE'} > <NPopconfirm onPositiveClick={this.handleDeleteWorkflow}> {{ default: () => t('project.workflow.delete_confirm'), icon: () => ( <NIcon> <InfoCircleFilled /> </NIcon> ), trigger: () => ( <NIcon> <DeleteOutlined /> </NIcon> ) }} </NPopconfirm> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.tree_view'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleGotoWorkflowTree} > <NIcon> <ApartmentOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.export'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleExportWorkflow} > <NIcon> <ExportOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.version_info'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleVersionWorkflow} > <NIcon> <InfoCircleFilled /> </NIcon> </NButton> ) }} </NTooltip> </NSpace> ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,633
[Bug][UI Next][V1.0.0-Alpha] Workflow definition click to modify does not get the expected response
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Clicking edit on a workflow definition does not get the expected response. <img width="1429" alt="image" src="https://user-images.githubusercontent.com/76080484/156142995-927b79fd-ab03-4a2a-9c8a-84a9edf24851.png"> ### What you expected to happen Successfully jump to the workflow editing page. ### How to reproduce Click the edit button. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8633
https://github.com/apache/dolphinscheduler/pull/8705
6dc4be54e0b4f71c2ed2c4505bcaa0cb4d46cc73
304d7a76587ccff45ae65bdc96d895b697c4a88f
"2022-03-01T09:37:56Z"
java
"2022-03-05T05:35:26Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/use-table.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { h, ref, reactive } from 'vue' import { useI18n } from 'vue-i18n' import { useRouter } from 'vue-router' import type { Router } from 'vue-router' import type { TableColumns } from 'naive-ui/es/data-table/src/interface' import { useAsyncState } from '@vueuse/core' import { batchCopyByCodes, batchExportByCodes, deleteByCode, queryListPaging, release } from '@/service/modules/process-definition' import TableAction from './components/table-action' import { IDefinitionParam } from './types' import styles from './index.module.scss' import { NEllipsis, NTag } from 'naive-ui' export function useTable() { const { t } = useI18n() const router: Router = useRouter() const variables = reactive({ columns: [], row: {}, tableData: [], projectCode: ref(Number(router.currentRoute.value.params.projectCode)), page: ref(1), pageSize: ref(10), searchVal: ref(), totalPage: ref(1), showRef: ref(false), startShowRef: ref(false), timingShowRef: ref(false), versionShowRef: ref(false) }) const createColumns = (variables: any) => { variables.columns = [ { title: '#', key: 'id', width: 50, render: (row, index) => index + 1 }, { title: t('project.workflow.workflow_name'), key: 'name', width: 200, render: (row) => h( NEllipsis, { style: 'max-width: 200px; color: #2080f0' }, { default: () => h( 'a', { href: 'javascript:', class: styles.links, onClick: () => router.push({ name: 'workflow-definition-detail', params: { code: row.code } }) }, row.name ), tooltip: () => row.name } ) }, { title: t('project.workflow.status'), key: 'releaseState', render: (row) => row.releaseState === 'ONLINE' ? 
t('project.workflow.up_line') : t('project.workflow.down_line') }, { title: t('project.workflow.create_time'), key: 'createTime', width: 150 }, { title: t('project.workflow.update_time'), key: 'updateTime', width: 150 }, { title: t('project.workflow.description'), key: 'description' }, { title: t('project.workflow.create_user'), key: 'userName' }, { title: t('project.workflow.modify_user'), key: 'modifyBy' }, { title: t('project.workflow.schedule_publish_status'), key: 'scheduleReleaseState', render: (row) => { if (row.scheduleReleaseState === 'ONLINE') { return h( NTag, { type: 'success', size: 'small' }, { default: () => t('project.workflow.up_line') } ) } else if (row.scheduleReleaseState === 'OFFLINE') { return h( NTag, { type: 'warning', size: 'small' }, { default: () => t('project.workflow.down_line') } ) } else { return '-' } } }, { title: t('project.workflow.operation'), key: 'operation', width: 360, fixed: 'right', className: styles.operation, render: (row) => h(TableAction, { row, onStartWorkflow: () => startWorkflow(row), onTimingWorkflow: () => timingWorkflow(row), onVersionWorkflow: () => versionWorkflow(row), onDeleteWorkflow: () => deleteWorkflow(row), onReleaseWorkflow: () => releaseWorkflow(row), onCopyWorkflow: () => copyWorkflow(row), onExportWorkflow: () => exportWorkflow(row), onGotoTimingManage: () => gotoTimingManage(row), onGotoWorkflowTree: () => gotoWorkflowTree(row) }) } ] as TableColumns<any> } const startWorkflow = (row: any) => { variables.startShowRef = true variables.row = row } const timingWorkflow = (row: any) => { variables.timingShowRef = true variables.row = row } const versionWorkflow = (row: any) => { variables.versionShowRef = true variables.row = row } const deleteWorkflow = (row: any) => { deleteByCode(variables.projectCode, row.code) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } const releaseWorkflow = (row: any) => { const data = { name: row.name, releaseState: (row.releaseState === 'ONLINE' ? 'OFFLINE' : 'ONLINE') as | 'OFFLINE' | 'ONLINE' } release(data, variables.projectCode, row.code) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } const copyWorkflow = (row: any) => { const data = { codes: String(row.code), targetProjectCode: variables.projectCode } batchCopyByCodes(data, variables.projectCode) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } const downloadBlob = (data: any, fileNameS = 'json') => { if (!data) { return } const blob = new Blob([data]) const fileName = `${fileNameS}.json` if ('download' in document.createElement('a')) { // Not IE const url = window.URL.createObjectURL(blob) const link = document.createElement('a') link.style.display = 'none' link.href = url link.setAttribute('download', fileName) document.body.appendChild(link) link.click() document.body.removeChild(link) // remove element after downloading is complete. 
window.URL.revokeObjectURL(url) // release blob object } else { // IE 10+ if (window.navigator.msSaveBlob) { window.navigator.msSaveBlob(blob, fileName) } } } const exportWorkflow = (row: any) => { const fileName = 'workflow_' + new Date().getTime() const data = { codes: String(row.code) } batchExportByCodes(data, variables.projectCode) .then((res: any) => { downloadBlob(res, fileName) }) .catch((error: any) => { window.$message.error(error.message) }) } const gotoTimingManage = (row: any) => { router.push({ name: 'workflow-definition-timing', params: { projectCode: variables.projectCode, definitionCode: row.code } }) } const gotoWorkflowTree = (row: any) => { router.push({ name: 'workflow-definition-tree', params: { projectCode: variables.projectCode, definitionCode: row.code } }) } const getTableData = (params: IDefinitionParam) => { const { state } = useAsyncState( queryListPaging({ ...params }, variables.projectCode).then((res: any) => { variables.totalPage = res.totalPage variables.tableData = res.totalList.map((item: any) => { return { ...item } }) }), { total: 0, table: [] } ) return state } return { variables, createColumns, getTableData } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,624
[Bug][UI-Next][V1.0.0-Alpha] click timing error in process definition page
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![](https://vip2.loli.io/2022/03/01/7cNiWD5Uyw18EFB.png) ### What you expected to happen click successfully. ### How to reproduce above. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8624
https://github.com/apache/dolphinscheduler/pull/8704
304d7a76587ccff45ae65bdc96d895b697c4a88f
26f50660f39dfec18d4d03343b49c5b8b423a1d5
"2022-03-01T08:38:44Z"
java
"2022-03-05T06:40:26Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/timing-modal.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs, h, onMounted, ref, watch } from 'vue' import { useI18n } from 'vue-i18n' import Modal from '@/components/modal' import { useForm } from './use-form' import { useModal } from './use-modal' import { NForm, NFormItem, NButton, NIcon, NInput, NSpace, NRadio, NRadioGroup, NSelect, NDatePicker, NInputGroup, NList, NListItem, NThing, NPopover } from 'naive-ui' import { ArrowDownOutlined, ArrowUpOutlined } from '@vicons/antd' import { timezoneList } from '@/utils/timezone' import Crontab from '@/components/crontab' const props = { row: { type: Object, default: {} }, show: { type: Boolean as PropType<boolean>, default: false }, type: { type: String as PropType<String>, default: 'create' } } export default defineComponent({ name: 'workflowDefinitionStart', props, emits: ['update:show', 'update:row', 'updateList'], setup(props, ctx) { const crontabRef = ref() const parallelismRef = ref(false) const { t } = useI18n() const { timingState } = useForm() const { variables, handleCreateTiming, handleUpdateTiming, getWorkerGroups, getAlertGroups, getEnvironmentList, getPreviewSchedule } = useModal(timingState, ctx) const hideModal = () => { ctx.emit('update:show') } const handleTiming = () => { if (props.type === 'create') { handleCreateTiming(props.row.code as number) } else { handleUpdateTiming(props.row.id) } } const generalWarningTypeListOptions = () => [ { value: 'NONE', label: t('project.workflow.none_send') }, { value: 'SUCCESS', label: t('project.workflow.success_send') }, { value: 'FAILURE', label: t('project.workflow.failure_send') }, { value: 'ALL', label: t('project.workflow.all_send') } ] const generalPriorityList = () => [ { value: 'HIGHEST', label: 'HIGHEST', color: '#ff0000', icon: ArrowUpOutlined }, { value: 'HIGH', label: 'HIGH', color: '#ff0000', icon: ArrowUpOutlined }, { value: 'MEDIUM', label: 'MEDIUM', color: '#EA7D24', icon: ArrowUpOutlined }, { value: 'LOW', label: 'LOW', color: '#2A8734', icon: ArrowDownOutlined }, { value: 'LOWEST', label: 'LOWEST', color: '#2A8734', icon: ArrowDownOutlined } ] const timezoneOptions = () => timezoneList.map((item) => ({ label: item, value: item })) const renderLabel = (option: any) => { return [ h( NIcon, { style: { verticalAlign: 'middle', marginRight: '4px', marginBottom: '3px' }, color: option.color }, { default: () => h(option.icon) } ), option.label ] } const updateWorkerGroup = () => { timingState.timingForm.environmentCode = null } const handlePreview = () => { getPreviewSchedule() } onMounted(() => { getWorkerGroups() getAlertGroups() getEnvironmentList() }) watch( () => props.row, () => { timingState.timingForm.startEndTime = [ new Date(props.row.startTime), new Date(props.row.endTime) ] timingState.timingForm.crontab = 
props.row.crontab timingState.timingForm.timezoneId = props.row.timezoneId timingState.timingForm.failureStrategy = props.row.failureStrategy timingState.timingForm.warningType = props.row.warningType timingState.timingForm.processInstancePriority = props.row.processInstancePriority timingState.timingForm.warningGroupId = props.row.warningGroupId timingState.timingForm.workerGroup = props.row.workerGroup timingState.timingForm.environmentCode = props.row.environmentCode } ) return { t, crontabRef, parallelismRef, hideModal, handleTiming, generalWarningTypeListOptions, generalPriorityList, timezoneOptions, renderLabel, updateWorkerGroup, handlePreview, ...toRefs(variables), ...toRefs(timingState), ...toRefs(props) } }, render() { const { t } = this if (Number(this.timingForm.warningGroupId) === 0) { this.timingForm.warningGroupId = '' } return ( <Modal show={this.show} title={t('project.workflow.set_parameters_before_timing')} onCancel={this.hideModal} onConfirm={this.handleTiming} > <NForm ref='timingFormRef' label-placement='left' label-width='160'> <NFormItem label={t('project.workflow.start_and_stop_time')} path='startEndTime' > <NDatePicker type='datetimerange' clearable v-model:value={this.timingForm.startEndTime} /> </NFormItem> <NFormItem label={t('project.workflow.timing')} path='crontab'> <NInputGroup> <NPopover trigger='click' showArrow={false} placement='bottom' style={{ width: '500px' }} > {{ trigger: () => ( <NInput style={{ width: '80%' }} readonly={true} v-model:value={this.timingForm.crontab} ></NInput> ), default: () => ( <Crontab v-model:value={this.timingForm.crontab} /> ) }} </NPopover> <NButton type='primary' ghost onClick={this.handlePreview}> {t('project.workflow.execute_time')} </NButton> </NInputGroup> </NFormItem> <NFormItem label={t('project.workflow.timezone')} path='timezoneId' showFeedback={false} > <NSelect v-model:value={this.timingForm.timezoneId} options={this.timezoneOptions()} /> </NFormItem> <NFormItem label=' ' showFeedback={false}> <NList> <NListItem> <NThing description={t('project.workflow.next_five_execution_times')} > {this.schedulePreviewList.map((item: string) => ( <NSpace> {item} <br /> </NSpace> ))} </NThing> </NListItem> </NList> </NFormItem> <NFormItem label={t('project.workflow.failure_strategy')} path='failureStrategy' > <NRadioGroup v-model:value={this.timingForm.failureStrategy}> <NSpace> <NRadio value='CONTINUE'> {t('project.workflow.continue')} </NRadio> <NRadio value='END'>{t('project.workflow.end')}</NRadio> </NSpace> </NRadioGroup> </NFormItem> <NFormItem label={t('project.workflow.notification_strategy')} path='warningType' > <NSelect options={this.generalWarningTypeListOptions()} v-model:value={this.timingForm.warningType} /> </NFormItem> <NFormItem label={t('project.workflow.workflow_priority')} path='processInstancePriority' > <NSelect options={this.generalPriorityList()} renderLabel={this.renderLabel} v-model:value={this.timingForm.processInstancePriority} /> </NFormItem> <NFormItem label={t('project.workflow.worker_group')} path='workerGroup' > <NSelect options={this.workerGroups} onUpdateValue={this.updateWorkerGroup} v-model:value={this.timingForm.workerGroup} /> </NFormItem> <NFormItem label={t('project.workflow.environment_name')} path='environmentCode' > <NSelect options={this.environmentList.filter((item: any) => item.workerGroups?.includes(this.timingForm.workerGroup) )} v-model:value={this.timingForm.environmentCode} clearable /> </NFormItem> <NFormItem label={t('project.workflow.alarm_group')} path='warningGroupId' > 
<NSelect options={this.alertGroups} placeholder={t('project.workflow.please_choose')} v-model:value={this.timingForm.warningGroupId} clearable /> </NFormItem> </NForm> </Modal> ) } })
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,709
[Bug][UI Next][V1.0.0-Alpha] The timing management list operation button is too small to be clicked accurately.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/19239641/156883277-7d0da2ff-605f-4b0b-9904-64303c34f1eb.png) ### What you expected to happen button is too small ### How to reproduce Make the button bigger ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8709
https://github.com/apache/dolphinscheduler/pull/8712
26f50660f39dfec18d4d03343b49c5b8b423a1d5
b4fdb3c8eb72312cc60ceec832bac838acb28cba
"2022-03-05T12:38:39Z"
java
"2022-03-05T13:56:31Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/timing/use-table.ts
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { h, ref, reactive } from 'vue' import { useI18n } from 'vue-i18n' import { useRouter } from 'vue-router' import { NSpace, NTooltip, NButton, NPopconfirm, NEllipsis } from 'naive-ui' import { deleteScheduleById, offline, online, queryScheduleListPaging } from '@/service/modules/schedules' import { ArrowDownOutlined, ArrowUpOutlined, DeleteOutlined, EditOutlined } from '@vicons/antd' import type { Router } from 'vue-router' import type { TableColumns } from 'naive-ui/es/data-table/src/interface' import { ISearchParam } from './types' import styles from '../index.module.scss' export function useTable() { const { t } = useI18n() const router: Router = useRouter() const columns: TableColumns<any> = [ { title: '#', key: 'id', width: 50, render: (_row, index) => index + 1 }, { title: t('project.workflow.workflow_name'), key: 'processDefinitionName', width: 200, render: (_row) => h( NEllipsis, { style: 'max-width: 200px' }, { default: () => _row.processDefinitionName } ) }, { title: t('project.workflow.start_time'), key: 'startTime' }, { title: t('project.workflow.end_time'), key: 'endTime' }, { title: t('project.workflow.crontab'), key: 'crontab' }, { title: t('project.workflow.failure_strategy'), key: 'failureStrategy' }, { title: t('project.workflow.status'), key: 'releaseState', render: (_row) => _row.releaseState === 'ONLINE' ? t('project.workflow.up_line') : t('project.workflow.down_line') }, { title: t('project.workflow.create_time'), key: 'createTime' }, { title: t('project.workflow.update_time'), key: 'updateTime' }, { title: t('project.workflow.operation'), key: 'operation', fixed: 'right', className: styles.operation, render: (row) => { return h(NSpace, null, { default: () => [ h( NButton, { circle: true, type: 'info', size: 'tiny', disabled: row.releaseState === 'ONLINE', onClick: () => { handleEdit(row) } }, { icon: () => h(EditOutlined) } ), h( NButton, { circle: true, type: row.releaseState === 'ONLINE' ? 'error' : 'warning', size: 'tiny', onClick: () => { handleReleaseState(row) } }, { icon: () => h( row.releaseState === 'ONLINE' ? 
ArrowDownOutlined : ArrowUpOutlined ) } ), h( NPopconfirm, { onPositiveClick: () => { handleDelete(row.id) } }, { trigger: () => h( NTooltip, {}, { trigger: () => h( NButton, { circle: true, type: 'error', size: 'tiny' }, { icon: () => h(DeleteOutlined) } ), default: () => t('project.workflow.delete') } ), default: () => t('project.workflow.delete_confirm') } ) ] }) } } ] const handleEdit = (row: any) => { variables.showRef = true variables.row = row } const variables = reactive({ columns, row: {}, tableData: [], projectCode: ref(Number(router.currentRoute.value.params.projectCode)), page: ref(1), pageSize: ref(10), searchVal: ref(), totalPage: ref(1), showRef: ref(false) }) const getTableData = (params: ISearchParam) => { const definitionCode = Number( router.currentRoute.value.params.definitionCode ) queryScheduleListPaging( { ...params, processDefinitionCode: definitionCode }, variables.projectCode ).then((res: any) => { variables.totalPage = res.totalPage variables.tableData = res.totalList.map((item: any) => { return { ...item } }) }) } const handleReleaseState = (row: any) => { let handle = online if (row.releaseState === 'ONLINE') { handle = offline } handle(variables.projectCode, row.id).then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) } const handleDelete = (id: number) => { /* after deleting data from the current page, you need to jump forward when the page is empty. */ if (variables.tableData.length === 1 && variables.page > 1) { variables.page -= 1 } deleteScheduleById(id, variables.projectCode) .then(() => { window.$message.success(t('project.workflow.success')) getTableData({ pageSize: variables.pageSize, pageNo: variables.page, searchVal: variables.searchVal }) }) .catch((error: any) => { window.$message.error(error.message) }) } return { variables, getTableData } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,713
[Bug] [DOCS] the link of cncf is incorrect
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened the link of cncf is incorrect ### What you expected to happen update the link to right ### How to reproduce notice the link ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8713
https://github.com/apache/dolphinscheduler/pull/8714
b4fdb3c8eb72312cc60ceec832bac838acb28cba
e2af9054b39f73183490fd8a96efe919a29a488d
"2022-03-05T13:29:15Z"
java
"2022-03-05T14:11:59Z"
README.md
Dolphin Scheduler Official Website [dolphinscheduler.apache.org](https://dolphinscheduler.apache.org) ============ [![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html) [![Total Lines](https://tokei.rs/b1/github/apache/dolphinscheduler?category=lines)](https://github.com/apache/dolphinscheduler) [![codecov](https://codecov.io/gh/apache/dolphinscheduler/branch/dev/graph/badge.svg)](https://codecov.io/gh/apache/dolphinscheduler/branch/dev) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache-dolphinscheduler&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache-dolphinscheduler) [![Twitter Follow](https://img.shields.io/twitter/follow/dolphinschedule.svg?style=social&label=Follow)](https://twitter.com/dolphinschedule) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://join.slack.com/t/asf-dolphinscheduler/shared_invite/zt-omtdhuio-_JISsxYhiVsltmC5h38yfw) [![Stargazers over time](https://starchart.cc/apache/dolphinscheduler.svg)](https://starchart.cc/apache/dolphinscheduler) [![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md) [![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md) ## Design Features DolphinScheduler is a distributed and extensible workflow scheduler platform with powerful DAG visual interfaces, dedicated to solving complex job dependencies in the data pipeline and providing various types of jobs available `out of the box`. Its main objectives are as follows: - Associate the tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of the task in real-time. - Support various task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc. - Support scheduling of workflows and dependencies, manual scheduling to pause/stop/recover task, support failure task retry/alarm, recover specified nodes from failure, kill task, etc. - Support the priority of workflows & tasks, task failover, and task timeout alarm or failure. - Support workflow global parameters and node customized parameter settings. - Support online upload/download/management of resource files, etc. Support online file creation and editing. - Support task log online viewing and scrolling and downloading, etc. - Have implemented cluster HA, decentralize Master cluster and Worker cluster through Zookeeper. - Support the viewing of Master/Worker CPU load, memory, and CPU usage metrics. - Support displaying workflow history in tree/Gantt chart, as well as statistical analysis on the task status & process status in each workflow. - Support back-filling data. - Support multi-tenant. - Support internationalization. - More features waiting for partners to explore... ## What's in DolphinScheduler Stability | Accessibility | Features | Scalability | --------- | ------------- | -------- | ------------| Decentralized multi-master and multi-worker | Visualization of workflow key information, such as task status, task type, retry times, task operation machine information, visual variables, and so on at a glance.  |  Support pause, recover operation | Support customized task types support HA | Visualization of all workflow operations, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, provide API mode operations. 
| Users on DolphinScheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler supports distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic adjustment. Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | | ## User Interface Screenshots ![home page](https://user-images.githubusercontent.com/15833811/75218288-bf286400-57d4-11ea-8263-d639c6511d5f.jpg) ![dag](https://user-images.githubusercontent.com/15833811/75236750-3374fe80-57f9-11ea-857d-62a66a5a559d.png) ![process definition list page](https://user-images.githubusercontent.com/15833811/75216886-6f479e00-57d0-11ea-92dd-66e7640a186f.png) ![view task log online](https://user-images.githubusercontent.com/15833811/75216924-9900c500-57d0-11ea-91dc-3522a76bdbbe.png) ![resource management](https://user-images.githubusercontent.com/15833811/75216984-be8dce80-57d0-11ea-840d-58546edc8788.png) ![monitor](https://user-images.githubusercontent.com/59273635/75625839-c698a480-5bfc-11ea-8bbe-895b561b337f.png) ![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png) ![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png) ## QuickStart in Docker Please refer the official website document: [QuickStart in Docker](https://dolphinscheduler.apache.org/en-us/docs/latest/user_doc/guide/installation/docker.html) ## QuickStart in Kubernetes Please refer to the official website document: [QuickStart in Kubernetes](https://dolphinscheduler.apache.org/en-us/docs/latest/user_doc/guide/installation/kubernetes.html) ## How to Build ```bash ./mvnw clean install -Prelease ``` Artifact: ``` dolphinscheduler-dist/target/apache-dolphinscheduler-${latest.release.version}-bin.tar.gz: Binary package of DolphinScheduler dolphinscheduler-dist/target/apache-dolphinscheduler-${latest.release.version}-src.tar.gz: Source code package of DolphinScheduler ``` ## Thanks DolphinScheduler is based on a lot of excellent open-source projects, such as Google guava, guice, grpc, netty, quartz, and many open-source projects of Apache and so on. We would like to express our deep gratitude to all the open-source projects used in Dolphin Scheduler. We hope that we are not only the beneficiaries of open-source, but also give back to the community. Besides, we hope everyone who have the same enthusiasm and passion for open source could join in and contribute to the open-source community! ## Get Help 1. Submit an [issue](https://github.com/apache/dolphinscheduler/issues/new/choose) 1. Subscribe to this mailing list: https://dolphinscheduler.apache.org/en-us/community/development/subscribe.html, then email dev@dolphinscheduler.apache.org ## Community You are very welcome to communicate with the developers and users of Dolphin Scheduler. There are two ways to find them: 1. Join the Slack channel by [this invitation link](https://join.slack.com/t/asf-dolphinscheduler/shared_invite/zt-omtdhuio-_JISsxYhiVsltmC5h38yfw). 2. 
Follow the [Twitter account of DolphinScheduler](https://twitter.com/dolphinschedule) and get the latest news on time. ### Contributor over time [![Contributor over time](https://contributor-graph-api.apiseven.com/contributors-svg?chart=contributorOverTime&repo=apache/dolphinscheduler)](https://www.apiseven.com/en/contributor-graph?chart=contributorOverTime&repo=apache/dolphinscheduler) ## How to Contribute The community welcomes everyone to contribute, please refer to this page to find out more: [How to contribute](https://dolphinscheduler.apache.org/en-us/community/development/contribute.html). # Landscapes <p align="center"> <br/><br/> <img src="https://landscape.cncf.io/images/left-logo.svg" width="150"/>&nbsp;&nbsp;<img src="https://landscape.cncf.io/images/right-logo.svg" width="200"/> <br/><br/> DolphinScheduler enriches the <a href="https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.</a > </p > ## License Please refer to the [LICENSE](https://github.com/apache/dolphinscheduler/blob/dev/LICENSE) file.
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
8,710
[Bug][UI Next][V1.0.0-Alpha] There is no tooltip for timing management table editing and up and down buttons.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/19239641/156883414-c840f2bd-38e9-495d-9c01-c366c9ffe961.png) ### What you expected to happen There is no tooltip for timing management table editing and up and down buttons. ### How to reproduce Add tooltip. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/8710
https://github.com/apache/dolphinscheduler/pull/8716
e2af9054b39f73183490fd8a96efe919a29a488d
698c795d4f3412e175fb28e569bf34c6d8085f0b
"2022-03-05T12:41:37Z"
java
"2022-03-06T12:59:20Z"
dolphinscheduler-ui-next/src/views/projects/workflow/definition/components/table-action.tsx
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { defineComponent, PropType, toRefs } from 'vue' import { NSpace, NTooltip, NButton, NIcon, NPopconfirm } from 'naive-ui' import { DeleteOutlined, DownloadOutlined, FormOutlined, InfoCircleFilled, PlayCircleOutlined, ClockCircleOutlined, CopyOutlined, FieldTimeOutlined, ExportOutlined, ApartmentOutlined, UploadOutlined } from '@vicons/antd' import { useI18n } from 'vue-i18n' import { IDefinitionData } from '../types' const props = { row: { type: Object as PropType<IDefinitionData> } } export default defineComponent({ name: 'TableAction', props, emits: [ 'editWorkflow', 'updateList', 'startWorkflow', 'timingWorkflow', 'versionWorkflow', 'deleteWorkflow', 'releaseWorkflow', 'copyWorkflow', 'exportWorkflow', 'gotoTimingManage', 'gotoWorkflowTree' ], setup(props, ctx) { const handleEditWorkflow = () => { ctx.emit('editWorkflow') } const handleStartWorkflow = () => { ctx.emit('startWorkflow') } const handleTimingWorkflow = () => { ctx.emit('timingWorkflow') } const handleVersionWorkflow = () => { ctx.emit('versionWorkflow') } const handleDeleteWorkflow = () => { ctx.emit('deleteWorkflow') } const handleReleaseWorkflow = () => { ctx.emit('releaseWorkflow') } const handleCopyWorkflow = () => { ctx.emit('copyWorkflow') } const handleExportWorkflow = () => { ctx.emit('exportWorkflow') } const handleGotoTimingManage = () => { ctx.emit('gotoTimingManage') } const handleGotoWorkflowTree = () => { ctx.emit('gotoWorkflowTree') } return { handleEditWorkflow, handleStartWorkflow, handleTimingWorkflow, handleVersionWorkflow, handleDeleteWorkflow, handleReleaseWorkflow, handleCopyWorkflow, handleExportWorkflow, handleGotoTimingManage, handleGotoWorkflowTree, ...toRefs(props) } }, render() { const { t } = useI18n() const releaseState = this.row?.releaseState const scheduleReleaseState = this.row?.scheduleReleaseState return ( <NSpace> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.edit'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleEditWorkflow} disabled={releaseState === 'ONLINE'} /* TODO: Edit workflow */ > <NIcon> <FormOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.start'), trigger: () => ( <NButton size='small' type='primary' tag='div' circle onClick={this.handleStartWorkflow} disabled={releaseState === 'OFFLINE'} > <NIcon> <PlayCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.timing'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleTimingWorkflow} disabled={releaseState !== 'ONLINE' || !!scheduleReleaseState} > <NIcon> <ClockCircleOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip 
trigger={'hover'}> {{ default: () => releaseState === 'ONLINE'? t('project.workflow.down_line'):t('project.workflow.up_line'), trigger: () => ( <NButton size='small' type={releaseState === 'ONLINE' ? 'warning' : 'error'} tag='div' circle onClick={this.handleReleaseWorkflow} > <NIcon> {releaseState === 'ONLINE' ? ( <DownloadOutlined /> ) : ( <UploadOutlined /> )} </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.copy_workflow'), trigger: () => ( <NButton size='small' type='info' tag='div' circle disabled={releaseState === 'ONLINE'} onClick={this.handleCopyWorkflow} > <NIcon> <CopyOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.cron_manage'), trigger: () => ( <NButton size='small' type='info' tag='div' circle disabled={releaseState === 'OFFLINE'} onClick={this.handleGotoTimingManage} > <NIcon> <FieldTimeOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.delete'), trigger: () => ( <NButton size='small' type='error' tag='div' circle disabled={releaseState === 'ONLINE'} > <NPopconfirm onPositiveClick={this.handleDeleteWorkflow}> {{ default: () => t('project.workflow.delete_confirm'), icon: () => ( <NIcon> <InfoCircleFilled /> </NIcon> ), trigger: () => ( <NIcon> <DeleteOutlined /> </NIcon> ) }} </NPopconfirm> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.tree_view'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleGotoWorkflowTree} > <NIcon> <ApartmentOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.export'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleExportWorkflow} > <NIcon> <ExportOutlined /> </NIcon> </NButton> ) }} </NTooltip> <NTooltip trigger={'hover'}> {{ default: () => t('project.workflow.version_info'), trigger: () => ( <NButton size='small' type='info' tag='div' circle onClick={this.handleVersionWorkflow} > <NIcon> <InfoCircleFilled /> </NIcon> </NButton> ) }} </NTooltip> </NSpace> ) } })