Dataset columns:

- status: string (1 class)
- repo_name: string (31 classes)
- repo_url: string (31 classes)
- issue_id: int64 (1 to 104k)
- title: string (length 4 to 369)
- body: string (length 0 to 254k, nullable)
- issue_url: string (length 37 to 56)
- pull_url: string (length 37 to 54)
- before_fix_sha: string (length 40)
- after_fix_sha: string (length 40)
- report_datetime: unknown
- language: string (5 classes)
- commit_datetime: unknown
- updated_file: string (length 4 to 188)
- file_content: string (length 0 to 5.12M)

---
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 6336
title: [Bug] [UT] UT failed in DAO module
body:

### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When I run the UT in `TaskInstanceMapperTest`, it fails.
The `process_definition_code` column is null.
```java
### SQL: INSERT INTO t_ds_process_instance ( warning_group_id, timeout, executor_id, max_try_times, tenant_id, command_param, run_times, process_definition_version ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )
### Cause: java.sql.SQLException: Field 'process_definition_code' doesn't have a default value
; Field 'process_definition_code' doesn't have a default value; nested exception is java.sql.SQLException: Field 'process_definition_code' doesn't have a default value
```
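
A minimal sketch of the likely remedy, assuming the failure comes from the test's `before()` method inserting a `ProcessInstance` without populating `process_definition_code` (the change actually merged in the linked PR may differ):

```java
// Hypothetical fix sketch for TaskInstanceMapperTest.before(): the column
// t_ds_process_instance.process_definition_code is NOT NULL with no default,
// so it must be populated before the insert.
@Before
public void before() {
    ProcessInstance processInstance = new ProcessInstance();
    processInstance.setWarningGroupId(0);
    processInstance.setCommandParam("");
    processInstance.setProcessDefinitionCode(1L); // assumed: any non-null code satisfies the schema
    processInstanceMapper.insert(processInstance);
    processInstanceId = processInstance.getId();
}
```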
### What you expected to happen
The UT executes successfully.
### How to reproduce
Execute `TaskInstanceMapperTest`.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
issue_url: https://github.com/apache/dolphinscheduler/issues/6336
pull_url: https://github.com/apache/dolphinscheduler/pull/6435
before_fix_sha: ea2a8d26a0d42683309b8bda9ef24f4daaab266b
after_fix_sha: 4fbee7dc4b178b13c4a795fa848a86ec94c5cd27
report_datetime: 2021-09-24T10:36:04Z
language: java
commit_datetime: 2021-10-03T15:25:06Z
updated_file: dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapperTest.java
file_content:

```java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import java.util.Date;
import java.util.List;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
@RunWith(SpringRunner.class)
@SpringBootTest
@Transactional
@Rollback
public class TaskInstanceMapperTest {
@Autowired
TaskInstanceMapper taskInstanceMapper;
@Autowired
ProcessDefinitionMapper processDefinitionMapper;
@Autowired
ProcessInstanceMapper processInstanceMapper;
@Autowired
ProcessInstanceMapMapper processInstanceMapMapper;
private int processInstanceId;
@Before
public void before() {
ProcessInstance processInstance = new ProcessInstance();
processInstance.setWarningGroupId(0);
processInstance.setCommandParam("");
processInstanceMapper.insert(processInstance);
processInstanceId = processInstance.getId();
}
/**
* insert
*
* @return TaskInstance
*/
private TaskInstance insertTaskInstance(int processInstanceId) {
//insertOne
return insertTaskInstance(processInstanceId, TaskType.SHELL.getDesc());
}
/**
* insert
*
* @return ProcessInstance
*/
private ProcessInstance insertProcessInstance() {
ProcessInstance processInstance = new ProcessInstance();
processInstance.setId(1);
processInstance.setName("taskName");
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setStartTime(new Date());
processInstance.setEndTime(new Date());
processInstance.setProcessDefinitionCode(1L);
processInstanceMapper.insert(processInstance);
return processInstanceMapper.queryByProcessDefineCode(1L,1).get(0);
}
/**
* construct a task instance and then insert
*/
private TaskInstance insertTaskInstance(int processInstanceId, String taskType) {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setFlag(Flag.YES);
taskInstance.setName("us task");
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
taskInstance.setEndTime(new Date());
taskInstance.setProcessInstanceId(processInstanceId);
taskInstance.setTaskType(taskType);
taskInstanceMapper.insert(taskInstance);
return taskInstance;
}
/**
* test update
*/
@Test
public void testUpdate() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance taskInstance = insertTaskInstance(processInstance.getId());
// update
int update = taskInstanceMapper.updateById(taskInstance);
Assert.assertEquals(1, update);
taskInstanceMapper.deleteById(taskInstance.getId());
}
/**
* test delete
*/
@Test
public void testDelete() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance taskInstance = insertTaskInstance(processInstance.getId());
int delete = taskInstanceMapper.deleteById(taskInstance.getId());
Assert.assertEquals(1, delete);
}
/**
* test query
*/
@Test
public void testQuery() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance taskInstance = insertTaskInstance(processInstance.getId());
//query
List<TaskInstance> taskInstances = taskInstanceMapper.selectList(null);
taskInstanceMapper.deleteById(taskInstance.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
}
/**
* test query task instance by process instance id and state
*/
@Test
public void testQueryTaskByProcessIdAndState() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
task.setProcessInstanceId(processInstance.getId());
taskInstanceMapper.updateById(task);
List<Integer> taskInstances = taskInstanceMapper.queryTaskByProcessIdAndState(
task.getProcessInstanceId(),
ExecutionStatus.RUNNING_EXECUTION.ordinal()
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
}
/**
* test find valid task list by process instance id
*/
@Test
public void testFindValidTaskListByProcessId() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
TaskInstance task2 = insertTaskInstance(processInstance.getId());
task.setProcessInstanceId(processInstance.getId());
task2.setProcessInstanceId(processInstance.getId());
taskInstanceMapper.updateById(task);
taskInstanceMapper.updateById(task2);
List<TaskInstance> taskInstances = taskInstanceMapper.findValidTaskListByProcessId(
task.getProcessInstanceId(),
Flag.YES
);
task2.setFlag(Flag.NO);
taskInstanceMapper.updateById(task2);
List<TaskInstance> taskInstances1 = taskInstanceMapper.findValidTaskListByProcessId(task.getProcessInstanceId(),
Flag.NO);
taskInstanceMapper.deleteById(task2.getId());
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
Assert.assertNotEquals(taskInstances1.size(), 0);
}
/**
* test query by host and status
*/
@Test
public void testQueryByHostAndStatus() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
task.setHost("111.111.11.11");
taskInstanceMapper.updateById(task);
List<TaskInstance> taskInstances = taskInstanceMapper.queryByHostAndStatus(
task.getHost(), new int[]{ExecutionStatus.RUNNING_EXECUTION.ordinal()}
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
}
/**
* test set failover by host and state array
*/
@Test
public void testSetFailoverByHostAndStateArray() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
task.setHost("111.111.11.11");
taskInstanceMapper.updateById(task);
int setResult = taskInstanceMapper.setFailoverByHostAndStateArray(
task.getHost(),
new int[]{ExecutionStatus.RUNNING_EXECUTION.ordinal()},
ExecutionStatus.NEED_FAULT_TOLERANCE
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(setResult, 0);
}
/**
* test query by task instance id and name
*/
@Test
public void testQueryByInstanceIdAndName() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
task.setHost("111.111.11.11");
taskInstanceMapper.updateById(task);
TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(
task.getProcessInstanceId(),
task.getName()
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstance, null);
}
/**
* test count task instance
*/
@Test
public void testCountTask() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
ProcessDefinition definition = new ProcessDefinition();
definition.setCode(1L);
definition.setProjectCode(1111L);
definition.setCreateTime(new Date());
definition.setUpdateTime(new Date());
processDefinitionMapper.insert(definition);
//task.setProcessDefinitionId(definition.getId());
taskInstanceMapper.updateById(task);
int countTask = taskInstanceMapper.countTask(
new Long[0],
new int[0]
);
int countTask2 = taskInstanceMapper.countTask(
new Long[]{definition.getProjectCode()},
new int[]{task.getId()}
);
taskInstanceMapper.deleteById(task.getId());
processDefinitionMapper.deleteById(definition.getId());
Assert.assertEquals(countTask, 0);
Assert.assertEquals(countTask2, 0);
}
/**
* test count task instance state by user
*/
@Test
public void testCountTaskInstanceStateByUser() {
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
ProcessDefinition definition = new ProcessDefinition();
definition.setCode(1111L);
definition.setProjectCode(1111L);
definition.setCreateTime(new Date());
definition.setUpdateTime(new Date());
processDefinitionMapper.insert(definition);
//task.setProcessDefinitionId(definition.getId());
taskInstanceMapper.updateById(task);
List<ExecuteStatusCount> count = taskInstanceMapper.countTaskInstanceStateByUser(
null, null,
new Long[]{definition.getProjectCode()}
);
processDefinitionMapper.deleteById(definition.getId());
taskInstanceMapper.deleteById(task.getId());
}
/**
* test page
*/
@Test
public void testQueryTaskInstanceListPaging() {
ProcessDefinition definition = new ProcessDefinition();
definition.setCode(1L);
definition.setProjectCode(1111L);
definition.setCreateTime(new Date());
definition.setUpdateTime(new Date());
processDefinitionMapper.insert(definition);
// insert ProcessInstance
ProcessInstance processInstance = insertProcessInstance();
// insert taskInstance
TaskInstance task = insertTaskInstance(processInstance.getId());
Page<TaskInstance> page = new Page(1, 3);
IPage<TaskInstance> taskInstanceIPage = taskInstanceMapper.queryTaskInstanceListPaging(
page,
definition.getProjectCode(),
task.getProcessInstanceId(),
"",
"",
"",
0,
new int[0],
"",
null, null
);
processInstanceMapper.deleteById(processInstance.getId());
taskInstanceMapper.deleteById(task.getId());
processDefinitionMapper.deleteById(definition.getId());
Assert.assertEquals(taskInstanceIPage.getTotal(), 0);
}
}
```

---

status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 6445
title: [Improvement][Standalone] Run h2 in daemon mode
body:

### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
In standalone mode, we use H2 as the database.
If the table-creation SQL is wrong, an exception is thrown, but the server keeps running.
It would be better to stop.
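
For illustration only, a sketch of a fail-fast variant of the schema bootstrap in `StandaloneServer.startDatabase()` (shown in the file content below); the fix merged in the linked PR may differ:

```java
// Sketch of a fail-fast tail for StandaloneServer.startDatabase(); assumes the
// existing ConnectionFactory/ScriptRunner setup visible in the file below.
final DataSource ds = ConnectionFactory.getInstance().getDataSource();
final ScriptRunner runner = new ScriptRunner(ds.getConnection(), true, true);
try (FileReader schema = new FileReader("sql/dolphinscheduler_h2.sql")) {
    runner.runScript(schema);
} catch (Exception e) {
    LOGGER.error("H2 schema creation failed; stopping the standalone server", e);
    System.exit(1); // assumed behavior: exit instead of keeping a broken server alive
}
```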
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
issue_url: https://github.com/apache/dolphinscheduler/issues/6445
pull_url: https://github.com/apache/dolphinscheduler/pull/6446
before_fix_sha: 4fbee7dc4b178b13c4a795fa848a86ec94c5cd27
after_fix_sha: 6529f92883dfd62cdc7241acba8cb16e4fa60a36
report_datetime: 2021-10-04T02:00:47Z
language: java
commit_datetime: 2021-10-06T02:15:36Z
updated_file: dolphinscheduler-standalone-server/src/main/java/org/apache/dolphinscheduler/server/StandaloneServer.java
file_content:

```java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server;
import static org.apache.dolphinscheduler.common.Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME;
import static org.apache.dolphinscheduler.common.Constants.SPRING_DATASOURCE_PASSWORD;
import static org.apache.dolphinscheduler.common.Constants.SPRING_DATASOURCE_URL;
import static org.apache.dolphinscheduler.common.Constants.SPRING_DATASOURCE_USERNAME;
import org.apache.dolphinscheduler.alert.AlertServer;
import org.apache.dolphinscheduler.api.ApiApplicationServer;
import org.apache.dolphinscheduler.common.utils.ScriptRunner;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.apache.dolphinscheduler.server.master.MasterServer;
import org.apache.dolphinscheduler.server.worker.WorkerServer;
import org.apache.curator.test.TestingServer;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.h2.tools.Server;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
@SpringBootApplication
public class StandaloneServer {
private static final Logger LOGGER = LoggerFactory.getLogger(StandaloneServer.class);
public static void main(String[] args) throws Exception {
Thread.currentThread().setName("Standalone-Server");
System.setProperty("spring.profiles.active", "api");
startDatabase();
startRegistry();
startAlertServer();
setTaskPlugin();
new SpringApplicationBuilder(
ApiApplicationServer.class,
MasterServer.class,
WorkerServer.class
).run(args);
}
private static void startAlertServer() {
final Path alertPluginPath = Paths.get(
StandaloneServer.class.getProtectionDomain().getCodeSource().getLocation().getPath(),
"../../../dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/pom.xml"
).toAbsolutePath();
if (Files.exists(alertPluginPath)) {
System.setProperty("alert.plugin.binding", alertPluginPath.toString());
System.setProperty("alert.plugin.dir", "");
}
AlertServer.getInstance().start();
}
private static void startRegistry() throws Exception {
final TestingServer server = new TestingServer(true);
System.setProperty("registry.servers", server.getConnectString());
final Path registryPath = Paths.get(
StandaloneServer.class.getProtectionDomain().getCodeSource().getLocation().getPath(),
"../../../dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/pom.xml"
).toAbsolutePath();
if (Files.exists(registryPath)) {
System.setProperty("registry.plugin.binding", registryPath.toString());
System.setProperty("registry.plugin.dir", "");
}
}
private static void startDatabase() throws IOException, SQLException {
final Path temp = Files.createTempDirectory("dolphinscheduler_");
LOGGER.info("H2 database directory: {}", temp);
System.setProperty(
SPRING_DATASOURCE_DRIVER_CLASS_NAME,
org.h2.Driver.class.getName()
);
System.setProperty(
SPRING_DATASOURCE_URL,
String.format("jdbc:h2:tcp://localhost/%s;MODE=MySQL;DATABASE_TO_LOWER=true", temp.toAbsolutePath())
);
System.setProperty(SPRING_DATASOURCE_USERNAME, "sa");
System.setProperty(SPRING_DATASOURCE_PASSWORD, "");
Server.createTcpServer("-ifNotExists").start();
final DataSource ds = ConnectionFactory.getInstance().getDataSource();
final ScriptRunner runner = new ScriptRunner(ds.getConnection(), true, true);
runner.runScript(new FileReader("sql/dolphinscheduler_h2.sql"));
}
private static void setTaskPlugin() {
final Path taskPluginPath = Paths.get(
StandaloneServer.class.getProtectionDomain().getCodeSource().getLocation().getPath(),
"../../../dolphinscheduler-task-plugin/dolphinscheduler-task-shell/pom.xml"
).toAbsolutePath();
if (Files.exists(taskPluginPath)) {
System.setProperty("task.plugin.binding", taskPluginPath.toString());
System.setProperty("task.plugin.dir", "");
}
}
}
```

---

status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 6455
title: [Bug] [Master] Cannot stop sub process task
body:

### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Cannot stop the sub-process tasks.
### What you expected to happen
The tasks stop normally.
### How to reproduce
Start a process.
Kill the process.
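
Background only, not the fix: a hedged illustration of how such a request arrives at the master, mirroring `StateEventProcessor.process()` in the file content below. `destProcessInstanceId` is a hypothetical placeholder for the value carried by the incoming command:

```java
// Mirrors StateEventProcessor.process(): an incoming state-change command is
// mapped to a StateEvent whose type depends only on whether a task instance
// id is present; the execution status is hardcoded to RUNNING_EXECUTION.
StateEvent stateEvent = new StateEvent();
stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
stateEvent.setProcessInstanceId(destProcessInstanceId); // hypothetical placeholder
stateEvent.setTaskInstanceId(0); // 0 => PROCESS_STATE_CHANGE, otherwise TASK_STATE_CHANGE
stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE);
```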
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
issue_url: https://github.com/apache/dolphinscheduler/issues/6455
pull_url: https://github.com/apache/dolphinscheduler/pull/6458
before_fix_sha: 4d69685ba1f9ecc8595dffc13194ec902531ed51
after_fix_sha: a8baa9553fd64d1414c204c6ba95019b7376f771
report_datetime: 2021-10-08T02:35:44Z
language: java
commit_datetime: 2021-10-08T06:08:12Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/StateEventProcessor.java
file_content:

```java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.processor;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.master.processor.queue.StateEventResponseService;
import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import io.netty.channel.Channel;
/**
* handle state event received from master/api
*/
public class StateEventProcessor implements NettyRequestProcessor {
private final Logger logger = LoggerFactory.getLogger(StateEventProcessor.class);
private StateEventResponseService stateEventResponseService;
public StateEventProcessor() {
stateEventResponseService = SpringApplicationContext.getBean(StateEventResponseService.class);
}
public void init(ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps) {
this.stateEventResponseService.init(processInstanceExecMaps);
}
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.STATE_EVENT_REQUEST == command.getType(), String.format("invalid command type: %s", command.getType()));
StateEventChangeCommand stateEventChangeCommand = JSONUtils.parseObject(command.getBody(), StateEventChangeCommand.class);
StateEvent stateEvent = new StateEvent();
stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
stateEvent.setKey(stateEventChangeCommand.getKey());
stateEvent.setProcessInstanceId(stateEventChangeCommand.getDestProcessInstanceId());
stateEvent.setTaskInstanceId(stateEventChangeCommand.getDestTaskInstanceId());
StateEventType type = stateEvent.getTaskInstanceId() == 0 ? StateEventType.PROCESS_STATE_CHANGE : StateEventType.TASK_STATE_CHANGE;
stateEvent.setType(type);
logger.info("received command : {}", stateEvent);
stateEventResponseService.addResponse(stateEvent);
}
}
```

---

status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 6455
title: [Bug] [Master] Cannot stop sub process task
body:

### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Cannot stop the sub-process tasks.
### What you expected to happen
The tasks stop normally.
### How to reproduce
Start a process.
Kill the process.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
issue_url: https://github.com/apache/dolphinscheduler/issues/6455
pull_url: https://github.com/apache/dolphinscheduler/pull/6458
before_fix_sha: 4d69685ba1f9ecc8595dffc13194ec902531ed51
after_fix_sha: a8baa9553fd64d1414c204c6ba95019b7376f771
report_datetime: 2021-10-08T02:35:44Z
language: java
commit_datetime: 2021-10-08T06:08:12Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
file_content:

```java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Lists;
import com.google.common.collect.Table;
/**
* master exec thread,split dag
*/
public class WorkflowExecuteThread implements Runnable {
/**
* logger of WorkflowExecuteThread
*/
private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class);
/**
* runing TaskNode
*/
private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* process instance
*/
private ProcessInstance processInstance;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task queue
*/
private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private ProcessAlertManager processAlertManager;
/**
* the object of DAG
*/
private DAG<String, TaskNode, TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
*
*/
private NettyExecutorManager nettyExecutorManager;
private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>();
private List<Date> complementListDate = Lists.newLinkedList();
private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create();
private ProcessDefinition processDefinition;
private String key;
private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList;
/**
* start flag, true: start nodes submit completely
*
*/
private boolean isStart = false;
/**
* constructor of WorkflowExecuteThread
*
* @param processInstance processInstance
* @param processService processService
* @param nettyExecutorManager nettyExecutorManager
* @param taskTimeoutCheckList
*/
public WorkflowExecuteThread(ProcessInstance processInstance
, ProcessService processService
, NettyExecutorManager nettyExecutorManager
, ProcessAlertManager processAlertManager
, MasterConfig masterConfig
, ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) {
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = masterConfig;
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyExecutorManager = nettyExecutorManager;
this.processAlertManager = processAlertManager;
this.taskTimeoutCheckList = taskTimeoutCheckList;
}
@Override
public void run() {
try {
startProcess();
handleEvents();
} catch (Exception e) {
logger.error("handler error:", e);
}
}
/**
* the process start nodes are submitted completely.
* @return
*/
public boolean isStart() {
return this.isStart;
}
private void handleEvents() {
while (this.stateEvents.size() > 0) {
try {
StateEvent stateEvent = this.stateEvents.peek();
if (stateEventHandler(stateEvent)) {
this.stateEvents.remove(stateEvent);
}
} catch (Exception e) {
logger.error("state handle error:", e);
}
}
}
public String getKey() {
if (StringUtils.isNotEmpty(key)
|| this.processDefinition == null) {
return key;
}
key = String.format("%d_%d_%d",
this.processDefinition.getCode(),
this.processDefinition.getVersion(),
this.processInstance.getId());
return key;
}
public boolean addStateEvent(StateEvent stateEvent) {
if (processInstance.getId() != stateEvent.getProcessInstanceId()) {
logger.info("state event would be abounded :{}", stateEvent.toString());
return false;
}
this.stateEvents.add(stateEvent);
return true;
}
public int eventSize() {
return this.stateEvents.size();
}
public ProcessInstance getProcessInstance() {
return this.processInstance;
}
private boolean stateEventHandler(StateEvent stateEvent) {
logger.info("process event: {}", stateEvent.toString());
if (!checkStateEvent(stateEvent)) {
return false;
}
boolean result = false;
switch (stateEvent.getType()) {
case PROCESS_STATE_CHANGE:
result = processStateChangeHandler(stateEvent);
break;
case TASK_STATE_CHANGE:
result = taskStateChangeHandler(stateEvent);
break;
case PROCESS_TIMEOUT:
result = processTimeout();
break;
case TASK_TIMEOUT:
result = taskTimeout(stateEvent);
break;
default:
break;
}
if (result) {
this.stateEvents.remove(stateEvent);
}
return result;
}
private boolean taskTimeout(StateEvent stateEvent) {
if (taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) {
return true;
}
TaskInstance taskInstance = taskInstanceHashMap
.row(stateEvent.getTaskInstanceId())
.values()
.iterator().next();
if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) {
return true;
}
TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy();
if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) {
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
taskProcessor.action(TaskAction.TIMEOUT);
return false;
} else {
processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine());
return true;
}
}
private boolean processTimeout() {
this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition);
return true;
}
private boolean taskStateChangeHandler(StateEvent stateEvent) {
TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
if (stateEvent.getExecutionStatus().typeIsFinished()) {
taskFinished(task);
} else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) {
ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
iTaskProcessor.run();
if (iTaskProcessor.taskState().typeIsFinished()) {
task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
taskFinished(task);
}
} else {
logger.error("state handler error: {}", stateEvent.toString());
}
return true;
}
private void taskFinished(TaskInstance task) {
logger.info("work flow {} task {} state:{} ",
processInstance.getId(),
task.getId(),
task.getState());
if (task.taskCanRetry()) {
addTaskToStandByList(task);
return;
}
ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId());
completeTaskList.put(Long.toString(task.getTaskCode()), task);
activeTaskProcessorMaps.remove(task.getId());
taskTimeoutCheckList.remove(task.getId());
if (task.getState().typeIsSuccess()) {
processInstance.setVarPool(task.getVarPool());
processService.saveProcessInstance(processInstance);
submitPostNode(Long.toString(task.getTaskCode()));
} else if (task.getState().typeIsFailure()) {
if (task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
submitPostNode(Long.toString(task.getTaskCode()));
} else {
errorTaskList.put(Long.toString(task.getTaskCode()), task);
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
killAllTasks();
}
}
}
this.updateProcessInstanceState();
}
private boolean checkStateEvent(StateEvent stateEvent) {
if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) {
logger.error("mismatch process instance id: {}, state event:{}",
this.processInstance.getId(),
stateEvent.toString());
return false;
}
return true;
}
private boolean processStateChangeHandler(StateEvent stateEvent) {
try {
logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus());
processInstance = processService.findProcessInstanceById(this.processInstance.getId());
if (processComplementData()) {
return true;
}
if (stateEvent.getExecutionStatus().typeIsFinished()) {
endProcess();
}
if (stateEvent.getExecutionStatus() == ExecutionStatus.READY_STOP) {
killAllTasks();
}
return true;
} catch (Exception e) {
logger.error("process state change error:", e);
}
return true;
}
private boolean processComplementData() throws Exception {
if (!needComplementProcess()) {
return false;
}
Date scheduleDate = processInstance.getScheduleTime();
if (scheduleDate == null) {
scheduleDate = complementListDate.get(0);
} else if (processInstance.getState().typeIsFinished()) {
endProcess();
if (complementListDate.size() <= 0) {
logger.info("process complement end. process id:{}", processInstance.getId());
return true;
}
int index = complementListDate.indexOf(scheduleDate);
if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) {
logger.info("process complement end. process id:{}", processInstance.getId());
// complement data ends || no success
return true;
}
logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}",
processInstance.getId(),
processInstance.getScheduleTime(),
complementListDate.toString());
scheduleDate = complementListDate.get(index + 1);
//the next process complement
processInstance.setId(0);
}
processInstance.setScheduleTime(scheduleDate);
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
this.taskInstanceHashMap.clear();
startProcess();
return true;
}
private boolean needComplementProcess() {
if (processInstance.isComplementData()
&& Flag.NO == processInstance.getIsSubProcess()) {
return true;
}
return false;
}
private void startProcess() throws Exception {
if (this.taskInstanceHashMap.size() == 0) {
isStart = false;
buildFlowDag();
initTaskQueue();
submitPostNode(null);
isStart = true;
}
}
/**
* process end handle
*/
private void endProcess() {
this.stateEvents.clear();
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if (processInstance.getState().typeIsWaitingThread()) {
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser);
}
/**
* generate process dag
*
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
if (this.dag != null) {
return;
}
processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
List<TaskNode> taskNodeList =
processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList());
forbiddenTaskList.clear();
taskNodeList.forEach(taskNode -> {
if (taskNode.isForbidden()) {
forbiddenTaskList.put(Long.toString(taskNode.getCode()), taskNode);
}
});
// generate process to get DAG info
List<String> recoveryNodeCodeList = getRecoveryNodeCodeList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(taskNodeList,
startNodeNameList, recoveryNodeCodeList, processInstance.getTaskDependType());
if (processDag == null) {
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue() {
taskFailedSubmit = false;
activeTaskProcessorMaps.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance task : taskInstanceList) {
if (task.isTaskComplete()) {
completeTaskList.put(Long.toString(task.getTaskCode()), task);
}
if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
continue;
}
if (task.getState().typeIsFailure() && !task.taskCanRetry()) {
errorTaskList.put(Long.toString(task.getTaskCode()), task);
}
}
if (processInstance.isComplementData() && complementListDate.size() == 0) {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
if (complementListDate.size() == 0 && needComplementProcess()) {
complementListDate = CronUtils.getSelfFireDateList(start, end, schedules);
logger.info(" process definition code:{} complement data: {}",
processInstance.getProcessDefinitionCode(), complementListDate.toString());
}
}
}
}
/**
* submit task to execute
*
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
try {
ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType());
if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION
&& taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) {
notifyProcessHostUpdate(taskInstance);
}
boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval());
if (submit) {
this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance);
activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor);
taskProcessor.run();
addTimeoutCheck(taskInstance);
TaskDefinition taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
taskInstance.setTaskDefine(taskDefinition);
if (taskProcessor.taskState().typeIsFinished()) {
StateEvent stateEvent = new StateEvent();
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(taskProcessor.taskState());
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
this.stateEvents.add(stateEvent);
}
return taskInstance;
} else {
logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!",
processInstance.getId(), processInstance.getName(),
taskInstance.getId(), taskInstance.getName());
return null;
}
} catch (Exception e) {
logger.error("submit standby task error", e);
return null;
}
}
private void notifyProcessHostUpdate(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getHost())) {
return;
}
try {
HostUpdateCommand hostUpdateCommand = new HostUpdateCommand();
hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort()));
hostUpdateCommand.setTaskInstanceId(taskInstance.getId());
Host host = new Host(taskInstance.getHost());
nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command());
} catch (Exception e) {
logger.error("notify process host update", e);
}
}
private void addTimeoutCheck(TaskInstance taskInstance) {
TaskDefinition taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion()
);
taskInstance.setTaskDefine(taskDefinition);
if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) {
this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
return;
}
if (taskInstance.isDependTask() || taskInstance.isSubProcess()) {
this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
}
}
/**
* find task instance in db.
* in case submit more than one same name task in the same time.
*
* @param taskCode task code
* @param taskVersion task version
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) {
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) {
return taskInstance;
}
}
return null;
}
/**
* encapsulation task
*
* @param processInstance process instance
* @param taskNode taskNode
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion());
if (taskInstance == null) {
taskInstance = new TaskInstance();
taskInstance.setTaskCode(taskNode.getCode());
taskInstance.setTaskDefinitionVersion(taskNode.getVersion());
// task name
taskInstance.setName(taskNode.getName());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance type
taskInstance.setTaskType(taskNode.getType().toUpperCase());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(null);
// task instance flag
taskInstance.setFlag(Flag.YES);
// task dry run flag
taskInstance.setDryRun(processInstance.getDryRun());
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
//set task param
taskInstance.setTaskParams(taskNode.getTaskParams());
// task instance priority
if (taskNode.getTaskInstancePriority() == null) {
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
} else {
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode();
Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? processEnvironmentCode : taskNode.getEnvironmentCode();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
taskInstance.setEnvironmentCode(processEnvironmentCode);
} else {
taskInstance.setWorkerGroup(taskWorkerGroup);
taskInstance.setEnvironmentCode(taskEnvironmentCode);
}
if (!taskInstance.getEnvironmentCode().equals(-1L)) {
Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode());
if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) {
taskInstance.setEnvironmentConfig(environment.getConfig());
}
}
// delay execution time
taskInstance.setDelayTime(taskNode.getDelayTime());
}
return taskInstance;
}
public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) {
Map<String, Property> allProperty = new HashMap<>();
Map<String, TaskInstance> allTaskInstance = new HashMap<>();
if (CollectionUtils.isNotEmpty(preTask)) {
for (String preTaskName : preTask) {
TaskInstance preTaskInstance = completeTaskList.get(preTaskName);
if (preTaskInstance == null) {
continue;
}
String preVarPool = preTaskInstance.getVarPool();
if (StringUtils.isNotEmpty(preVarPool)) {
List<Property> properties = JSONUtils.toList(preVarPool, Property.class);
for (Property info : properties) {
setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info);
}
}
}
if (allProperty.size() > 0) {
taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values()));
}
}
}
private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) {
//for this taskInstance all the param in this part is IN.
thisProperty.setDirect(Direct.IN);
//get the pre taskInstance Property's name
String proName = thisProperty.getProp();
//if the Previous nodes have the Property of same name
if (allProperty.containsKey(proName)) {
//comparison the value of two Property
Property otherPro = allProperty.get(proName);
//if this property'value of loop is empty,use the other,whether the other's value is empty or not
if (StringUtils.isEmpty(thisProperty.getValue())) {
allProperty.put(proName, otherPro);
//if property'value of loop is not empty,and the other's value is not empty too, use the earlier value
} else if (StringUtils.isNotEmpty(otherPro.getValue())) {
TaskInstance otherTask = allTaskInstance.get(proName);
if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
} else {
allProperty.put(proName, otherPro);
}
} else {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
}
} else {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
}
}
private void submitPostNode(String parentNodeCode) {
Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeList, dag, completeTaskList);
List<TaskInstance> taskInstances = new ArrayList<>();
for (String taskNode : submitTaskNodeList) {
TaskNode taskNodeObject = dag.getNode(taskNode);
if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) {
continue;
}
TaskInstance task = createTaskInstance(processInstance, taskNodeObject);
taskInstances.add(task);
}
// if previous node success , post node submit
for (TaskInstance task : taskInstances) {
if (readyToSubmitTaskQueue.contains(task)) {
continue;
}
if (completeTaskList.containsKey(Long.toString(task.getTaskCode()))) {
logger.info("task {} has already run success", task.getName());
continue;
}
if (task.getState().typeIsPause() || task.getState().typeIsCancel()) {
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
} else {
addTaskToStandByList(task);
}
}
submitStandByTask();
updateProcessInstanceState();
}
/**
* determine whether the dependencies of the task node are complete
*
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskCode) {
Collection<String> startNodes = dag.getBeginNode();
// if vertex,returns true directly
if (startNodes.contains(taskCode)) {
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskCode);
List<String> depCodeList = taskNode.getDepList();
for (String depsNode : depCodeList) {
if (!dag.containsNode(depsNode)
|| forbiddenTaskList.containsKey(depsNode)
|| skipTaskNodeList.containsKey(depsNode)) {
continue;
}
// dependencies must be fully completed
if (!completeTaskList.containsKey(depsNode)) {
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) {
return DependResult.NON_EXEC;
}
// ignore task state if current task is condition
if (taskNode.isConditionsTask()) {
continue;
}
if (!dependTaskSuccess(depsNode, taskCode)) {
return DependResult.FAILED;
}
}
logger.info("taskCode: {} completeDependTaskList: {}", taskCode, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* depend node is completed, but here need check the condition task branch is the next node
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) {
if (dag.getNode(dependNodeName).isConditionsTask()) {
//condition task need check the branch to run
List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList);
if (!nextTaskList.contains(nextNodeName)) {
return false;
}
} else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if (depTaskState.typeIsFailure()) {
return false;
}
}
return true;
}
/**
* query task instance by complete state
*
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) {
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) {
if (entry.getValue().getState() == state) {
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
* where there are ongoing tasks
*
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state) {
if (state == ExecutionStatus.READY_STOP
|| state == ExecutionStatus.READY_PAUSE
|| state == ExecutionStatus.WAITING_THREAD
|| state == ExecutionStatus.DELAY_EXECUTION) {
// if the running task is not completed, the state remains unchanged
return state;
} else {
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
* exists failure task,contains submit failure、dependency failure,execute failure(retry after)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask() {
if (this.taskFailedSubmit) {
return true;
}
if (this.errorTaskList.size() > 0) {
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed() {
if (hasFailedTask()) {
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0;
}
}
return false;
}
/**
* whether task for waiting thread
*
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask() {
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
* 1,failed retry task in the preparation queue , returns to failure directly
* 2,exists pause task,complement not completed, pending submission of tasks, return to suspension
* 3,success
*
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause() {
if (hasRetryTaskInStandBy()) {
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if (CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskQueue.size() > 0) {
return ExecutionStatus.PAUSE;
} else {
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
*
* @param instance
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(ProcessInstance instance) {
ExecutionStatus state = instance.getState();
if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) {
// active task and retry task exists
return runningState(state);
}
// process failure
if (processFailed()) {
return ExecutionStatus.FAILURE;
}
// waiting thread
if (hasWaitingThreadTask()) {
return ExecutionStatus.WAITING_THREAD;
}
// pause
if (state == ExecutionStatus.READY_PAUSE) {
return processReadyPause();
}
// stop
if (state == ExecutionStatus.READY_STOP) {
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if (CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()) {
return ExecutionStatus.STOP;
} else {
return ExecutionStatus.SUCCESS;
}
}
// success
if (state == ExecutionStatus.RUNNING_EXECUTION) {
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if (readyToSubmitTaskQueue.size() > 0) {
                // tasks are still pending submission (not retries), so their dependencies are waiting to complete
return ExecutionStatus.RUNNING_EXECUTION;
} else if (CollectionUtils.isNotEmpty(killTasks)) {
// tasks maybe killed manually
return ExecutionStatus.FAILURE;
} else {
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
    /**
     * whether the complement (backfill) run has reached its end date
     *
     * @return Boolean whether the complement has ended
     */
private boolean isComplementEnd() {
if (!processInstance.isComplementData()) {
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
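            // a complement (backfill) run stores its end date in the command
            // param; the run has ended once the schedule time reaches it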
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ", e);
return false;
}
}
    /**
     * update the process instance state:
     * after each batch of tasks is executed, the state of the process instance is refreshed
     */
private void updateProcessInstanceState() {
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = getProcessInstanceState(instance);
if (processInstance.getState() != state) {
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
instance.setState(state);
processService.updateProcessInstance(instance);
processInstance = instance;
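            // replay the change through the state event handler so dependent
            // logic (e.g. finishing the workflow) reacts to the new state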
StateEvent stateEvent = new StateEvent();
stateEvent.setExecutionStatus(processInstance.getState());
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE);
this.processStateChangeHandler(stateEvent);
}
}
/**
* get task dependency result
*
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance) {
return isTaskDepsComplete(Long.toString(taskInstance.getTaskCode()));
}
/**
* add task to standby list
*
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance) {
logger.info("add task to stand by list: {}", taskInstance.getName());
try {
readyToSubmitTaskQueue.put(taskInstance);
} catch (Exception e) {
logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e);
}
}
/**
* remove task from stand by list
*
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance) {
logger.info("remove task from stand by list, id: {} name:{}",
taskInstance.getId(),
taskInstance.getName());
try {
readyToSubmitTaskQueue.remove(taskInstance);
} catch (Exception e) {
logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}",
taskInstance.getId(),
taskInstance.getName(), e);
}
}
/**
* has retry task in standby
*
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy() {
for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) {
if (iter.next().getState().typeIsFailure()) {
return true;
}
}
return false;
}
    /**
     * close the ongoing tasks
     */
private void killAllTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskProcessorMaps.size());
for (int taskId : activeTaskProcessorMaps.keySet()) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskId);
if (taskInstance == null || taskInstance.getState().typeIsFinished()) {
continue;
}
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId);
taskProcessor.action(TaskAction.STOP);
if (taskProcessor.taskState().typeIsFinished()) {
StateEvent stateEvent = new StateEvent();
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(taskProcessor.taskState());
this.addStateEvent(stateEvent);
}
}
}
public boolean workFlowFinish() {
return this.processInstance.getState().typeIsFinished();
}
/**
* whether the retry interval is timed out
*
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) {
if (taskInstance.getState() != ExecutionStatus.FAILURE) {
return true;
}
if (taskInstance.getId() == 0
||
taskInstance.getMaxRetryTimes() == 0
||
taskInstance.getRetryInterval() == 0) {
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
        // retry only when the time elapsed since the failure exceeds the configured retry interval
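        // e.g. with retryInterval = 2 (minutes) and SEC_2_MINUTES_TIME_UNIT = 60,
        // a task that failed 150 seconds ago satisfies 2 * 60 < 150 and may retry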
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask() {
try {
int length = readyToSubmitTaskQueue.size();
for (int i = 0; i < length; i++) {
TaskInstance task = readyToSubmitTaskQueue.peek();
if (task == null) {
continue;
}
                // stop retrying the task if it has been forced to success
if (task.taskCanRetry()) {
TaskInstance retryTask = processService.findTaskInstanceById(task.getId());
if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) {
task.setState(retryTask.getState());
logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName());
removeTaskFromStandbyList(task);
completeTaskList.put(Long.toString(task.getTaskCode()), task);
submitPostNode(Long.toString(task.getTaskCode()));
continue;
}
}
                // init varPool only when this task runs for the first time
if (task.isFirstRun()) {
                    // collect the varPool of all preceding tasks into this task
Set<String> preTask = dag.getPreviousNodes(Long.toString(task.getTaskCode()));
getPreVarPool(task, preTask);
}
DependResult dependResult = getDependResultForTask(task);
if (DependResult.SUCCESS == dependResult) {
if (retryTaskIntervalOverTime(task)) {
TaskInstance taskInstance = submitTaskExec(task);
if (taskInstance == null) {
this.taskFailedSubmit = true;
} else {
removeTaskFromStandbyList(task);
}
}
} else if (DependResult.FAILED == dependResult) {
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(Long.toString(task.getTaskCode()), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult);
} else if (DependResult.NON_EXEC == dependResult) {
                    // for some reason (e.g. a depend task was paused/stopped) this task will not be submitted
removeTaskFromStandbyList(task);
logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult);
}
}
} catch (Exception e) {
logger.error("submit standby task error", e);
}
}
/**
* get recovery task instance
*
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId) {
        if (StringUtils.isEmpty(taskId)) {
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if (task == null) {
logger.error("start node id cannot be found: {}", taskId);
} else {
return task;
}
} catch (Exception e) {
logger.error("get recovery task instance failed ", e);
}
return null;
}
/**
* get start task instance list
*
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam) {
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) {
String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for (String nodeId : idList) {
TaskInstance task = getRecoveryTaskInstance(nodeId);
if (task != null) {
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
*
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam) {
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap == null) {
return startNodeNameList;
}
if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) {
startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
    /**
     * generate the recovery node code list from the recovered task instances
     * (populated from "StartNodeIdList" in the command param, if present)
     *
     * @return recovery node code list
     */
private List<String> getRecoveryNodeCodeList() {
List<String> recoveryNodeCodeList = new ArrayList<>();
if (CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeCodeList.add(Long.toString(task.getTaskCode()));
}
}
return recoveryNodeCodeList;
}
/**
* generate flow dag
*
* @param totalTaskNodeList total task node list
* @param startNodeNameList start node name list
* @param recoveryNodeCodeList recovery node code list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList,
List<String> startNodeNameList,
List<String> recoveryNodeCodeList,
TaskDependType depNodeType) throws Exception {
return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,455 | [Bug] [Master] Cannot stop sub process task | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Cannot stop the sub process tasks.
### What you expected to happen
The tasks should stop normally.
### How to reproduce
Start a process.
Kill the process.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6455 | https://github.com/apache/dolphinscheduler/pull/6458 | 4d69685ba1f9ecc8595dffc13194ec902531ed51 | a8baa9553fd64d1414c204c6ba95019b7376f771 | "2021-10-08T02:35:44Z" | java | "2021-10-08T06:08:12Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/task/CommonTaskProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner.task;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.queue.TaskPriority;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl;
import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext;
import org.apache.commons.lang.StringUtils;
import java.util.Date;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
/**
* common task processor
*/
public class CommonTaskProcessor extends BaseTaskProcessor {
@Autowired
private TaskPriorityQueue taskUpdateQueue;
@Autowired
MasterConfig masterConfig;
@Autowired
NettyExecutorManager nettyExecutorManager;
    /**
     * logger of CommonTaskProcessor
     */
protected Logger logger = LoggerFactory.getLogger(getClass());
@Override
public boolean submit(TaskInstance task, ProcessInstance processInstance, int maxRetryTimes, int commitInterval) {
this.processInstance = processInstance;
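        // commit the task instance to the database, retrying up to maxRetryTimes
        // with commitInterval between attempts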
this.taskInstance = processService.submitTask(task, maxRetryTimes, commitInterval);
if (this.taskInstance == null) {
return false;
}
dispatchTask(taskInstance, processInstance);
return true;
}
@Override
public ExecutionStatus taskState() {
return this.taskInstance.getState();
}
@Override
public void run() {
}
@Override
protected boolean taskTimeout() {
return true;
}
/**
* common task cannot be paused
*/
@Override
protected boolean pauseTask() {
return true;
}
@Override
public String getType() {
return Constants.COMMON_TASK_TYPE;
}
private boolean dispatchTask(TaskInstance taskInstance, ProcessInstance processInstance) {
try {
if (taskUpdateQueue == null) {
this.initQueue();
}
if (taskInstance.getState().typeIsFinished()) {
logger.info(String.format("submit task , but task [%s] state [%s] is already finished. ", taskInstance.getName(), taskInstance.getState().toString()));
return true;
}
// task cannot be submitted because its execution state is RUNNING or DELAY.
if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION
|| taskInstance.getState() == ExecutionStatus.DELAY_EXECUTION) {
logger.info("submit task, but the status of the task {} is already running or delayed.", taskInstance.getName());
return true;
}
logger.info("task ready to submit: {}", taskInstance);
TaskPriority taskPriority = new TaskPriority(processInstance.getProcessInstancePriority().getCode(),
processInstance.getId(), taskInstance.getProcessInstancePriority().getCode(),
taskInstance.getId(), org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP);
TaskExecutionContext taskExecutionContext = getTaskExecutionContext(taskInstance);
taskPriority.setTaskExecutionContext(taskExecutionContext);
taskUpdateQueue.put(taskPriority);
logger.info(String.format("master submit success, task : %s", taskInstance.getName()));
return true;
} catch (Exception e) {
logger.error("submit task Exception: ", e);
logger.error("task error : {}", JSONUtils.toJsonString(taskInstance));
return false;
}
}
public void initQueue() {
this.taskUpdateQueue = SpringApplicationContext.getBean(TaskPriorityQueueImpl.class);
}
@Override
public boolean killTask() {
try {
taskInstance = processService.findTaskInstanceById(taskInstance.getId());
if (taskInstance == null) {
return true;
}
if (taskInstance.getState().typeIsFinished()) {
return true;
}
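            // the task has not been dispatched to a worker yet, so it can be
            // killed locally by marking it as KILL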
if (StringUtils.isBlank(taskInstance.getHost())) {
taskInstance.setState(ExecutionStatus.KILL);
taskInstance.setEndTime(new Date());
processService.updateTaskInstance(taskInstance);
return true;
}
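            // otherwise ask the worker that hosts the task to kill it over RPC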
TaskKillRequestCommand killCommand = new TaskKillRequestCommand();
killCommand.setTaskInstanceId(taskInstance.getId());
ExecutionContext executionContext = new ExecutionContext(killCommand.convert2Command(), ExecutorType.WORKER);
Host host = Host.of(taskInstance.getHost());
executionContext.setHost(host);
nettyExecutorManager.executeDirectly(executionContext);
} catch (ExecuteException e) {
logger.error("kill task error:", e);
return false;
}
logger.info("master kill taskInstance name :{} taskInstance id:{}",
taskInstance.getName(), taskInstance.getId());
return true;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,455 | [Bug] [Master] Cannot stop sub process task | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Cannot stop the sub process tasks.
### What you expected to happen
The tasks should stop normally.
### How to reproduce
Start a process.
Kill the process.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6455 | https://github.com/apache/dolphinscheduler/pull/6458 | 4d69685ba1f9ecc8595dffc13194ec902531ed51 | a8baa9553fd64d1414c204c6ba95019b7376f771 | "2021-10-08T02:35:44Z" | java | "2021-10-08T06:08:12Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/task/SubTaskProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner.task;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import java.util.Date;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
 * sub process task processor
 */
public class SubTaskProcessor extends BaseTaskProcessor {
private ProcessInstance processInstance;
private ProcessInstance subProcessInstance = null;
private TaskDefinition taskDefinition;
/**
* run lock
*/
private final Lock runLock = new ReentrantLock();
@Override
public boolean submit(TaskInstance task, ProcessInstance processInstance, int masterTaskCommitRetryTimes, int masterTaskCommitInterval) {
this.processInstance = processInstance;
taskDefinition = processService.findTaskDefinition(
task.getTaskCode(), task.getTaskDefinitionVersion()
);
this.taskInstance = processService.submitTask(task, masterTaskCommitRetryTimes, masterTaskCommitInterval);
if (this.taskInstance == null) {
return false;
}
return true;
}
@Override
public ExecutionStatus taskState() {
return this.taskInstance.getState();
}
@Override
public void run() {
try {
this.runLock.lock();
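            // the first successful run marks the sub process task as RUNNING;
            // subsequent runs sync the task state with the sub process instance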
if (setSubWorkFlow()) {
updateTaskState();
}
} catch (Exception e) {
logger.error("work flow {} sub task {} exceptions",
this.processInstance.getId(),
this.taskInstance.getId(),
e);
} finally {
this.runLock.unlock();
}
}
@Override
protected boolean taskTimeout() {
TaskTimeoutStrategy taskTimeoutStrategy =
taskDefinition.getTimeoutNotifyStrategy();
if (TaskTimeoutStrategy.FAILED != taskTimeoutStrategy
&& TaskTimeoutStrategy.WARNFAILED != taskTimeoutStrategy) {
return true;
}
logger.info("sub process task {} timeout, strategy {} ",
taskInstance.getId(), taskTimeoutStrategy.getDescp());
killTask();
return true;
}
    private void updateTaskState() {
        subProcessInstance = processService.findSubProcessInstance(processInstance.getId(), taskInstance.getId());
        if (subProcessInstance == null) {
            // check for null before logging, otherwise the log call below would throw an NPE
            return;
        }
        logger.info("work flow {} task {}, sub work flow: {} state: {}",
                this.processInstance.getId(),
                this.taskInstance.getId(),
                subProcessInstance.getId(),
                subProcessInstance.getState().getDescp());
        if (subProcessInstance.getState().typeIsFinished()) {
            taskInstance.setState(subProcessInstance.getState());
            taskInstance.setEndTime(new Date());
            processService.saveTaskInstance(taskInstance);
        }
    }
@Override
protected boolean pauseTask() {
pauseSubWorkFlow();
return true;
}
private boolean pauseSubWorkFlow() {
ProcessInstance subProcessInstance = processService.findSubProcessInstance(processInstance.getId(), taskInstance.getId());
if (subProcessInstance == null || taskInstance.getState().typeIsFinished()) {
return false;
}
subProcessInstance.setState(ExecutionStatus.READY_PAUSE);
processService.updateProcessInstance(subProcessInstance);
//TODO...
// send event to sub process master
return true;
}
private boolean setSubWorkFlow() {
logger.info("set work flow {} task {} running",
this.processInstance.getId(),
this.taskInstance.getId());
if (this.subProcessInstance != null) {
return true;
}
subProcessInstance = processService.findSubProcessInstance(processInstance.getId(), taskInstance.getId());
if (subProcessInstance == null || taskInstance.getState().typeIsFinished()) {
return false;
}
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
processService.updateTaskInstance(taskInstance);
logger.info("set sub work flow {} task {} state: {}",
processInstance.getId(),
taskInstance.getId(),
taskInstance.getState());
return true;
}
@Override
protected boolean killTask() {
ProcessInstance subProcessInstance = processService.findSubProcessInstance(processInstance.getId(), taskInstance.getId());
if (subProcessInstance == null || taskInstance.getState().typeIsFinished()) {
return false;
}
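        // mark the sub process instance READY_STOP; the master owning the sub
        // process instance is expected to observe this state and stop its tasks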
subProcessInstance.setState(ExecutionStatus.READY_STOP);
processService.updateProcessInstance(subProcessInstance);
return true;
}
@Override
public String getType() {
return TaskType.SUB_PROCESS.getDesc();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,455 | [Bug] [Master] Cannot stop sub process task | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Cannot stop the sub process tasks.
### What you expected to happen
The tasks should stop normally.
### How to reproduce
Start a process.
Kill the process.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6455 | https://github.com/apache/dolphinscheduler/pull/6458 | 4d69685ba1f9ecc8595dffc13194ec902531ed51 | a8baa9553fd64d1414c204c6ba95019b7376f771 | "2021-10-08T02:35:44Z" | java | "2021-10-08T06:08:12Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.facebook.presto.jdbc.internal.guava.collect.Lists;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * process related DAO service that aggregates the mappers in this module.
 */
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessDefinitionLogMapper processDefineLogMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
@Autowired
private TaskDefinitionMapper taskDefinitionMapper;
@Autowired
private TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
private ProcessTaskRelationLogMapper processTaskRelationLogMapper;
@Autowired
private EnvironmentMapper environmentMapper;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
*
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = Exception.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
if (!checkThreadNum(command, validThreadNum)) {
logger.info("there is not enough thread for this command: {}", command);
return setWaitingThreadProcess(command, processInstance);
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
this.commandMapper.deleteById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
@Transactional(rollbackFor = Exception.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
this.commandMapper.deleteById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
*
* @return command
*/
public Command findOneCommand() {
return commandMapper.getOneToRun();
}
    /**
     * get one page of commands
     *
     * @param pageSize page size
     * @param pageNumber page number, starting from 0
     * @return command list
     */
public List<Command> findCommandPage(int pageSize, int pageNumber) {
return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize);
}
/**
* check the input command exists in queue list
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
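        // for these recovery-style command types, at most one queued command
        // per process instance is allowed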
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
CommandType commandType = command.getCommandType();
if (cmdTypeMap.containsKey(commandType)) {
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.error("process define not exists");
return new ArrayList<>();
}
List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
return new ArrayList<>(taskDefinitionLogs);
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* find process define by code and version.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
if (processDefinition == null || processDefinition.getVersion() != version) {
processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version);
if (processDefinition != null) {
processDefinition.setId(0);
}
}
return processDefinition;
}
/**
* find process define by code.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) {
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
try (LogClientService logClient = new LogClientService()) {
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
                    // compatible with the old version where host is a plain ip
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip, port, taskLogPath);
}
}
}
/**
* calculate sub process number in the process define.
*
* @param processDefinitionCode processDefinitionCode
* @return process thread num count
*/
private Integer workProcessThreadNumCount(long processDefinitionCode) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinition.getId(), ids);
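        // each sub process occupies one thread, plus one for the parent process itself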
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskDefinition taskNode : taskNodeList) {
String parameter = taskNode.getTaskParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
    /**
     * create a recovery waiting thread command when the thread pool is not big enough for the process instance.
     * sub work process instances do not need a recovery command.
     * create the recovery waiting thread command and delete the origin command at the same time.
     * if the recovery command already exists, only update the update_time field.
     *
     * @param originCommand originCommand
     * @param processInstance processInstance
     */
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // a sub process does not need to create a wait command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinition().getCode(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getEnvironmentCode(),
processInstance.getProcessInstancePriority(),
processInstance.getDryRun()
);
saveCommand(command);
return;
}
        // update the command time if the current command is recovered from waiting
if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
if (scheduleTime == null
&& cmdParam != null
&& cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules);
if (complementDateList.size() > 0) {
scheduleTime = complementDateList.get(0);
} else {
logger.error("set scheduler time error: complement date list is empty, command: {}",
command.toString());
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setProcessDefinitionCode(processDefinition.getCode());
processInstance.setProcessDefinitionVersion(processDefinition.getVersion());
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
//processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
processInstance.setDryRun(command.getDryRun());
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if (scheduleTime != null) {
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
// reset global params while there are start parameters
setGlobalParamIfCommanded(processDefinition, cmdParam);
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? -1 : command.getEnvironmentCode());
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
Map<String, String> fatherParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
fatherParamMap = JSONUtils.toMap(fatherParamJson);
}
startParamMap.putAll(fatherParamMap);
// set start param into global params
if (startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
    /**
     * get the tenant for a process:
     * if the definition carries a tenant id, use that tenant;
     * if the definition has no tenant id or the tenant does not exist,
     * fall back to the definition creator's tenant.
     *
     * @param tenantId tenantId
     * @param userId userId
     * @return tenant
     */
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
    /**
     * get an environment
     * use the environment code to look up an environment.
     *
     * @param environmentCode environmentCode
     * @return Environment
     */
public Environment findEnvironmentByCode(Long environmentCode) {
Environment environment = null;
if (environmentCode >= 0) {
environment = environmentMapper.queryByEnvironmentCode(environmentCode);
}
return environment;
}
/**
* check command parameters is valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host) {
ProcessInstance processInstance;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam);
if (processDefinition == null) {
logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
return null;
}
if (cmdParam != null) {
int processInstanceId = 0;
// recover from failure or pause tasks
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
// sub process map
String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
// waiting thread command
String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return processInstance;
}
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
// reset global params while repeat running is needed by cmdParam
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
processInstance.setProcessDefinition(processDefinition);
}
//reset command parameter
if (processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
} else {
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* get process definition by command
* If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
* Otherwise, get the latest version of ProcessDefinition
     *
     * @param processDefinitionCode process definition code
     * @param cmdParam command parameters
     * @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
if (cmdParam != null) {
int processInstanceId = 0;
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
}
if (processInstanceId != 0) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return null;
}
return processDefineLogMapper.queryByDefinitionCodeAndVersion(
processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
}
}
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* return complement data if the process start with complement data
*
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
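        // expand the [start, end] window into concrete fire dates using the online schedules' crontabs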
List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules);
if (complementDate.size() > 0
&& Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementDate.get(0));
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters.
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters
*
* @param subProcessInstance subProcessInstance
*/
public void setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
        // copy parent instance user-defined params to sub process.
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
}
/**
* join parent global params into sub process.
     * only keys that are not already present in the sub process global params are joined.
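     * e.g. parent [{"prop":"a","value":"1"},{"prop":"b","value":"2"}] joined with
     * sub [{"prop":"a","value":"9"}] yields [{"prop":"a","value":"9"},{"prop":"b","value":"2"}]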
*
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* retry submit task to db
*
     * @param taskInstance task instance
     * @param commitRetryTimes max times to retry the db commit
     * @param commitInterval interval between retries, in milliseconds
     * @return the committed task instance, or null if every attempt failed
*/
public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
int retryTimes = 1;
boolean submitDB = false;
TaskInstance task = null;
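        // retry loop: attempt the db submit up to commitRetryTimes times,
        // sleeping commitInterval milliseconds between attempts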
while (retryTimes <= commitRetryTimes) {
try {
if (!submitDB) {
// submit task to db
task = submitTask(taskInstance);
if (task != null && task.getId() != 0) {
submitDB = true;
break;
}
}
if (!submitDB) {
logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes);
}
Thread.sleep(commitInterval);
} catch (Exception e) {
logger.error("task commit to mysql failed", e);
}
retryTimes += 1;
}
return task;
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
     * note: repeat running does not generate a new sub process instance
* set map {parent instance id, task instance id, 0(child instance id)}
*
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
//check create sub work flow firstly
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // fault-tolerance recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
     * complement data needs to transform the parent parameters to the child.
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
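        // a complement (backfill) parent passes its date window down so the sub process runs over the same range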
if (parentProcessInstance.isComplementData()) {
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
if (fatherParams.size() != 0) {
cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
public Map<String, String> getGlobalParamMap(String globalParams) {
List<Property> propList;
Map<String, String> globalParamMap = new HashMap<>();
if (StringUtils.isNotEmpty(globalParams)) {
propList = JSONUtils.toList(globalParams, Property.class);
globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
}
return globalParamMap;
}
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId);
Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
Map<String, String> fatherParams = new HashMap<>();
if (CollectionUtils.isNotEmpty(allParam)) {
for (Property info : allParam) {
fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
}
}
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
processDefinition.getCode(),
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
task.getEnvironmentCode(),
parentProcessInstance.getProcessInstancePriority(),
parentProcessInstance.getDryRun()
);
}
/**
* initialize sub work flow state
* child instance state would be initialized when 'recovery from pause/stop/failure'
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
/**
* get sub work flow command type
     * child instance exists: child command = father command
     * child instance does not exist: child command = the first command in the father's history commands
*/
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
     * @param childDefinitionCode childDefinitionCode
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
parentProcessInstance.getProcessDefinitionVersion());
ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
* submit task to mysql
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
                    // create a new task instance
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
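                    // resetting the id to 0 makes saveTaskInstance insert a brand-new row for the retry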
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
/**
* get submit task instance state by the work process state
     * the task state cannot be modified when it is running/killed/submitted successfully,
     * because the task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
     * if none of the above is satisfied, return submit success
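     * e.g. a task already in RUNNING_EXECUTION keeps its state even if the process is READY_PAUSE,
     * while a newly submitted task under a READY_PAUSE process returns PAUSE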
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
        // return pause/stop if the process instance state is ready pause/stop
// or return submit success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE
&& task.getRetryTimes() >= task.getMaxRetryTimes()) {
return false;
}
}
return true;
}
/**
     * insert or update work process instance to the database
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* find task instance by id
*
* @param taskId task id
     * @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
     * package task instance, associate processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return null;
}
setTaskInstanceDetail(taskInstance);
return taskInstance;
}
    /**
     * package task instance, associate processInstance and processDefine
     *
     * @param taskInstance taskInstance
     */
public void setTaskInstanceDetail(TaskInstance taskInstance) {
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
updateTaskDefinitionResources(taskDefinition);
taskInstance.setTaskDefine(taskDefinition);
}
/**
* Update {@link ResourceInfo} information in {@link TaskDefinition}
*
* @param taskDefinition the given {@link TaskDefinition}
*/
private void updateTaskDefinitionResources(TaskDefinition taskDefinition) {
Map<String, Object> taskParameters = JSONUtils.parseObject(
taskDefinition.getTaskParams(),
new TypeReference<Map<String, Object>>() { });
if (taskParameters != null) {
// if contains mainJar field, query resource from database
// Flink, Spark, MR
if (taskParameters.containsKey("mainJar")) {
Object mainJarObj = taskParameters.get("mainJar");
ResourceInfo mainJar = JSONUtils.parseObject(
JSONUtils.toJsonString(mainJarObj),
ResourceInfo.class);
ResourceInfo resourceInfo = updateResourceInfo(mainJar);
if (resourceInfo != null) {
taskParameters.put("mainJar", resourceInfo);
}
}
// update resourceList information
if (taskParameters.containsKey("resourceList")) {
String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList"));
List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class);
List<ResourceInfo> updatedResourceInfos = resourceInfos
.stream()
.map(this::updateResourceInfo)
.filter(Objects::nonNull)
.collect(Collectors.toList());
taskParameters.put("resourceList", updatedResourceInfos);
}
// set task parameters
taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters));
}
}
/**
* update {@link ResourceInfo} by given original ResourceInfo
*
* @param res origin resource info
* @return {@link ResourceInfo}
*/
private ResourceInfo updateResourceInfo(ResourceInfo res) {
ResourceInfo resourceInfo = null;
        // only if the resource info is not null and does not contain the "resourceName" field
if (res != null) {
int resourceId = res.getId();
if (resourceId <= 0) {
logger.error("invalid resourceId, {}", resourceId);
return null;
}
resourceInfo = new ResourceInfo();
// get resource from database, only one resource should be returned
Resource resource = getResourceById(resourceId);
resourceInfo.setId(resourceId);
resourceInfo.setRes(resource.getFileName());
resourceInfo.setResourceName(resource.getFullName());
if (logger.isInfoEnabled()) {
logger.info("updated resource info {}",
JSONUtils.toJsonString(resourceInfo));
}
}
return resourceInfo;
}
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
int count = 0;
if (processInstanceMap != null) {
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
*
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
/**
* change task state
*
* @param state state
* @param endTime endTime
* @param taskInstId taskInstId
* @param varPool varPool
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(taskInstance);
saveTaskInstance(taskInstance);
}
/**
* for show in page of taskInstance
*
* @param taskInstance
*/
public void changeOutParam(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getVarPool())) {
return;
}
List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class);
if (CollectionUtils.isEmpty(properties)) {
return;
}
        // if the result has more than one line, just take the first one
Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() {});
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> outProperty = new HashMap<>();
for (Property info : properties) {
if (info.getDirect() == Direct.OUT) {
outProperty.put(info.getProp(), info.getValue());
}
}
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
info.setValue(outProperty.get(paramName));
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams));
}
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionCode
*
* @param processDefinitionCode processDefinitionCode
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
        // 1. set the processInstance host to null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
        // 2. insert a recover command
Command cmd = new Command();
cmd.setProcessDefinitionCode(processDefinition.getCode());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
        // in order to query the tenant code successfully even when the version is older
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
return StringUtils.EMPTY;
}
int userId = resourceList.get(0).getUserId();
User user = userMapper.selectById(userId);
if (Objects.isNull(user)) {
return StringUtils.EMPTY;
}
Tenant tenant = tenantMapper.selectById(user.getTenantId());
if (Objects.isNull(tenant)) {
return StringUtils.EMPTY;
}
return tenant.getTenantCode();
}
/**
* find schedule list by process define codes.
*
* @param codes codes
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineCode(long[] codes) {
return scheduleMapper.selectAllByProcessDefineArray(codes);
}
/**
* find last scheduler process instance in the date interval
*
* @param definitionCode definitionCode
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionCode process definition code
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionCode process definition code
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionCode,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
     * list unauthorized objects of the given authorization type
     *
     * @param userId user id
     * @param needChecks the objects to check
     * @return unauthorized object list
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks);
addAuthorizedResources(ownUdfResources, userId);
Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks);
addAuthorizedResources(ownResources, userId);
Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
return "";
}
ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
if (definition == null) {
return "";
}
return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId());
}
/**
* switch process definition version to process definition log version
*/
public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) {
if (null == processDefinition || null == processDefinitionLog) {
return Constants.DEFINITION_FAILURE;
}
processDefinitionLog.setId(processDefinition.getId());
processDefinitionLog.setReleaseState(ReleaseState.OFFLINE);
processDefinitionLog.setFlag(Flag.YES);
int result = processDefineMapper.updateById(processDefinitionLog);
if (result > 0) {
result = switchProcessTaskRelationVersion(processDefinitionLog);
if (result <= 0) {
return Constants.DEFINITION_FAILURE;
}
}
return result;
}
public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
if (!processTaskRelationList.isEmpty()) {
processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode());
}
List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
return processTaskRelationMapper.batchInsert(processTaskRelationLogList);
}
/**
* get resource ids
*
* @param taskDefinition taskDefinition
* @return resource ids
*/
public String getResourceIds(TaskDefinition taskDefinition) {
Set<Integer> resourceIds = null;
AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams());
if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
resourceIds = params.getResourceFilesList().
stream()
.filter(t -> t.getId() != 0)
.map(ResourceInfo::getId)
.collect(Collectors.toSet());
}
if (CollectionUtils.isEmpty(resourceIds)) {
return StringUtils.EMPTY;
}
return StringUtils.join(resourceIds, ",");
}
public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) {
Date now = new Date();
List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>();
List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>();
for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
taskDefinitionLog.setProjectCode(projectCode);
taskDefinitionLog.setUpdateTime(now);
taskDefinitionLog.setOperateTime(now);
taskDefinitionLog.setOperator(operator.getId());
taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog));
if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) {
TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper
.queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion());
if (definitionCodeAndVersion != null) {
if (!taskDefinitionLog.equals(definitionCodeAndVersion)) {
taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId());
Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode());
taskDefinitionLog.setVersion(version + 1);
taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime());
updateTaskDefinitionLogs.add(taskDefinitionLog);
}
continue;
}
}
taskDefinitionLog.setUserId(operator.getId());
taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
taskDefinitionLog.setCreateTime(now);
if (taskDefinitionLog.getCode() == 0) {
try {
taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
logger.error("Task code get error, ", e);
return Constants.DEFINITION_FAILURE;
}
}
newTaskDefinitionLogs.add(taskDefinitionLog);
}
int insertResult = 0;
int updateResult = 0;
for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) {
TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode());
if (task == null) {
newTaskDefinitionLogs.add(taskDefinitionToUpdate);
} else {
insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate);
taskDefinitionToUpdate.setId(task.getId());
updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate);
}
}
if (!newTaskDefinitionLogs.isEmpty()) {
updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs);
insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs);
}
return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS;
}
/**
* save processDefinition (including create or update processDefinition)
*/
public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) {
ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition);
Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode());
int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1;
processDefinitionLog.setVersion(insertVersion);
processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE);
processDefinitionLog.setOperator(operator.getId());
processDefinitionLog.setOperateTime(processDefinition.getUpdateTime());
int insertLog = processDefineLogMapper.insert(processDefinitionLog);
int result;
if (0 == processDefinition.getId()) {
result = processDefineMapper.insert(processDefinitionLog);
} else {
processDefinitionLog.setId(processDefinition.getId());
result = processDefineMapper.updateById(processDefinitionLog);
}
return (insertLog & result) > 0 ? insertVersion : 0;
}
/**
* save task relations
*/
public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion,
List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null;
if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) {
taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog));
}
Date now = new Date();
for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
processTaskRelationLog.setProjectCode(projectCode);
processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode);
processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion);
if (taskDefinitionLogMap != null) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode());
if (taskDefinitionLog != null) {
processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion());
}
processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion());
}
processTaskRelationLog.setCreateTime(now);
processTaskRelationLog.setUpdateTime(now);
processTaskRelationLog.setOperator(operator.getId());
processTaskRelationLog.setOperateTime(now);
}
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
if (!processTaskRelationList.isEmpty()) {
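            // compare the stored relations with the incoming ones via their hashCode sets;
            // if they are identical there is nothing to rewrite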
Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet());
Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet());
if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) {
return Constants.EXIT_CODE_SUCCESS;
}
processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode);
}
int result = processTaskRelationMapper.batchInsert(taskRelationList);
int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList);
return (result & resultLog) > 0 ? Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
}
public boolean isTaskOnline(long taskCode) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
if (!processTaskRelationList.isEmpty()) {
Set<Long> processDefinitionCodes = processTaskRelationList
.stream()
.map(ProcessTaskRelation::getProcessDefinitionCode)
.collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes);
            // check whether any related process definition is already online
for (ProcessDefinition processDefinition : processDefinitionList) {
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
return true;
}
}
}
return false;
}
/**
* Generate the DAG Graph based on the process definition id
*
* @param processDefinition process definition
* @return dag graph
*/
public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations));
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
/**
* generate DagData
*/
public DagData genDagData(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations);
List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream()
.map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class))
.collect(Collectors.toList());
return new DagData(processDefinition, processTaskRelations, taskDefinitions);
}
public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) {
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPreTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
}
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
}
/**
* find task definition by code and version
*/
public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) {
return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion);
}
/**
* find process task relation list by projectCode and processDefinitionCode
*/
public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) {
return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
}
/**
* add authorized resources
*
* @param ownResources own resources
* @param userId userId
*/
private void addAuthorizedResources(List<Resource> ownResources, int userId) {
List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
ownResources.addAll(relationResources);
}
/**
* Use temporarily before refactoring taskNode
*/
public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, List<Long>> taskCodeMap = new HashMap<>();
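        // taskCodeMap: post task code -> codes of its direct predecessor tasks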
for (ProcessTaskRelation processTaskRelation : taskRelationList) {
taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> {
if (v == null) {
v = new ArrayList<>();
}
if (processTaskRelation.getPreTaskCode() != 0L) {
v.add(processTaskRelation.getPreTaskCode());
}
return v;
});
}
if (CollectionUtils.isEmpty(taskDefinitionLogs)) {
taskDefinitionLogs = genTaskDefineList(taskRelationList);
}
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
List<TaskNode> taskNodeList = new ArrayList<>();
for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey());
if (taskDefinitionLog != null) {
TaskNode taskNode = new TaskNode();
taskNode.setCode(taskDefinitionLog.getCode());
taskNode.setVersion(taskDefinitionLog.getVersion());
taskNode.setName(taskDefinitionLog.getName());
taskNode.setDesc(taskDefinitionLog.getDescription());
taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase());
taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes());
taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval());
Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams());
taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT)));
taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE)));
taskParamsMap.remove(Constants.CONDITION_RESULT);
taskParamsMap.remove(Constants.DEPENDENCE);
taskNode.setParams(JSONUtils.toJsonString(taskParamsMap));
taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority());
taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup());
taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode());
taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN,
taskDefinitionLog.getTimeoutNotifyStrategy(),
taskDefinitionLog.getTimeout())));
taskNode.setDelayTime(taskDefinitionLog.getDelayTime());
taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList())));
taskNodeList.add(taskNode);
}
}
return taskNodeList;
}
public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) {
HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>();
//find sub tasks
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId);
if (processInstanceMap == null) {
return processTaskMap;
}
ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
if (fatherProcess != null) {
processTaskMap.put(fatherProcess, fatherTask);
}
return processTaskMap;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,459 | [Bug] [Master] Complement data error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Complement (backfill) data fails when start_date equals end_date.
### What you expected to happen
The complement should still run for start_date when start_date equals end_date.
### How to reproduce
Start a complement data run with start_date equal to end_date; no run is generated and the complement fails.
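A minimal sketch of the suspected boundary behavior, assuming standard Quartz semantics (the class name, cron expression, and dates below are illustrative, not taken from the actual fix in the linked PR):

```java
import java.text.SimpleDateFormat;
import java.util.Date;

import org.quartz.CronExpression;

public class ComplementBoundarySketch {
    public static void main(String[] args) throws Exception {
        // fire once a day at 00:00
        CronExpression cron = new CronExpression("0 0 0 * * ? *");
        Date start = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse("2021-10-08 00:00:00");
        Date end = start; // start_date == end_date

        // getNextValidTimeAfter returns a time strictly AFTER 'start', so even though
        // 'start' itself matches the cron, the first candidate is the next day,
        // which is already after 'end' -> the fire-date list stays empty.
        Date next = cron.getNextValidTimeAfter(start);
        System.out.println(next);            // the next day, 2021-10-09
        System.out.println(next.after(end)); // true -> no complement run is generated
    }
}
```

Because the loop in `CronUtils.getFireDateList` advances with `getNextValidTimeAfter(...)` and checks `startTime.after(endTime)` after each step, an equal start/end window can end up with no fire dates at all instead of a single run for start_date.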
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6459 | https://github.com/apache/dolphinscheduler/pull/6460 | a8baa9553fd64d1414c204c6ba95019b7376f771 | a502e643bcf9a321385485dd98e7e95056ed5eae | "2021-10-08T07:58:42Z" | java | "2021-10-08T08:56:33Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/cron/CronUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.quartz.cron;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.day;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.hour;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.min;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.month;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.week;
import static com.cronutils.model.CronType.QUARTZ;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.cronutils.model.Cron;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;
/**
* cron utils
*/
public class CronUtils {
private CronUtils() {
throw new IllegalStateException("CronUtils class");
}
private static final Logger logger = LoggerFactory.getLogger(CronUtils.class);
private static final CronParser QUARTZ_CRON_PARSER = new CronParser(CronDefinitionBuilder.instanceDefinitionFor(QUARTZ));
/**
* parse to cron
*
* @param cronExpression cron expression, never null
* @return Cron instance, corresponding to cron expression received
*/
public static Cron parse2Cron(String cronExpression) {
return QUARTZ_CRON_PARSER.parse(cronExpression);
}
/**
* build a new CronExpression based on the string cronExpression
*
* @param cronExpression String representation of the cron expression the new object should represent
* @return CronExpression
* @throws ParseException if the string expression cannot be parsed into a valid
*/
public static CronExpression parse2CronExpression(String cronExpression) throws ParseException {
return new CronExpression(cronExpression);
}
/**
* get max cycle
*
* @param cron cron
* @return CycleEnum
*/
public static CycleEnum getMaxCycle(Cron cron) {
return min(cron).addCycle(hour(cron)).addCycle(day(cron)).addCycle(week(cron)).addCycle(month(cron)).getCycle();
}
/**
* get min cycle
*
* @param cron cron
* @return CycleEnum
*/
public static CycleEnum getMiniCycle(Cron cron) {
return min(cron).addCycle(hour(cron)).addCycle(day(cron)).addCycle(week(cron)).addCycle(month(cron)).getMiniCycle();
}
/**
* get max cycle
*
* @param crontab crontab
* @return CycleEnum
*/
public static CycleEnum getMaxCycle(String crontab) {
return getMaxCycle(parse2Cron(crontab));
}
/**
* gets all scheduled times for a period of time based on not self dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cronExpression cronExpression
* @return date list
*/
public static List<Date> getFireDateList(Date startTime, Date endTime, CronExpression cronExpression) {
List<Date> dateList = new ArrayList<>();
while (Stopper.isRunning()) {
startTime = cronExpression.getNextValidTimeAfter(startTime);
if (startTime.after(endTime)) {
break;
}
dateList.add(startTime);
}
return dateList;
}
/**
* gets expect scheduled times for a period of time based on self dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cronExpression cronExpression
* @param fireTimes fireTimes
* @return date list
*/
public static List<Date> getSelfFireDateList(Date startTime, Date endTime, CronExpression cronExpression, int fireTimes) {
List<Date> dateList = new ArrayList<>();
while (fireTimes > 0) {
startTime = cronExpression.getNextValidTimeAfter(startTime);
if (startTime.after(endTime) || startTime.equals(endTime)) {
break;
}
dateList.add(startTime);
fireTimes--;
}
return dateList;
}
/**
* gets all scheduled times for a period of time based on self dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cronExpression cronExpression
* @return date list
*/
public static List<Date> getSelfFireDateList(Date startTime, Date endTime, CronExpression cronExpression) {
List<Date> dateList = new ArrayList<>();
while (Stopper.isRunning()) {
startTime = cronExpression.getNextValidTimeAfter(startTime);
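            // note: the check below also breaks on a fire time exactly equal to endTime,
            // so a run landing on the boundary instant is excluded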
if (startTime.after(endTime) || startTime.equals(endTime)) {
break;
}
dateList.add(startTime);
}
return dateList;
}
/**
* gets all scheduled times for a period of time based on self dependency
* if schedulers is empty then default scheduler = 1 day
*/
public static List<Date> getSelfFireDateList(final Date startTime, final Date endTime, final List<Schedule> schedules) {
List<Date> result = new ArrayList<>();
Date from = new Date(startTime.getTime() - Constants.SECOND_TIME_MILLIS);
Date to = new Date(endTime.getTime() - Constants.SECOND_TIME_MILLIS);
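        // both bounds are shifted back one second; when startTime equals endTime this
        // collapses the window (from == to) and the single expected fire date for
        // startTime is lost — the symptom reported in issue #6459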
List<Schedule> listSchedule = new ArrayList<>();
listSchedule.addAll(schedules);
if (CollectionUtils.isEmpty(listSchedule)) {
Schedule schedule = new Schedule();
schedule.setCrontab(Constants.DEFAULT_CRON_STRING);
listSchedule.add(schedule);
}
for (Schedule schedule : listSchedule) {
result.addAll(CronUtils.getSelfFireDateList(from, to, schedule.getCrontab()));
}
return result;
}
/**
* gets all scheduled times for a period of time based on self dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cron cron
* @return date list
*/
public static List<Date> getSelfFireDateList(Date startTime, Date endTime, String cron) {
CronExpression cronExpression = null;
try {
cronExpression = parse2CronExpression(cron);
} catch (ParseException e) {
logger.error(e.getMessage(), e);
return Collections.emptyList();
}
return getSelfFireDateList(startTime, endTime, cronExpression);
}
/**
* get expiration time
*
* @param startTime startTime
* @param cycleEnum cycleEnum
* @return date
*/
public static Date getExpirationTime(Date startTime, CycleEnum cycleEnum) {
Date maxExpirationTime = null;
Date startTimeMax = null;
try {
startTimeMax = getEndTime(startTime);
Calendar calendar = Calendar.getInstance();
calendar.setTime(startTime);
switch (cycleEnum) {
case HOUR:
calendar.add(Calendar.HOUR, 1);
break;
case DAY:
calendar.add(Calendar.DATE, 1);
break;
case WEEK:
calendar.add(Calendar.DATE, 1);
break;
case MONTH:
calendar.add(Calendar.DATE, 1);
break;
default:
logger.error("Dependent process definition's cycleEnum is {},not support!!", cycleEnum);
break;
}
maxExpirationTime = calendar.getTime();
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return DateUtils.compare(startTimeMax, maxExpirationTime) ? maxExpirationTime : startTimeMax;
}
/**
* get the end time of the day by value of date
*
* @return date
*/
private static Date getEndTime(Date date) {
Calendar end = new GregorianCalendar();
end.setTime(date);
end.set(Calendar.HOUR_OF_DAY, 23);
end.set(Calendar.MINUTE, 59);
end.set(Calendar.SECOND, 59);
end.set(Calendar.MILLISECOND, 999);
return end.getTime();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,428 | [Bug] [dev] Code style differs between reviewdog and IntelliJ configuration | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Following the [submit guide][1], I ran the formatter with `Ctrl+L` in IntelliJ and pushed to upstream; the commit is https://github.com/apache/dolphinscheduler/pull/6269/commits/bb9f050fdf87c91f1dfe8ad6d9e91f3875475abe, but it failed in our reviewdog CI, see [ci run][2].
It means that our auto formatter and the reviewdog checker follow different rules. The format reviewdog accepts is
```java
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.REGEX, pattern = {
"org.apache.dolphinscheduler.server.master.*",
"org.apache.dolphinscheduler.server.worker.*",
"org.apache.dolphinscheduler.server.monitor.*",
"org.apache.dolphinscheduler.server.log.*"
})
})
```
but what our auto formatter produces is
```java
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.REGEX, pattern = {
"org.apache.dolphinscheduler.server.master.*",
"org.apache.dolphinscheduler.server.worker.*",
"org.apache.dolphinscheduler.server.monitor.*",
"org.apache.dolphinscheduler.server.log.*"
})
})
```
[1]: https://dolphinscheduler.apache.org/en-us/community/development/pull-request.html
[2]: https://github.com/apache/dolphinscheduler/actions/runs/1287112918
### What you expected to happen
The auto formatter and reviewdog should follow the same rule.
### How to reproduce
Create new java code contain `@ComponentScan(value = "org.apache.dolphinscheduler"` and create new PR
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6428 | https://github.com/apache/dolphinscheduler/pull/6432 | a502e643bcf9a321385485dd98e7e95056ed5eae | a3119330b70ffed8fe01da0776a14678db52b9fb | "2021-09-30T07:50:58Z" | java | "2021-10-08T16:29:13Z" | style/intellij-java-code-style.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<code_scheme name="CodeStyle">
<option name="OTHER_INDENT_OPTIONS">
<value>
<option name="INDENT_SIZE" value="2" />
<option name="CONTINUATION_INDENT_SIZE" value="4" />
<option name="TAB_SIZE" value="2" />
<option name="USE_TAB_CHARACTER" value="false" />
<option name="SMART_TABS" value="false" />
<option name="LABEL_INDENT_SIZE" value="0" />
<option name="LABEL_INDENT_ABSOLUTE" value="false" />
<option name="USE_RELATIVE_INDENTS" value="false" />
</value>
</option>
<option name="INSERT_INNER_CLASS_IMPORTS" value="true" />
<option name="CLASS_COUNT_TO_USE_IMPORT_ON_DEMAND" value="999" />
<option name="NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND" value="999" />
<option name="PACKAGES_TO_USE_IMPORT_ON_DEMAND">
<value />
</option>
<option name="IMPORT_LAYOUT_TABLE">
<value>
<package name="org.apache.dolphinscheduler" withSubpackages="true" static="true" />
<emptyLine />
<package name="org.apache" withSubpackages="true" static="true" />
<emptyLine />
<package name="java" withSubpackages="true" static="true" />
<emptyLine />
<package name="javax" withSubpackages="true" static="true" />
<emptyLine />
<package name="org" withSubpackages="true" static="true" />
<emptyLine />
<package name="com" withSubpackages="true" static="true" />
<emptyLine />
<package name="" withSubpackages="true" static="true" />
<emptyLine />
<package name="org.apache.dolphinscheduler" withSubpackages="true" static="false" />
<emptyLine />
<package name="org.apache" withSubpackages="true" static="false" />
<emptyLine />
<package name="java" withSubpackages="true" static="false" />
<emptyLine />
<package name="javax" withSubpackages="true" static="false" />
<emptyLine />
<package name="org" withSubpackages="true" static="false" />
<emptyLine />
<package name="com" withSubpackages="true" static="false" />
<emptyLine />
<package name="" withSubpackages="true" static="false" />
</value>
</option>
<JavaCodeStyleSettings>
<option name="SPACE_AFTER_CLOSING_ANGLE_BRACKET_IN_TYPE_ARGUMENT" value="true" />
<option name="INSERT_INNER_CLASS_IMPORTS" value="true" />
<option name="CLASS_COUNT_TO_USE_IMPORT_ON_DEMAND" value="999" />
<option name="NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND" value="999" />
<option name="PACKAGES_TO_USE_IMPORT_ON_DEMAND">
<value />
</option>
<option name="IMPORT_LAYOUT_TABLE">
<value>
<package name="" withSubpackages="true" static="true" />
<emptyLine />
<package name="java" withSubpackages="true" static="false" />
<emptyLine />
<package name="javax" withSubpackages="true" static="false" />
<emptyLine />
<package name="org" withSubpackages="true" static="false" />
<emptyLine />
<package name="com" withSubpackages="true" static="false" />
<emptyLine />
<package name="" withSubpackages="true" static="false" />
</value>
</option>
<option name="JD_ALIGN_PARAM_COMMENTS" value="false" />
<option name="JD_ALIGN_EXCEPTION_COMMENTS" value="false" />
<option name="JD_KEEP_EMPTY_PARAMETER" value="false" />
<option name="JD_KEEP_EMPTY_EXCEPTION" value="false" />
<option name="JD_KEEP_EMPTY_RETURN" value="false" />
</JavaCodeStyleSettings>
<codeStyleSettings language="JAVA">
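    <!-- Java-specific wrapping options live here; this is the section that has to stay in
         sync with the checkstyle rules reviewdog enforces — the annotation-array wrapping
         mismatch in issue #6428 suggests the two had drifted apart. -->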
<option name="BINARY_OPERATION_SIGN_ON_NEXT_LINE" value="true" />
</codeStyleSettings>
</code_scheme> |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,276 | [Improvement][Common] Read file with UTF-8 | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
You can find the code in org.apache.dolphinscheduler.common.utils.FileUtils
https://github.com/apache/dolphinscheduler/blob/69a153c5f5c9d91b4eececbcfbf9e2fb407f89c4/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java#L234-L254
The `output.toString()` method uses the system's default charset to decode the collected bytes into a string.
This causes a problem when we write a string to a file as UTF-8 and then read it back with this method in environments whose default charset is not UTF-8.
related issue #6273, fix FileUtilsTest#testWriteContent2File
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
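A hedged sketch of the charset-explicit variant this issue asks for, reusing the same read loop as `FileUtils.readFile2Str` shown below; the method name `readFile2StrUtf8` is illustrative, not the project's API:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class CharsetSafeRead {

    public static String readFile2StrUtf8(InputStream inputStream) throws IOException {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int length;
        while ((length = inputStream.read(buffer)) != -1) {
            output.write(buffer, 0, length);
        }
        // output.toString() would decode with the JVM default charset; name UTF-8 explicitly.
        return output.toString(StandardCharsets.UTF_8.name());
    }
}
```

This keeps the write path (`writeContent2File` already uses `StandardCharsets.UTF_8`) and the read path on the same charset regardless of the platform default.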
| https://github.com/apache/dolphinscheduler/issues/6276 | https://github.com/apache/dolphinscheduler/pull/6466 | a3119330b70ffed8fe01da0776a14678db52b9fb | 59daf3c837c4f9ef99d0f67044cd8129ee30a547 | "2021-09-20T02:34:43Z" | java | "2021-10-08T16:40:10Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.DATA_BASEDIR_PATH;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_VIEW_SUFFIXS;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE;
import static org.apache.dolphinscheduler.common.Constants.YYYYMMDDHHMMSS;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* file utils
*/
public class FileUtils {
public static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
public static final String DATA_BASEDIR = PropertyUtils.getString(DATA_BASEDIR_PATH, "/tmp/dolphinscheduler");
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private FileUtils() {
throw new UnsupportedOperationException("Construct FileUtils");
}
/**
* get file suffix
*
* @param filename file name
* @return file suffix
*/
public static String suffix(String filename) {
String fileSuffix = "";
if (!StringUtils.isEmpty(filename)) {
int lastIndex = filename.lastIndexOf('.');
if (lastIndex > 0) {
fileSuffix = filename.substring(lastIndex + 1);
}
}
return fileSuffix;
}
/**
* get download file absolute path and name
*
* @param filename file name
* @return download file name
*/
public static String getDownloadFilename(String filename) {
String fileName = String.format("%s/download/%s/%s", DATA_BASEDIR, DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* get upload file absolute path and name
*
* @param tenantCode tenant code
* @param filename file name
* @return local file path
*/
public static String getUploadFilename(String tenantCode, String filename) {
String fileName = String.format("%s/%s/resources/%s", DATA_BASEDIR, tenantCode, filename);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* directory of process execution
*
* @param projectCode project code
* @param processDefineCode process definition Code
* @param processDefineVersion process definition version
* @param processInstanceId process instance id
* @param taskInstanceId task instance id
* @return directory of process execution
*/
public static String getProcessExecDir(long projectCode, long processDefineCode, int processDefineVersion, int processInstanceId, int taskInstanceId) {
String fileName = String.format("%s/exec/process/%d/%s/%d/%d", DATA_BASEDIR,
projectCode, processDefineCode + "_" + processDefineVersion, processInstanceId, taskInstanceId);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* @return get suffixes for resource files that support online viewing
*/
public static String getResourceViewSuffixs() {
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS, RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE);
}
/**
* create directory if absent
*
* @param execLocalPath execute local path
* @throws IOException errors
*/
public static void createWorkDirIfAbsent(String execLocalPath) throws IOException {
//if work dir exists, first delete
File execLocalPathFile = new File(execLocalPath);
if (execLocalPathFile.exists()) {
org.apache.commons.io.FileUtils.forceDelete(execLocalPathFile);
}
//create work dir
org.apache.commons.io.FileUtils.forceMkdir(execLocalPathFile);
String mkdirLog = "create dir success " + execLocalPath;
LoggerUtils.logInfo(Optional.ofNullable(logger), mkdirLog);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), mkdirLog);
}
/**
* write content to file ,if parent path not exists, it will do one's utmost to mkdir
*
* @param content content
* @param filePath target file path
* @return true if write success
*/
public static boolean writeContent2File(String content, String filePath) {
BufferedReader bufferedReader = null;
BufferedWriter bufferedWriter = null;
try {
File distFile = new File(filePath);
if (!distFile.getParentFile().exists() && !distFile.getParentFile().mkdirs()) {
FileUtils.logger.error("mkdir parent failed");
return false;
}
bufferedReader = new BufferedReader(new StringReader(content));
bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(distFile), StandardCharsets.UTF_8));
char[] buf = new char[1024];
int len;
while ((len = bufferedReader.read(buf)) != -1) {
bufferedWriter.write(buf, 0, len);
}
bufferedWriter.flush();
bufferedReader.close();
bufferedWriter.close();
} catch (IOException e) {
FileUtils.logger.error(e.getMessage(), e);
return false;
} finally {
IOUtils.closeQuietly(bufferedWriter);
IOUtils.closeQuietly(bufferedReader);
}
return true;
}
/**
* Deletes a file. If file is a directory, delete it and all sub-directories.
* <p>
* The difference between File.delete() and this method are:
* <ul>
* <li>A directory to be deleted does not have to be empty.</li>
* <li>You get exceptions when a file or directory cannot be deleted.
* (java.io.File methods returns a boolean)</li>
* </ul>
*
* @param filename file name
* @throws IOException in case deletion is unsuccessful
*/
public static void deleteFile(String filename) throws IOException {
File file = new File(filename);
if (file.exists()) {
org.apache.commons.io.FileUtils.forceDelete(file);
}
}
/**
* Gets all the parent subdirectories of the parentDir directory
*
* @param parentDir parent dir
* @return all dirs
*/
public static File[] getAllDir(String parentDir) {
if (parentDir == null || "".equals(parentDir)) {
throw new RuntimeException("parentDir can not be empty");
}
File file = new File(parentDir);
if (!file.exists() || !file.isDirectory()) {
throw new RuntimeException("parentDir not exist, or is not a directory:" + parentDir);
}
return file.listFiles(File::isDirectory);
}
/**
* Get Content
*
* @param inputStream input stream
* @return string of input stream
*/
public static String readFile2Str(InputStream inputStream) {
try {
ByteArrayOutputStream output = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int length;
while ((length = inputStream.read(buffer)) != -1) {
output.write(buffer, 0, length);
}
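            // note: toString() without an explicit charset decodes with the JVM default;
            // on non-UTF-8 platforms this corrupts UTF-8 file content (issue #6276)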
return output.toString();
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new RuntimeException(e);
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,276 | [Improvement][Common] Read file with UTF-8 | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
You can find the code in org.apache.dolphinscheduler.common.utils.FileUtils
https://github.com/apache/dolphinscheduler/blob/69a153c5f5c9d91b4eececbcfbf9e2fb407f89c4/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java#L234-L254
The `output.toString()` method uses the system's default charset to decode the collected bytes into a string.
This causes a problem when we write a string to a file as UTF-8 and then read it back with this method in environments whose default charset is not UTF-8.
related issue #6273, fix FileUtilsTest#testWriteContent2File
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6276 | https://github.com/apache/dolphinscheduler/pull/6466 | a3119330b70ffed8fe01da0776a14678db52b9fb | 59daf3c837c4f9ef99d0f67044cd8129ee30a547 | "2021-09-20T02:34:43Z" | java | "2021-10-08T16:40:10Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/FileUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.YYYYMMDDHHMMSS;
import org.apache.dolphinscheduler.common.Constants;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
@RunWith(PowerMockRunner.class)
@PrepareForTest(DateUtils.class)
public class FileUtilsTest {
@Test
public void suffix() {
Assert.assertEquals("java", FileUtils.suffix("ninfor.java"));
Assert.assertEquals("", FileUtils.suffix(null));
Assert.assertEquals("", FileUtils.suffix(""));
Assert.assertEquals("", FileUtils.suffix("ninfor-java"));
}
@Test
public void testGetDownloadFilename() {
PowerMockito.mockStatic(DateUtils.class);
PowerMockito.when(DateUtils.getCurrentTime(YYYYMMDDHHMMSS)).thenReturn("20190101101059");
Assert.assertEquals("/tmp/dolphinscheduler/download/20190101101059/test",
FileUtils.getDownloadFilename("test"));
}
@Test
public void testGetUploadFilename() {
Assert.assertEquals("/tmp/dolphinscheduler/aaa/resources/bbb",
FileUtils.getUploadFilename("aaa","bbb"));
}
@Test
public void testGetProcessExecDir() {
String dir = FileUtils.getProcessExecDir(1L, 2L, 1, 3, 4);
Assert.assertEquals("/tmp/dolphinscheduler/exec/process/1/2_1/3/4", dir);
}
@Test
public void testCreateWorkDirIfAbsent() {
try {
FileUtils.createWorkDirIfAbsent("/tmp/createWorkDirAndUserIfAbsent");
Assert.assertTrue(true);
} catch (Exception e) {
Assert.assertTrue(false);
}
}
@Test
public void testSetValue() {
try {
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"true");
Assert.assertTrue(PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE));
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"false");
Assert.assertFalse(PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE));
} catch (Exception e) {
Assert.assertTrue(false);
}
}
@Test
public void testWriteContent2File() throws FileNotFoundException {
// file exists, fmt is invalid
String filePath = "test/testFile.txt";
String content = "正正正faffdasfasdfas";
FileUtils.writeContent2File(content, filePath);
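        // readFile2Str decodes with the platform default charset, so this round-trip
        // assertion fails when the default charset is not UTF-8 (issue #6276)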
String fileContent = FileUtils.readFile2Str(new FileInputStream(new File(filePath)));
Assert.assertEquals(content, fileContent);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,472 | [Bug] [Task] The dist generated package is missing some task plugins | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The package generated by the dist module is missing some task plugins, such as mr and sqoop.
### What you expected to happen
The package generated by the dist module should contain all task plugins.
### How to reproduce
Build the dist module: its provisio configuration omits some task plugins, so they are missing from the generated package.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6472 | https://github.com/apache/dolphinscheduler/pull/6473 | 59daf3c837c4f9ef99d0f67044cd8129ee30a547 | 7459ee2531beff236b1f15bd99f1c6f678153a57 | "2021-10-09T07:23:03Z" | java | "2021-10-09T13:35:28Z" | dolphinscheduler-dist/src/main/provisio/dolphinscheduler.xml | <!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<runtime>
<artifactSet to="/lib">
<exclusion>org.slf4j:slf4j-log4j12</exclusion>
<exclusion>org.slf4j:slf4j-api</exclusion>
<exclusion>ch.qos.logback:logback-classic</exclusion>
</artifactSet>
<!-- Target -->
<archive name="${project.artifactId}-${project.version}.tar.gz" hardLinkIncludes="**/*.jar"/>
<!-- Notices -->
<fileSet to="/">
<directory path="${basedir}/../">
<include>DISCLAIMER</include>
<include>install.sh</include>
<include>LICENSE</include>
<include>NOTICE</include>
</directory>
</fileSet>
<!-- Server -->
<artifactSet to="bin">
<artifact id="io.airlift:launcher:tar.gz:bin:${dep.packaging.version}">
<unpack/>
</artifact>
<artifact id="io.airlift:launcher:tar.gz:properties:${dep.packaging.version}">
<unpack filter="true"/>
</artifact>
</artifactSet>
<!-- Plugins -->
<artifactSet to="lib/plugin/alert/email">
<artifact id="${project.groupId}:dolphinscheduler-alert-email:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/alert/script">
<artifact id="${project.groupId}:dolphinscheduler-alert-script:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/alert/wechat">
<artifact id="${project.groupId}:dolphinscheduler-alert-wechat:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/alert/dingtalk">
<artifact id="${project.groupId}:dolphinscheduler-alert-dingtalk:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/alert/http">
<artifact id="${project.groupId}:dolphinscheduler-alert-http:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/alert/feishu">
<artifact id="${project.groupId}:dolphinscheduler-alert-feishu:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/registry/zookeeper">
<artifact id="${project.groupId}:dolphinscheduler-registry-zookeeper:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<!-- Task Plugins -->
<artifactSet to="lib/plugin/task/shell">
<artifact id="${project.groupId}:dolphinscheduler-task-shell:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/task/python">
<artifact id="${project.groupId}:dolphinscheduler-task-python:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/task/flink">
<artifact id="${project.groupId}:dolphinscheduler-task-flink:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/task/spark">
<artifact id="${project.groupId}:dolphinscheduler-task-spark:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/task/http">
<artifact id="${project.groupId}:dolphinscheduler-task-http:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/task/pigeon">
<artifact id="${project.groupId}:dolphinscheduler-task-pigeon:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
<artifactSet to="lib/plugin/task/sql">
<artifact id="${project.groupId}:dolphinscheduler-task-sql:zip:${project.version}">
<unpack/>
</artifact>
</artifactSet>
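    <!-- Note (issue #6472): this list omits several task plugins, e.g. mr and sqoop; each
         missing plugin needs an artifactSet entry analogous to the ones above (module names
         like dolphinscheduler-task-sqoop are assumed from the naming pattern, not verified). -->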
</runtime> |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,486 | [Bug] [UI] update process instance error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
Updating a process instance fails: some required parameters are missing.
![image](https://user-images.githubusercontent.com/29528966/136700807-74b9d1e9-516b-4a7b-a27a-a10cc48b965a.png)
![image](https://user-images.githubusercontent.com/29528966/136700821-f574ee8d-bf6e-4422-93e6-7465e9b2c429.png)
![image](https://user-images.githubusercontent.com/29528966/136700831-553a184d-5364-4111-ae70-a1f2c908e426.png)
### What you expected to happen
update process instance normally.
### How to reproduce
run a process definition.
edit the process instance and 'save' process instance
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6486 | https://github.com/apache/dolphinscheduler/pull/6487 | 7459ee2531beff236b1f15bd99f1c6f678153a57 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | "2021-10-10T14:49:34Z" | java | "2021-10-10T15:18:07Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div :class="['dag-chart', fullScreen ? 'full-screen' : '']">
<dag-toolbar />
<dag-canvas ref="canvas" />
<el-drawer
:visible.sync="taskDrawer"
size=""
:with-header="false"
:wrapperClosable="false"
>
<!-- fix the bug that Element-ui(2.13.2) auto focus on the first input -->
<div style="width: 0px; height: 0px; overflow: hidden">
<el-input type="text" />
</div>
<m-form-model
v-if="taskDrawer"
:nodeData="nodeData"
@seeHistory="seeHistory"
@addTaskInfo="addTaskInfo"
@close="closeTaskDrawer"
@onSubProcess="toSubProcess"
:type="type"
></m-form-model>
</el-drawer>
<el-dialog
:title="$t('Set the DAG diagram name')"
:visible.sync="saveDialog"
width="auto"
>
<m-udp ref="mUdp" @onUdp="onSave" @close="cancelSave"></m-udp>
</el-dialog>
<el-dialog
:title="$t('Please set the parameters before starting')"
:visible.sync="startDialog"
width="auto"
>
<m-start
:startData="{ code: definitionCode, name: name }"
:startNodeList="startTaskName"
:sourceType="'contextmenu'"
@onUpdateStart="onUpdateStart"
@closeStart="closeStart"
></m-start>
</el-dialog>
<edge-edit-model ref="edgeEditModel" />
<el-drawer :visible.sync="versionDrawer" size="" :with-header="false">
<m-versions
:versionData="versionData"
:isInstance="type === 'instance'"
@mVersionSwitchProcessDefinitionVersion="switchProcessVersion"
@mVersionGetProcessDefinitionVersionsPage="getProcessVersions"
@mVersionDeleteProcessDefinitionVersion="deleteProcessVersion"
@closeVersion="closeVersion"
></m-versions>
</el-drawer>
<m-log
v-if="type === 'instance' && logDialog"
:item="logTaskInstance"
source='dag'
:task-instance-id="logTaskInstance.id"
@close="closeLogDialog"
></m-log>
</div>
</template>
<script>
import { debounce } from 'lodash'
import dagToolbar from './canvas/toolbar.vue'
import dagCanvas from './canvas/canvas.vue'
import mFormModel from '../_source/formModel/formModel.vue'
import { mapActions, mapState, mapMutations } from 'vuex'
import mUdp from '../_source/udp/udp.vue'
import mStart from '../../projects/pages/definition/pages/list/_source/start.vue'
import edgeEditModel from './canvas/edgeEditModel.vue'
import mVersions from '../../projects/pages/definition/pages/list/_source/versions.vue'
import mLog from './formModel/log.vue'
const DEFAULT_NODE_DATA = {
id: null,
taskType: '',
self: {},
instanceId: null
}
export default {
name: 'dag-chart',
components: {
dagCanvas,
dagToolbar,
mFormModel,
mUdp,
mStart,
edgeEditModel,
mVersions,
mLog
},
provide () {
return {
dagChart: this
}
},
inject: ['definitionDetails'],
props: {
type: String,
releaseState: String
},
data () {
return {
definitionCode: 0,
// full screen mode
fullScreen: false,
// whether the task config drawer is visible
taskDrawer: false,
nodeData: { ...DEFAULT_NODE_DATA },
// whether the save dialog is visible
saveDialog: false,
// whether the start dialog is visible
startDialog: false,
startTaskName: '',
// whether the version drawer is visible
versionDrawer: false,
versionData: {
processDefinition: {
id: null,
version: '',
releaseState: ''
},
processDefinitionVersions: [],
total: null,
pageNo: null,
pageSize: null
},
// the task status refresh timer
statusTimer: null,
// the process instance id
instanceId: -1,
// log dialog
logDialog: false,
logTaskInstance: null,
taskInstances: []
}
},
mounted () {
this.setIsEditDag(false)
if (this.type === 'instance') {
this.instanceId = this.$route.params.id
this.definitionCode = this.$route.query.code
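      // the instance editor takes its definition code from the route query; callers that
      // navigate here without ?code=... leave definitionCode undefined, and the save path
      // then dispatches updateInstance without required parameters (issue #6486)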
} else if (this.type === 'definition') {
this.definitionCode = this.$route.params.code
}
// auto resize canvas
this.resizeDebounceFunc = debounce(this.canvasResize, 200)
window.addEventListener('resize', this.resizeDebounceFunc)
// init graph
this.$refs.canvas.graphInit(!this.isDetails)
// backfill graph with tasks, locations and connects
this.backfill()
// refresh task status
if (this.type === 'instance') {
this.refreshTaskStatus()
// status polling
this.statusTimer = setInterval(() => {
this.refreshTaskStatus()
}, 90000)
}
},
beforeDestroy () {
this.resetParams()
clearInterval(this.statusTimer)
window.removeEventListener('resize', this.resizeDebounceFunc)
},
computed: {
...mapState('dag', [
'tasks',
'locations',
'connects',
'name',
'isDetails',
'projectCode',
'version'
])
},
methods: {
...mapActions('dag', [
'saveDAGchart',
'updateInstance',
'updateDefinition',
'getTaskState',
'getStartCheck',
'genTaskCodeList',
'switchProcessDefinitionVersion',
'getProcessDefinitionVersionsPage',
'deleteProcessDefinitionVersion'
]),
...mapMutations('dag', [
'addTask',
'setConnects',
'resetParams',
'setIsEditDag',
'setName',
'setLocations',
'resetLocalParam'
]),
/**
* Toggle full screen
*/
canvasResize () {
const canvas = this.$refs.canvas
canvas && canvas.paperResize()
},
toggleFullScreen () {
this.fullScreen = !this.fullScreen
this.$nextTick(this.canvasResize)
},
/**
* Task Drawer
* @param {boolean} visible
*/
toggleTaskDrawer (visible) {
this.taskDrawer = visible
},
/**
* Set the current node data
*/
setNodeData (nodeData) {
this.nodeData = Object.assign(DEFAULT_NODE_DATA, nodeData)
},
/**
* open form model
* @desc Edit task config
* @param {number} taskCode
* @param {string} taskType
*/
openFormModel (taskCode, taskType) {
this.setNodeData({
id: taskCode,
taskType: taskType
})
this.toggleTaskDrawer(true)
},
addTaskInfo ({ item }) {
this.addTask(item)
this.$refs.canvas.setNodeName(item.code, item.name)
this.taskDrawer = false
},
closeTaskDrawer ({ flag }) {
if (flag) {
const canvas = this.$refs.canvas
canvas.removeNode(this.nodeData.id)
}
this.taskDrawer = false
},
/**
* Save dialog
*/
toggleSaveDialog (value) {
this.saveDialog = value
if (value) {
this.$nextTick(() => {
this.$refs.mUdp.reloadParam()
})
}
},
onSave (sourceType) {
this.toggleSaveDialog(false)
return new Promise((resolve, reject) => {
let tasks = this.tasks || []
const edges = this.$refs.canvas.getEdges()
const nodes = this.$refs.canvas.getNodes()
if (!nodes.length) {
reject(this.$t('Failed to create node to save'))
}
const connects = this.buildConnects(edges, tasks)
this.setConnects(connects)
const locations = nodes.map((node) => {
return {
taskCode: node.id,
x: node.position.x,
y: node.position.y
}
})
this.setLocations(locations)
resolve({
connects: connects,
tasks: tasks,
locations: locations
})
})
.then((res) => {
if (this.verifyConditions(res.tasks)) {
this.loading(true)
const definitionCode = this.definitionCode
if (definitionCode) {
// Edit
return this[
this.type === 'instance' ? 'updateInstance' : 'updateDefinition'
](definitionCode)
.then((res) => {
this.$message({
message: res.msg,
type: 'success',
offset: 80
})
if (this.type === 'instance') {
this.$router.push({
path: `/projects/${this.projectCode}/instance/list`
})
} else {
this.$router.push({
path: `/projects/${this.projectCode}/definition/list`
})
}
})
.catch((e) => {
this.$message.error(e.msg || '')
})
.finally((e) => {
this.loading(false)
})
} else {
// Create
return this.saveDAGchart()
.then((res) => {
this.$message.success(res.msg)
// source @/conf/home/pages/dag/_source/editAffirmModel/index.js
if (sourceType !== 'affirm') {
// Jump process definition
this.$router.push({ name: 'projects-definition-list' })
}
})
.catch((e) => {
this.setName('')
this.$message.error(e.msg || '')
})
.finally((e) => {
this.loading(false)
})
}
}
})
.catch((err) => {
let msg = typeof err === 'string' ? err : err.msg || ''
this.$message.error(msg)
})
},
verifyConditions (value) {
let tasks = value
let bool = true
tasks.map((v) => {
if (
v.taskType === 'CONDITIONS' &&
(v.taskParams.conditionResult.successNode[0] === '' ||
v.taskParams.conditionResult.successNode[0] === null ||
v.taskParams.conditionResult.failedNode[0] === '' ||
v.taskParams.conditionResult.failedNode[0] === null)
) {
bool = false
return false
}
})
if (!bool) {
this.$message.warning(
`${this.$t(
'Successful branch flow and failed branch flow are required'
)}`
)
return false
}
return true
},
cancelSave () {
this.toggleSaveDialog(false)
},
/**
* build graph json
*/
buildGraphJSON (tasks, locations, connects) {
const nodes = []
const edges = []
tasks.forEach((task) => {
const location = locations.find((l) => l.taskCode === task.code) || {}
const node = this.$refs.canvas.genNodeJSON(
task.code,
task.taskType,
task.name,
{
x: location.x,
y: location.y
}
)
nodes.push(node)
})
connects
.filter((r) => !!r.preTaskCode)
.forEach((c) => {
const edge = this.$refs.canvas.genEdgeJSON(
c.preTaskCode,
c.postTaskCode,
c.name
)
edges.push(edge)
})
return {
nodes,
edges
}
},
/**
* Build connects by edges and tasks
* @param {Edge[]} edges
* @param {Task[]} tasks
* @returns
*/
buildConnects (edges, tasks) {
const preTaskMap = {}
const tasksMap = {}
edges.forEach((edge) => {
preTaskMap[edge.targetId] = {
sourceId: edge.sourceId,
edgeLabel: edge.label || ''
}
})
tasks.forEach((task) => {
tasksMap[task.code] = task
})
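      // tasks with no incoming edge become "head" connects with preTaskCode 0,
      // so the backend can identify the root nodes of the DAG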
const headEdges = tasks
.filter((task) => !preTaskMap[task.code])
.map((task) => {
return {
name: '',
preTaskCode: 0,
preTaskVersion: 0,
postTaskCode: task.code,
postTaskVersion: task.version || 0,
// conditionType and conditionParams are reserved
conditionType: 0,
conditionParams: {}
}
})
return edges
.map((edge) => {
return {
name: edge.label,
preTaskCode: edge.sourceId,
preTaskVersion: tasksMap[edge.sourceId].version || 0,
postTaskCode: edge.targetId,
postTaskVersion: tasksMap[edge.targetId].version || 0,
// conditionType and conditionParams are reserved
conditionType: 0,
conditionParams: {}
}
})
.concat(headEdges)
},
backfill () {
const tasks = this.tasks
const locations = this.locations
const connects = this.connects
const json = this.buildGraphJSON(tasks, locations, connects)
this.$refs.canvas.fromJSON(json)
},
/**
* Return to the previous process
*/
returnToPrevProcess () {
let $name = this.$route.name.split('-')
let subs = this.$route.query.subs
let ids = subs.split(',')
const last = ids.pop()
this.$router.push({
path: `/${$name[0]}/${this.projectCode}/${$name[1]}/list/${last}`,
query: ids.length > 0 ? { subs: ids.join(',') } : null
})
},
toSubProcess ({ subProcessCode, subInstanceId }) {
const tarIdentifier =
this.type === 'instance' ? subInstanceId : subProcessCode
const curIdentifier =
this.type === 'instance' ? this.instanceId : this.definitionCode
let subs = []
let olds = this.$route.query.subs
if (olds) {
subs = olds.split(',')
subs.push(curIdentifier)
} else {
subs.push(curIdentifier)
}
let $name = this.$route.name.split('-')
this.$router.push({
path: `/${$name[0]}/${this.projectCode}/${$name[1]}/list/${tarIdentifier}`,
query: { subs: subs.join(',') }
})
},
seeHistory (taskName) {
this.$router.push({
name: 'task-instance',
query: {
processInstanceId: this.$route.params.code,
taskName: taskName
}
})
},
/**
* Start dialog
*/
startRunning (taskName) {
this.startTaskName = taskName
this.getStartCheck({ processDefinitionCode: this.definitionCode }).then(
(res) => {
this.startDialog = true
}
)
},
onUpdateStart () {
this.startDialog = false
},
closeStart () {
this.startDialog = false
},
/**
* Task status
*/
refreshTaskStatus () {
const instanceId = this.$route.params.id
this.loading(true)
this.getTaskState(instanceId)
.then((res) => {
this.$message(this.$t('Refresh status succeeded'))
const { taskList } = res.data
if (taskList) {
this.taskInstances = taskList
taskList.forEach((taskInstance) => {
this.$refs.canvas.setNodeStatus({
code: taskInstance.taskCode,
state: taskInstance.state,
taskInstance
})
})
}
})
.finally(() => {
this.loading(false)
})
},
/**
* Loading
* @param {boolean} visible
*/
loading (visible) {
if (visible) {
this.spinner = this.$loading({
lock: true,
text: this.$t('Loading...'),
spinner: 'el-icon-loading',
background: 'rgba(0, 0, 0, 0.4)',
customClass: 'dag-fullscreen-loading'
})
} else {
this.spinner && this.spinner.close()
}
},
/**
* change process definition version
*/
showVersions () {
this.getProcessDefinitionVersionsPage({
pageNo: 1,
pageSize: 10,
code: this.definitionCode
})
.then((res) => {
let processDefinitionVersions = res.data.totalList
let total = res.data.total
let pageSize = res.data.pageSize
let pageNo = res.data.currentPage
// this.versionData.processDefinition.id = this.urlParam.id
this.versionData.processDefinition.code = this.definitionCode
this.versionData.processDefinition.version = this.version
this.versionData.processDefinition.releaseState = this.releaseState
this.versionData.processDefinitionVersions =
processDefinitionVersions
this.versionData.total = total
this.versionData.pageNo = pageNo
this.versionData.pageSize = pageSize
this.versionDrawer = true
})
.catch((e) => {
this.$message.error(e.msg || '')
})
},
closeVersion () {
this.versionDrawer = false
},
switchProcessVersion ({ version, processDefinitionCode }) {
this.switchProcessDefinitionVersion({
version: version,
code: processDefinitionCode
})
.then((res) => {
this.$message.success($t('Switch Version Successfully'))
this.closeVersion()
this.definitionDetails.init()
})
.catch((e) => {
this.$message.error(e.msg || '')
})
},
getProcessVersions ({ pageNo, pageSize, processDefinitionCode }) {
this.getProcessDefinitionVersionsPage({
pageNo: pageNo,
pageSize: pageSize,
code: processDefinitionCode
})
.then((res) => {
this.versionData.processDefinitionVersions = res.data.totalList
this.versionData.total = res.data.total
this.versionData.pageSize = res.data.pageSize
this.versionData.pageNo = res.data.currentPage
})
.catch((e) => {
this.$message.error(e.msg || '')
})
},
deleteProcessVersion ({ version, processDefinitionCode }) {
this.deleteProcessDefinitionVersion({
version: version,
code: processDefinitionCode
})
.then((res) => {
this.$message.success(res.msg || '')
this.getProcessVersions({
pageNo: 1,
pageSize: 10,
processDefinitionCode: processDefinitionCode
})
})
.catch((e) => {
this.$message.error(e.msg || '')
})
},
/**
* Log dialog
*/
closeLogDialog () {
this.logDialog = false
this.logTaskInstance = null
},
showLogDialog (taskDefinitionCode) {
const taskInstance = this.taskInstances.find(taskInstance => {
return taskInstance.taskCode === taskDefinitionCode
})
if (taskInstance) {
this.logTaskInstance = {
id: taskInstance.id,
type: taskInstance.taskType
}
this.logDialog = true
}
}
}
}
</script>
<style lang="scss" scoped>
@import "./dag";
</style>
<style lang="scss">
@import "./loading";
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,486 | [Bug] [UI] update process instance error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
update process instance error:
miss some required parameters.
![image](https://user-images.githubusercontent.com/29528966/136700807-74b9d1e9-516b-4a7b-a27a-a10cc48b965a.png)
![image](https://user-images.githubusercontent.com/29528966/136700821-f574ee8d-bf6e-4422-93e6-7465e9b2c429.png)
![image](https://user-images.githubusercontent.com/29528966/136700831-553a184d-5364-4111-ae70-a1f2c908e426.png)
### What you expected to happen
update process instance normally.
### How to reproduce
run a process definition.
edit the process instance and 'save' process instance
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6486 | https://github.com/apache/dolphinscheduler/pull/6487 | 7459ee2531beff236b1f15bd99f1c6f678153a57 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | "2021-10-10T14:49:34Z" | java | "2021-10-10T15:18:07Z" | dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model" style="position: relative;">
<div class="table-box">
<el-table class="fixed" :data="list" size="mini" style="width: 100%" @selection-change="_arrDelChange">
<el-table-column type="selection" width="50"></el-table-column>
<el-table-column prop="id" :label="$t('#')" width="50"></el-table-column>
<el-table-column :label="$t('Process Name')" min-width="200">
<template slot-scope="scope">
<el-popover trigger="hover" placement="top">
<p>{{ scope.row.name }}</p>
<div slot="reference" class="name-wrapper">
<router-link :to="{ path: `/projects/${projectCode}/instance/list/${scope.row.id}` , query:{code: scope.row.processDefinitionCode}}" tag="a" class="links"><span class="ellipsis">{{ scope.row.name }}</span></router-link>
</div>
</el-popover>
</template>
</el-table-column>
<el-table-column :label="$t('State')" width="50">
<template slot-scope="scope">
<span v-html="_rtState(scope.row.state)" style="cursor: pointer;"></span>
</template>
</el-table-column>
<el-table-column :label="$t('Run Type')">
<template slot-scope="scope">
{{_rtRunningType(scope.row.commandType)}}
</template>
</el-table-column>
<el-table-column :label="$t('Scheduling Time')" width="135">
<template slot-scope="scope">
<span v-if="scope.row.scheduleTime">{{scope.row.scheduleTime | formatDate}}</span>
<span v-else>-</span>
</template>
</el-table-column>
<el-table-column :label="$t('Start Time')" width="135">
<template slot-scope="scope">
<span>{{scope.row.startTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('End Time')" width="135">
<template slot-scope="scope">
<span>{{scope.row.endTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Duration')">
<template slot-scope="scope">
<span>{{scope.row.duration | filterNull}}</span>
</template>
</el-table-column>
<el-table-column prop="runTimes" :label="$t('Run Times')"></el-table-column>
<el-table-column prop="recovery" :label="$t('fault-tolerant sign')"></el-table-column>
<el-table-column :label="$t('Dry-run flag')" width="100">
<template slot-scope="scope">
<span v-if="scope.row.dryRun == 1">YES</span>
<span v-else>NO</span>
</template>
</el-table-column>
<el-table-column prop="executorName" :label="$t('Executor')"></el-table-column>
<el-table-column prop="host" :label="$t('host')" min-width="210"></el-table-column>
<el-table-column :label="$t('Operation')" width="240" fixed="right">
<template slot-scope="scope">
<div v-show="scope.row.disabled">
<el-tooltip :content="$t('Edit')" placement="top" :enterable="false">
<span>
<el-button type="primary" size="mini" icon="el-icon-edit-outline" :disabled="scope.row.state !== 'SUCCESS' && scope.row.state !== 'PAUSE' && scope.row.state !== 'FAILURE' && scope.row.state !== 'STOP'" @click="_reEdit(scope.row)" circle></el-button>
</span>
</el-tooltip>
<el-tooltip :content="$t('Rerun')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" :disabled="scope.row.state !== 'SUCCESS' && scope.row.state !== 'PAUSE' && scope.row.state !== 'FAILURE' && scope.row.state !== 'STOP'" icon="el-icon-refresh" @click="_reRun(scope.row,scope.$index)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Recovery Failed')" placement="top" :enterable="false">
<span>
<el-button type="success" size="mini" icon="el-icon-circle-close" :disabled="scope.row.state !== 'FAILURE'" @click="_restore(scope.row,scope.$index)" circle></el-button>
</span>
</el-tooltip>
<el-tooltip :content="scope.row.state === 'STOP' ? $t('Recovery Suspend') : $t('Stop')" placement="top" :enterable="false">
<span><el-button type="danger" size="mini" :disabled="scope.row.state !== 'RUNNING_EXECUTION' && scope.row.state !== 'STOP'" :icon="scope.row.state === 'STOP' ? 'el-icon-video-play' : 'el-icon-close'" @click="_stop(scope.row,scope.$index)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="scope.row.state === 'PAUSE' ? $t('Recovery Suspend') : $t('Pause')" placement="top" :enterable="false">
<span><el-button type="warning" size="mini" :icon="scope.row.state === 'PAUSE' ? 'el-icon-video-play' : 'el-icon-video-pause'" :disabled="scope.row.state !== 'RUNNING_EXECUTION' && scope.row.state !== 'PAUSE'" @click="_suspend(scope.row,scope.$index)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Delete')" placement="top" :enterable="false">
<el-popconfirm
:confirmButtonText="$t('Confirm')"
:cancelButtonText="$t('Cancel')"
icon="el-icon-info"
iconColor="red"
:title="$t('Delete?')"
@onConfirm="_delete(scope.row,scope.row.id)">
<el-button type="danger" size="mini" icon="el-icon-delete" :disabled="scope.row.state !== 'SUCCESS' && scope.row.state !== 'FAILURE' && scope.row.state !== 'STOP' && scope.row.state !== 'PAUSE'" circle slot="reference"></el-button>
</el-popconfirm>
</el-tooltip>
<el-tooltip :content="$t('Gantt')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-s-operation" @click="_gantt(scope.row)" circle></el-button></span>
</el-tooltip>
</div>
<div v-show="!scope.row.disabled">
<!--Edit-->
<el-button
type="info"
size="mini"
icon="el-icon-edit-outline"
disabled="true"
circle>
</el-button>
<!--Rerun-->
<span>
<el-button
v-show="buttonType === 'run'"
type="info"
size="mini"
disabled="true"
circle>
<span style="padding: 0 2px">{{scope.row.count}}</span>
</el-button>
</span>
<el-button
v-show="buttonType !== 'run'"
type="info"
size="mini"
icon="el-icon-refresh"
disabled="true"
circle>
</el-button>
<!--Store-->
<span>
<el-button
v-show="buttonType === 'store'"
type="success"
size="mini"
circle
disabled="true">
<span style="padding: 0 3px">{{scope.row.count}}</span>
</el-button>
</span>
<el-button
v-show="buttonType !== 'store'"
type="success"
size="mini"
circle
icon="el-icon-circle-close"
disabled="true">
</el-button>
<!--Recovery Suspend/Pause-->
<span>
<el-button
v-show="(scope.row.state === 'PAUSE' || scope.row.state === 'STOP') && buttonType === 'suspend'"
type="warning"
size="mini"
circle
disabled="true">
<span style="padding: 0 3px">{{scope.row.count}}</span>
</el-button>
</span>
<!--Recovery Suspend-->
<el-button
v-show="(scope.row.state === 'PAUSE' || scope.row.state === 'STOP') && buttonType !== 'suspend'"
type="warning"
size="mini"
circle
icon="el-icon-video-play"
disabled="true">
</el-button>
<!--Pause-->
<span>
<el-button
v-show="scope.row.state !== 'PAUSE'"
type="warning"
size="mini"
circle
icon="el-icon-close"
disabled="true">
</el-button>
</span>
<!--Stop-->
<span>
<el-button
v-show="scope.row.state !== 'STOP'"
type="warning"
size="mini"
circle
icon="el-icon-video-pause"
disabled="true">
</el-button>
</span>
<!--Delete-->
<el-button
type="danger"
circle
size="mini"
icon="el-icon-delete"
:disabled="true">
</el-button>
<!--Gantt-->
<el-button
type="success"
circle
size="mini"
icon="el-icon-s-operation"
disabled="true">
</el-button>
</div>
</template>
</el-table-column>
</el-table>
</div>
<el-tooltip :content="$t('Delete')" placement="top" :enterable="false">
<el-popconfirm
:confirmButtonText="$t('Confirm')"
:cancelButtonText="$t('Cancel')"
:title="$t('Delete?')"
@onConfirm="_delete({},-1)"
>
<el-button style="position: absolute; bottom: -48px; left: 19px;" type="primary" size="mini" :disabled="!strDelete" slot="reference">{{$t('Delete')}}</el-button>
</el-popconfirm>
</el-tooltip>
</div>
</template>
<script>
import _ from 'lodash'
import { mapActions, mapState } from 'vuex'
import { tasksState, runningType } from '@/conf/home/pages/dag/_source/config'
export default {
name: 'list',
data () {
return {
// data
list: [],
// btn type
buttonType: '',
strDelete: '',
checkAll: false
}
},
props: {
processInstanceList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('dag', ['editExecutorsState', 'deleteInstance', 'batchDeleteInstance']),
/**
* Return run type
*/
_rtRunningType (code) {
return _.filter(runningType, v => v.code === code)[0].desc
},
/**
* Return status
*/
_rtState (code) {
let o = tasksState[code]
return `<em class="fa ansfont ${o.icoUnicode} ${o.isSpin ? 'as as-spin' : ''}" style="color:${o.color}" data-toggle="tooltip" data-container="body" title="${o.desc}"></em>`
},
/**
* delete
*/
_delete (item, i) {
      // batch removal (two or more checked rows)
if (i < 0) {
this._batchDelete()
return
}
// remove one
this.deleteInstance({
processInstanceId: item.id
}).then(res => {
this._onUpdate()
this.$message.success(res.msg)
}).catch(e => {
this.$message.error(e.msg || '')
})
},
/**
* edit
*/
_reEdit (item) {
this.$router.push({ path: `/projects/${this.projectCode}/instance/list/${item.id}` })
},
/**
* Rerun
* @param REPEAT_RUNNING
*/
_reRun (item, index) {
this._countDownFn({
id: item.id,
executeType: 'REPEAT_RUNNING',
index: index,
buttonType: 'run'
})
},
/**
* Resume running
* @param PAUSE => RECOVER_SUSPENDED_PROCESS
* @param FAILURE => START_FAILURE_TASK_PROCESS
*/
_restore (item, index) {
this._countDownFn({
id: item.id,
executeType: 'START_FAILURE_TASK_PROCESS',
index: index,
buttonType: 'store'
})
},
/**
* stop
* @param STOP
*/
_stop (item, index) {
if (item.state === 'STOP') {
this._countDownFn({
id: item.id,
executeType: 'RECOVER_SUSPENDED_PROCESS',
index: index,
buttonType: 'suspend'
})
} else {
this._upExecutorsState({
processInstanceId: item.id,
executeType: 'STOP'
})
}
},
/**
* pause
* @param PAUSE
*/
_suspend (item, index) {
if (item.state === 'PAUSE') {
this._countDownFn({
id: item.id,
executeType: 'RECOVER_SUSPENDED_PROCESS',
index: index,
buttonType: 'suspend'
})
} else {
this._upExecutorsState({
processInstanceId: item.id,
executeType: 'PAUSE'
})
}
},
/**
* operating
*/
_upExecutorsState (o) {
this.editExecutorsState(o).then(res => {
this.$message.success(res.msg)
$('body').find('.tooltip.fade.top.in').remove()
this._onUpdate()
}).catch(e => {
this.$message.error(e.msg || '')
this._onUpdate()
})
},
/**
* Countdown method refresh
*/
_countDownFn (param) {
this.buttonType = param.buttonType
this.editExecutorsState({
processInstanceId: param.id,
executeType: param.executeType
}).then(res => {
this.list[param.index].disabled = false
$('body').find('.tooltip.fade.top.in').remove()
this.$forceUpdate()
this.$message.success(res.msg)
// Countdown
this._countDown(() => {
this._onUpdate()
}, param.index)
}).catch(e => {
this.$message.error(e.msg || '')
this._onUpdate()
})
},
/**
* update
*/
_onUpdate () {
this.$emit('on-update')
},
/**
* list data handle
*/
_listDataHandle (data) {
if (data.length) {
_.map(data, v => {
v.disabled = true
v.count = 9
})
}
return data
},
/**
* Countdown
*/
_countDown (fn, index) {
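      // decrement this row's visible counter once per second; when it reaches
      // zero, invoke fn (the list refresh) and clear the interval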
const TIME_COUNT = 10
let timer
let $count
if (!timer) {
$count = TIME_COUNT
timer = setInterval(() => {
if ($count > 0 && $count <= TIME_COUNT) {
$count--
this.list[index].count = $count
this.$forceUpdate()
} else {
fn()
clearInterval(timer)
timer = null
}
}, 1000)
}
},
_gantt (item) {
this.$router.push({ path: `/projects/${this.projectCode}/instance/gantt/${item.id}` })
},
_topCheckBoxClick (v) {
this.list.forEach((item, i) => {
this.$set(this.list[i], 'isCheck', v)
})
this._arrDelChange()
},
_arrDelChange (v) {
let arr = []
arr = _.map(v, 'id')
this.strDelete = _.join(arr, ',')
},
_batchDelete () {
this.batchDeleteInstance({
processInstanceIds: this.strDelete
}).then(res => {
this._onUpdate()
this.checkAll = false
this.strDelete = ''
this.$message.success(res.msg)
}).catch(e => {
this.checkAll = false
this.strDelete = ''
this.$message.error(e.msg || '')
})
}
},
watch: {
processInstanceList: {
handler (a) {
this.checkAll = false
this.list = []
setTimeout(() => {
this.list = _.cloneDeep(this._listDataHandle(a))
})
},
immediate: true,
deep: true
},
pageNo () {
this.strDelete = ''
}
},
created () {
},
mounted () {
},
computed: {
...mapState('dag', ['projectCode'])
},
components: { }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,486 | [Bug] [UI] update process instance error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
Updating a process instance fails because some required parameters are missing from the request.
![image](https://user-images.githubusercontent.com/29528966/136700807-74b9d1e9-516b-4a7b-a27a-a10cc48b965a.png)
![image](https://user-images.githubusercontent.com/29528966/136700821-f574ee8d-bf6e-4422-93e6-7465e9b2c429.png)
![image](https://user-images.githubusercontent.com/29528966/136700831-553a184d-5364-4111-ae70-a1f2c908e426.png)
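For reference, a minimal sketch of a request body aligned with the sibling `updateDefinition` action in `actions.js`; every field name below is an assumption drawn from that action, not taken from the actual fix:
```js
// hypothetical payload for PUT projects/{projectCode}/process-instances/{id},
// modeled on updateDefinition in actions.js; field names are assumptions
const body = {
  taskDefinitionJson: JSON.stringify(state.tasks),
  taskRelationJson: JSON.stringify(state.connects),
  locations: JSON.stringify(state.locations),
  globalParams: JSON.stringify(state.globalParams),
  tenantCode: state.tenantCode,
  timeout: state.timeout,
  syncDefine: state.syncDefine
}
```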
### What you expected to happen
The process instance should be updated normally.
### How to reproduce
Run a process definition, then edit the resulting process instance and click 'save'.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6486 | https://github.com/apache/dolphinscheduler/pull/6487 | 7459ee2531beff236b1f15bd99f1c6f678153a57 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | "2021-10-10T14:49:34Z" | java | "2021-10-10T15:18:07Z" | dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/_source/list.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model">
<div class="table-box">
<el-table :data="list" size="mini" style="width: 100%">
<el-table-column prop="id" :label="$t('#')" width="50"></el-table-column>
<el-table-column prop="name" :label="$t('Name')"></el-table-column>
<el-table-column :label="$t('Process Instance')" min-width="200">
<template slot-scope="scope">
<el-popover trigger="hover" placement="top">
<p>{{ scope.row.processInstanceName }}</p>
<div slot="reference" class="name-wrapper">
<a href="javascript:" class="links" @click="_go(scope.row)"><span class="ellipsis">{{scope.row.processInstanceName}}</span></a>
</div>
</el-popover>
</template>
</el-table-column>
<el-table-column prop="executorName" :label="$t('Executor')"></el-table-column>
<el-table-column prop="taskType" :label="$t('Node Type')"></el-table-column>
<el-table-column :label="$t('State')" width="50">
<template slot-scope="scope">
<span v-html="_rtState(scope.row.state)" style="cursor: pointer;"></span>
</template>
</el-table-column>
<el-table-column :label="$t('Submit Time')" width="135">
<template slot-scope="scope">
<span v-if="scope.row.submitTime">{{scope.row.submitTime | formatDate}}</span>
<span v-else>-</span>
</template>
</el-table-column>
<el-table-column :label="$t('Start Time')" width="135">
<template slot-scope="scope">
<span>{{scope.row.startTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('End Time')" width="135">
<template slot-scope="scope">
<span>{{scope.row.endTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Duration')">
<template slot-scope="scope">
<span>{{scope.row.duration | filterNull}}</span>
</template>
</el-table-column>
<el-table-column prop="retryTimes" :label="$t('Retry Count')"></el-table-column>
<el-table-column :label="$t('Dry-run flag')" width="100">
<template slot-scope="scope">
<span v-if="scope.row.dryRun == 1">YES</span>
<span v-else>NO</span>
</template>
</el-table-column>
<el-table-column :label="$t('host')" min-width="210">
<template slot-scope="scope">
<span>{{scope.row.host | filterNull}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Operation')" width="80" fixed="right">
<template slot-scope="scope">
<div>
<el-tooltip :content="$t('Force success')" placement="top" :enterable="false">
<span>
<el-button type="primary" size="mini" icon="el-icon-success" :disabled="!(scope.row.state === 'FAILURE' || scope.row.state === 'NEED_FAULT_TOLERANCE' || scope.row.state === 'KILL')" @click="_forceSuccess(scope.row)" circle></el-button>
</span>
</el-tooltip>
<el-tooltip :content="$t('View log')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" :disabled="scope.row.taskType==='SUB_PROCESS'? true: false" icon="el-icon-tickets" @click="_refreshLog(scope.row)" circle></el-button></span>
</el-tooltip>
</div>
</template>
</el-table-column>
</el-table>
</div>
<el-dialog
:show-close="false"
:visible.sync="logDialog"
width="auto">
<m-log :key="taskInstanceId" :item="item" :source="source" :taskInstanceId="taskInstanceId" @close="close"></m-log>
</el-dialog>
</div>
</template>
<script>
import { mapActions, mapState } from 'vuex'
import Permissions from '@/module/permissions'
import mLog from '@/conf/home/pages/dag/_source/formModel/log'
import { tasksState } from '@/conf/home/pages/dag/_source/config'
export default {
name: 'list',
data () {
return {
list: [],
isAuth: Permissions.getAuth(),
backfillItem: {},
logDialog: false,
item: {},
source: '',
taskInstanceId: null
}
},
props: {
taskInstanceList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('dag', ['forceTaskSuccess']),
_rtState (code) {
let o = tasksState[code]
return `<em class="${o.icoUnicode} ${o.isSpin ? 'as as-spin' : ''}" style="color:${o.color}" data-toggle="tooltip" data-container="body" title="${o.desc}"></em>`
},
_refreshLog (item) {
this.item = item
this.source = 'list'
this.taskInstanceId = item.id
this.logDialog = true
},
close () {
this.logDialog = false
},
_forceSuccess (item) {
this.forceTaskSuccess({ taskInstanceId: item.id }).then(res => {
if (res.code === 0) {
this.$message.success(res.msg)
setTimeout(this._onUpdate, 1000)
} else {
this.$message.error(res.msg)
}
}).catch(e => {
this.$message.error(e.msg)
})
},
_onUpdate () {
this.$emit('on-update')
},
_go (item) {
this.$router.push({ path: `/projects/${this.projectId}/instance/list/${item.processInstanceId}` })
}
},
watch: {
taskInstanceList (a) {
this.list = []
setTimeout(() => {
this.list = a
})
}
},
created () {
},
mounted () {
this.list = this.taskInstanceList
},
computed: {
...mapState('dag', ['projectId'])
},
components: { mLog }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,486 | [Bug] [UI] update process instance error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
Updating a process instance fails because some required parameters are missing from the request.
![image](https://user-images.githubusercontent.com/29528966/136700807-74b9d1e9-516b-4a7b-a27a-a10cc48b965a.png)
![image](https://user-images.githubusercontent.com/29528966/136700821-f574ee8d-bf6e-4422-93e6-7465e9b2c429.png)
![image](https://user-images.githubusercontent.com/29528966/136700831-553a184d-5364-4111-ae70-a1f2c908e426.png)
### What you expected to happen
The process instance should be updated normally.
### How to reproduce
Run a process definition, then edit the resulting process instance and click 'save'.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6486 | https://github.com/apache/dolphinscheduler/pull/6487 | 7459ee2531beff236b1f15bd99f1c6f678153a57 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | "2021-10-10T14:49:34Z" | java | "2021-10-10T15:18:07Z" | dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import _ from 'lodash'
import io from '@/module/io'
export default {
/**
* Task status acquisition
*/
getTaskState ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-instances/${payload}/tasks`, {
processInstanceId: payload
}, res => {
state.taskInstances = res.data.taskList
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Update process definition status
*/
editProcessState ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-definition/${payload.code}/release`, {
name: payload.name,
releaseState: payload.releaseState
}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get process definition versions pagination info
*/
getProcessDefinitionVersionsPage ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* switch process definition version
*/
switchProcessDefinitionVersion ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* delete process definition version
*/
deleteProcessDefinitionVersion ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Update process instance status
*/
editExecutorsState ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/executors/execute`, {
processInstanceId: payload.processInstanceId,
executeType: payload.executeType
}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Verify that the DGA map name exists
*/
verifDAGName ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/verify-name`, {
name: payload
}, res => {
state.name = payload
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Get process definition DAG diagram details
*/
getProcessDetails ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/${payload}`, {
}, res => {
// process definition code
state.code = res.data.processDefinition.code
// version
state.version = res.data.processDefinition.version
// name
state.name = res.data.processDefinition.name
// description
state.description = res.data.processDefinition.description
// taskRelationJson
state.connects = res.data.processTaskRelationList
// locations
state.locations = JSON.parse(res.data.processDefinition.locations)
// global params
state.globalParams = res.data.processDefinition.globalParamList
// timeout
state.timeout = res.data.processDefinition.timeout
// tenantCode
state.tenantCode = res.data.processDefinition.tenantCode || 'default'
// tasks info
state.tasks = res.data.taskDefinitionList.map(task => _.pick(task, [
'code',
'name',
'version',
'description',
'delayTime',
'taskType',
'taskParams',
'flag',
'taskPriority',
'workerGroup',
'failRetryTimes',
'failRetryInterval',
'timeoutFlag',
'timeoutNotifyStrategy',
'timeout',
'environmentCode'
]))
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get process definition DAG diagram details
*/
copyProcess ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-definition/batch-copy`, {
codes: payload.codes,
targetProjectCode: payload.targetProjectCode
}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Get process definition DAG diagram details
*/
moveProcess ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-definition/batch-move`, {
codes: payload.codes,
targetProjectCode: payload.targetProjectCode
}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Get all the items created by the logged in user
*/
getAllItems ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/created-and-authed', {}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Get the process instance DAG diagram details
*/
getInstancedetail ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-instances/${payload}`, {
}, res => {
const { processDefinition, processTaskRelationList, taskDefinitionList } = res.data.dagData
// code
state.code = processDefinition.code
// version
state.version = processDefinition.version
// name
state.name = res.data.name
// desc
state.description = processDefinition.description
// connects
state.connects = processTaskRelationList
// locations
state.locations = JSON.parse(processDefinition.locations)
// global params
state.globalParams = processDefinition.globalParamList
// timeout
state.timeout = processDefinition.timeout
// tenantCode
state.tenantCode = res.data.tenantCode || 'default'
// tasks info
state.tasks = taskDefinitionList.map(task => _.pick(task, [
'code',
'name',
'version',
'description',
'delayTime',
'taskType',
'taskParams',
'flag',
'taskPriority',
'workerGroup',
'failRetryTimes',
'failRetryInterval',
'timeoutFlag',
'timeoutNotifyStrategy',
'timeout',
'environmentCode'
]))
// startup parameters
state.startup = _.assign(state.startup, _.pick(res.data, ['commandType', 'failureStrategy', 'processInstancePriority', 'workerGroup', 'warningType', 'warningGroupId', 'receivers', 'receiversCc']))
state.startup.commandParam = JSON.parse(res.data.commandParam)
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Create process definition
*/
saveDAGchart ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-definition`, {
locations: JSON.stringify(state.locations),
name: _.trim(state.name),
taskDefinitionJson: JSON.stringify(state.tasks),
taskRelationJson: JSON.stringify(state.connects),
tenantCode: state.tenantCode,
description: _.trim(state.description),
globalParams: JSON.stringify(state.globalParams),
timeout: state.timeout
}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Process definition update
*/
updateDefinition ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`projects/${state.projectCode}/process-definition/${payload}`, {
locations: JSON.stringify(state.locations),
name: _.trim(state.name),
taskDefinitionJson: JSON.stringify(state.tasks),
taskRelationJson: JSON.stringify(state.connects),
tenantCode: state.tenantCode,
description: _.trim(state.description),
globalParams: JSON.stringify(state.globalParams),
timeout: state.timeout,
releaseState: state.releaseState
}, res => {
resolve(res)
state.isEditDag = false
}).catch(e => {
reject(e)
})
})
},
/**
* Process instance update
*/
updateInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
const data = {
globalParams: state.globalParams,
tasks: state.tasks,
tenantId: state.tenantId,
timeout: state.timeout
}
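      // issue #6486 above reports this request failing with missing required
      // parameters; updateDefinition below sends taskDefinitionJson and
      // taskRelationJson instead, which is likely closer to what the backend expects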
io.put(`projects/${state.projectCode}/process-instances/${payload}`, {
processInstanceJson: JSON.stringify(data),
locations: JSON.stringify(state.locations),
connects: JSON.stringify(state.connects),
syncDefine: state.syncDefine
}, res => {
resolve(res)
state.isEditDag = false
}).catch(e => {
reject(e)
})
})
},
/**
* Get a list of process definitions (sub-workflow usage is not paged)
*/
getProcessList ({ state }, payload) {
return new Promise((resolve, reject) => {
if (state.processListS.length) {
resolve()
return
}
io.get(`projects/${state.projectCode}/process-definition/list`, payload, res => {
state.processListS = res.data
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get a list of process definitions (list page usage with pagination)
*/
getProcessListP ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition`, payload, res => {
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get a list of project
*/
getProjectList ({ state }, payload) {
return new Promise((resolve, reject) => {
if (state.projectListS.length) {
resolve()
return
}
io.get('projects/created-and-authed', payload, res => {
state.projectListS = res.data
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get a list of process definitions by project code
*/
getProcessByProjectCode ({ state }, code) {
return new Promise((resolve, reject) => {
io.get(`projects/${code}/process-definition/all`, res => {
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* get datasource
*/
getDatasourceList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('datasources/list', {
type: payload
}, res => {
resolve(res)
}).catch(res => {
reject(res)
})
})
},
/**
* get resources
*/
getResourcesList ({ state }) {
return new Promise((resolve, reject) => {
if (state.resourcesListS.length) {
resolve()
return
}
io.get('resources/list', {
type: 'FILE'
}, res => {
state.resourcesListS = res.data
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* get jar
*/
getResourcesListJar ({ state }) {
return new Promise((resolve, reject) => {
if (state.resourcesListJar.length) {
resolve()
return
}
io.get('resources/query-by-type', {
type: 'FILE'
}, res => {
state.resourcesListJar = res.data
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get process instance
*/
getProcessInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-instances`, payload, res => {
state.instanceListS = res.data.totalList
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get alarm list
*/
getNotifyGroupList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('alert-groups/list', res => {
state.notifyGroupListS = _.map(res.data, v => {
return {
id: v.id,
code: v.groupName,
disabled: false
}
})
resolve(_.cloneDeep(state.notifyGroupListS))
}).catch(res => {
reject(res)
})
})
},
/**
* Process definition startup interface
*/
processStart ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/executors/start-process-instance`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* View log
*/
getLog ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('log/detail', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Get the process instance id according to the process definition id
* @param taskId
*/
getSubProcessId ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-instances/query-sub-by-parent`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Called before the process definition starts
*/
getStartCheck ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/executors/start-check`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Create timing
*/
createSchedule ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/schedules`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Preview timing
*/
previewSchedule ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/schedules/preview`, payload, res => {
resolve(res.data)
// alert(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Timing list paging
*/
getScheduleList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/schedules`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
   * Timing offline
*/
scheduleOffline ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/schedules/${payload.id}/offline`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
   * Timing online
*/
scheduleOnline ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/schedules/${payload.id}/online`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Edit timing
*/
updateSchedule ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`projects/${state.projectCode}/schedules/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Delete process instance
*/
deleteInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}`, {}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Batch delete process instance
*/
batchDeleteInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-instances/batch-delete`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Delete definition
*/
deleteDefinition ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`projects/${state.projectCode}/process-definition/${payload.code}`, {}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Batch delete definition
*/
batchDeleteDefinition ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-definition/batch-delete`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* export definition
*/
exportDefinition ({ state }, payload) {
const downloadBlob = (data, fileNameS = 'json') => {
if (!data) {
return
}
const blob = new Blob([data])
const fileName = `${fileNameS}.json`
      if ('download' in document.createElement('a')) { // not an IE browser
const url = window.URL.createObjectURL(blob)
const link = document.createElement('a')
link.style.display = 'none'
link.href = url
link.setAttribute('download', fileName)
document.body.appendChild(link)
link.click()
        document.body.removeChild(link) // remove the element once the download has started
        window.URL.revokeObjectURL(url) // release the blob object URL
} else { // IE 10+
window.navigator.msSaveBlob(blob, fileName)
}
}
io.post(`projects/${state.projectCode}/process-definition/batch-export`, { codes: payload.codes }, res => {
downloadBlob(res, payload.fileName)
}, e => {
}, {
responseType: 'blob'
})
},
/**
* Process instance get variable
*/
getViewvariables ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-variables`, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Get udfs function based on data source
*/
getUdfList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('resources/udf-func/list', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Query task instance list
*/
getTaskInstanceList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/task-instances`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Force fail/kill/need_fault_tolerance task success
*/
forceTaskSuccess ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/task-instances/${payload.taskInstanceId}/force-success`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Query task record list
*/
getTaskRecordList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/task-record/list-paging', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Query history task record list
*/
getHistoryTaskRecordList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/task-record/history-list-paging', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* tree chart
*/
getViewTree ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/${payload.code}/view-tree`, { limit: payload.limit }, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* gantt chart
*/
getViewGantt ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-gantt`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Query task node list
*/
getProcessTasksList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/${payload.code}/tasks`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
getTaskListDefIdAll ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/batch-query-tasks`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* remove timing
*/
deleteTiming ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`projects/${state.projectCode}/schedules/${payload.scheduleId}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
getResourceId ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`resources/${payload.id}`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
genTaskCodeList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/task-definition/gen-task-codes`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
getTaskDefinitions ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/task-definition`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,483 | [Bug] [MasterServer] With MySQL 8.0, Druid make thread blocked to wait lock when load class | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
`"MasterEventExecution" #130 daemon prio=5 os_prio=0 tid=0x00007f3590024000 nid=0x355414 waiting for monitor entry [0x00007f35843fe000]
java.lang.Thread.State: BLOCKED (on object monitor)
at java.lang.ClassLoader.loadClass(ClassLoader.java:404)
- waiting to lock <0x0000000740022240> (a java.lang.Object)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:335)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:264)
at com.alibaba.druid.util.Utils.loadClass(Utils.java:212)
at com.alibaba.druid.util.MySqlUtils.getLastPacketReceivedTimeMs(MySqlUtils.java:372)
at com.alibaba.druid.pool.DruidAbstractDataSource.testConnectionInternal(DruidAbstractDataSource.java:1471)
at com.alibaba.druid.pool.DruidDataSource.recycle(DruidDataSource.java:1938)
at com.alibaba.druid.pool.DruidPooledConnection.recycle(DruidPooledConnection.java:324)
at com.alibaba.druid.pool.DruidPooledConnection.close(DruidPooledConnection.java:269)
at org.springframework.jdbc.datasource.DataSourceUtils.doCloseConnection(DataSourceUtils.java:360)
at org.springframework.jdbc.datasource.DataSourceUtils.doReleaseConnection(DataSourceUtils.java:347)
at org.springframework.jdbc.datasource.DataSourceUtils.releaseConnection(DataSourceUtils.java:314)
at org.mybatis.spring.transaction.SpringManagedTransaction.close(SpringManagedTransaction.java:115)
at org.apache.ibatis.executor.BaseExecutor.close(BaseExecutor.java:90)
at org.apache.ibatis.session.defaults.DefaultSqlSession.close(DefaultSqlSession.java:263)
at org.mybatis.spring.SqlSessionUtils.closeSqlSession(SqlSessionUtils.java:195)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:448)
at com.sun.proxy.$Proxy90.selectOne(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.selectOne(SqlSessionTemplate.java:159)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:89)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy95.selectById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.findProcessInstanceById(ProcessService.java:379)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$f0a248d9.findProcessInstanceById(<generated>)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.updateProcessInstanceState(WorkflowExecuteThread.java:1085)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.submitPostNode(WorkflowExecuteThread.java:839)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.taskFinished(WorkflowExecuteThread.java:384)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.taskStateChangeHandler(WorkflowExecuteThread.java:353)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.stateEventHandler(WorkflowExecuteThread.java:302)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.handleEvents(WorkflowExecuteThread.java:250)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.run(WorkflowExecuteThread.java:231)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
branch: dev
database: mysql 8.0.21
The MasterServer jstack output shows that when Druid loads a class, many threads are blocked waiting for the same lock.
I found that changing the druid config as follows:
```
spring.datasource.testOnBorrow=false
spring.datasource.testOnReturn=false
```
This works around the problem, but I don't think it is the right fix.
See druid issue: https://github.com/alibaba/druid/issues/3808.
Maybe we should upgrade the mysql connector version and druid version.
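As a stop-gap, a minimal sketch of the workaround in `datasource.properties`, keeping the idle-eviction check enabled so stale connections are still detected (property names as read by `SpringConnectionFactory`):
```properties
# validate connections from the eviction thread only, not on every borrow/return
spring.datasource.testWhileIdle=true
spring.datasource.testOnBorrow=false
spring.datasource.testOnReturn=false
```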
### What you expected to happen
Threads should not be blocked by Druid during class loading.
### How to reproduce
Run a workflow in complement data mode, then inspect the MasterServer with jstack.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6483 | https://github.com/apache/dolphinscheduler/pull/6484 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | c8ee2df2164308a20bb9234ac719c03f4652f433 | "2021-10-10T07:54:04Z" | java | "2021-10-10T16:40:49Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SpringConnectionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.datasource;
import static org.apache.dolphinscheduler.common.Constants.DATASOURCE_PROPERTIES;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.ibatis.mapping.DatabaseIdProvider;
import org.apache.ibatis.mapping.VendorDatabaseIdProvider;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.type.JdbcType;
import java.sql.SQLException;
import java.util.Properties;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.core.io.support.ResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import com.alibaba.druid.pool.DruidDataSource;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.core.MybatisConfiguration;
import com.baomidou.mybatisplus.core.config.GlobalConfig;
import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
/**
* data source connection factory
*/
@Configuration
@MapperScan("org.apache.dolphinscheduler.*.mapper")
public class SpringConnectionFactory {
private static final Logger logger = LoggerFactory.getLogger(SpringConnectionFactory.class);
static {
PropertyUtils.loadPropertyFile(DATASOURCE_PROPERTIES);
}
/**
* pagination interceptor
*
* @return pagination interceptor
*/
@Bean
public PaginationInterceptor paginationInterceptor() {
return new PaginationInterceptor();
}
/**
* get the data source
*
* @return druid dataSource
*/
@Bean(destroyMethod = "")
public DruidDataSource dataSource() throws SQLException {
DruidDataSource druidDataSource = new DruidDataSource();
druidDataSource.setDriverClassName(PropertyUtils.getString(Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME));
druidDataSource.setUrl(PropertyUtils.getString(Constants.SPRING_DATASOURCE_URL));
druidDataSource.setUsername(PropertyUtils.getString(Constants.SPRING_DATASOURCE_USERNAME));
druidDataSource.setPassword(PropertyUtils.getString(Constants.SPRING_DATASOURCE_PASSWORD));
druidDataSource.setValidationQuery(PropertyUtils.getString(Constants.SPRING_DATASOURCE_VALIDATION_QUERY, "SELECT 1"));
druidDataSource.setPoolPreparedStatements(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS, true));
druidDataSource.setTestWhileIdle(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_TEST_WHILE_IDLE, true));
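        // NOTE: testOnBorrow/testOnReturn default to true here, so Druid validates on
        // every checkout/return; with the MySQL driver this path goes through
        // MySqlUtils.getLastPacketReceivedTimeMs and can block threads on class
        // loading (see issue #6483 above)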
druidDataSource.setTestOnBorrow(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_BORROW, true));
druidDataSource.setTestOnReturn(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_RETURN, true));
druidDataSource.setKeepAlive(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_KEEP_ALIVE, true));
druidDataSource.setMinIdle(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MIN_IDLE, 5));
druidDataSource.setMaxActive(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MAX_ACTIVE, 50));
druidDataSource.setMaxWait(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MAX_WAIT, 60000));
druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE, 20));
druidDataSource.setInitialSize(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_INITIAL_SIZE, 5));
druidDataSource.setTimeBetweenEvictionRunsMillis(PropertyUtils.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS, 60000));
druidDataSource.setTimeBetweenConnectErrorMillis(PropertyUtils.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS, 60000));
druidDataSource.setMinEvictableIdleTimeMillis(PropertyUtils.getLong(Constants.SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS, 300000));
druidDataSource.setValidationQueryTimeout(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT, 3));
//auto commit
druidDataSource.setDefaultAutoCommit(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT, true));
druidDataSource.init();
return druidDataSource;
}
/**
     * get transaction manager
*
* @return DataSourceTransactionManager
*/
@Bean
public DataSourceTransactionManager transactionManager() throws SQLException {
return new DataSourceTransactionManager(dataSource());
}
/**
     * get sql session factory
*
* @return sqlSessionFactory
* @throws Exception sqlSessionFactory exception
*/
@Bean
public SqlSessionFactory sqlSessionFactory() throws Exception {
MybatisConfiguration configuration = new MybatisConfiguration();
configuration.setMapUnderscoreToCamelCase(true);
configuration.setCacheEnabled(false);
configuration.setCallSettersOnNulls(true);
configuration.setJdbcTypeForNull(JdbcType.NULL);
configuration.addInterceptor(paginationInterceptor());
MybatisSqlSessionFactoryBean sqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
sqlSessionFactoryBean.setConfiguration(configuration);
sqlSessionFactoryBean.setDataSource(dataSource());
GlobalConfig.DbConfig dbConfig = new GlobalConfig.DbConfig();
dbConfig.setIdType(IdType.AUTO);
GlobalConfig globalConfig = new GlobalConfig();
globalConfig.setDbConfig(dbConfig);
sqlSessionFactoryBean.setGlobalConfig(globalConfig);
sqlSessionFactoryBean.setTypeAliasesPackage("org.apache.dolphinscheduler.dao.entity");
ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
sqlSessionFactoryBean.setMapperLocations(resolver.getResources("org/apache/dolphinscheduler/dao/mapper/*Mapper.xml"));
sqlSessionFactoryBean.setTypeEnumsPackage("org.apache.dolphinscheduler.*.enums");
sqlSessionFactoryBean.setDatabaseIdProvider(databaseIdProvider());
return sqlSessionFactoryBean.getObject();
}
/**
* get sql session
*
* @return SqlSession
*/
@Bean
public SqlSession sqlSession() throws Exception {
return new SqlSessionTemplate(sqlSessionFactory());
}
@Bean
public DatabaseIdProvider databaseIdProvider() {
DatabaseIdProvider databaseIdProvider = new VendorDatabaseIdProvider();
Properties properties = new Properties();
properties.setProperty("MySQL", "mysql");
properties.setProperty("PostgreSQL", "pg");
properties.setProperty("h2", "h2");
databaseIdProvider.setProperties(properties);
return databaseIdProvider;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,483 | [Bug] [MasterServer] With MySQL 8.0, Druid make thread blocked to wait lock when load class | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
`"MasterEventExecution" #130 daemon prio=5 os_prio=0 tid=0x00007f3590024000 nid=0x355414 waiting for monitor entry [0x00007f35843fe000]
java.lang.Thread.State: BLOCKED (on object monitor)
at java.lang.ClassLoader.loadClass(ClassLoader.java:404)
- waiting to lock <0x0000000740022240> (a java.lang.Object)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:335)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:264)
at com.alibaba.druid.util.Utils.loadClass(Utils.java:212)
at com.alibaba.druid.util.MySqlUtils.getLastPacketReceivedTimeMs(MySqlUtils.java:372)
at com.alibaba.druid.pool.DruidAbstractDataSource.testConnectionInternal(DruidAbstractDataSource.java:1471)
at com.alibaba.druid.pool.DruidDataSource.recycle(DruidDataSource.java:1938)
at com.alibaba.druid.pool.DruidPooledConnection.recycle(DruidPooledConnection.java:324)
at com.alibaba.druid.pool.DruidPooledConnection.close(DruidPooledConnection.java:269)
at org.springframework.jdbc.datasource.DataSourceUtils.doCloseConnection(DataSourceUtils.java:360)
at org.springframework.jdbc.datasource.DataSourceUtils.doReleaseConnection(DataSourceUtils.java:347)
at org.springframework.jdbc.datasource.DataSourceUtils.releaseConnection(DataSourceUtils.java:314)
at org.mybatis.spring.transaction.SpringManagedTransaction.close(SpringManagedTransaction.java:115)
at org.apache.ibatis.executor.BaseExecutor.close(BaseExecutor.java:90)
at org.apache.ibatis.session.defaults.DefaultSqlSession.close(DefaultSqlSession.java:263)
at org.mybatis.spring.SqlSessionUtils.closeSqlSession(SqlSessionUtils.java:195)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:448)
at com.sun.proxy.$Proxy90.selectOne(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.selectOne(SqlSessionTemplate.java:159)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:89)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy95.selectById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.findProcessInstanceById(ProcessService.java:379)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$f0a248d9.findProcessInstanceById(<generated>)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.updateProcessInstanceState(WorkflowExecuteThread.java:1085)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.submitPostNode(WorkflowExecuteThread.java:839)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.taskFinished(WorkflowExecuteThread.java:384)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.taskStateChangeHandler(WorkflowExecuteThread.java:353)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.stateEventHandler(WorkflowExecuteThread.java:302)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.handleEvents(WorkflowExecuteThread.java:250)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.run(WorkflowExecuteThread.java:231)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
branch: dev
database: mysql 8.0.21
The MasterServer jstack output shows that when Druid loads a class, many threads are blocked waiting for the same lock.
I found that changing the druid config as follows:
```
spring.datasource.testOnBorrow=false
spring.datasource.testOnReturn=false
```
This works around the problem, but I don't think it is the right fix.
See druid issue: https://github.com/alibaba/druid/issues/3808.
Maybe we should upgrade the mysql connector version and druid version.
### What you expected to happen
Threads should not be blocked by Druid during class loading.
### How to reproduce
Run a workflow in complement data mode, then inspect the MasterServer with jstack.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6483 | https://github.com/apache/dolphinscheduler/pull/6484 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | c8ee2df2164308a20bb9234ac719c03f4652f433 | "2021-10-10T07:54:04Z" | java | "2021-10-10T16:40:49Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>2.0.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<zookeeper.version>3.4.14</zookeeper.version>
<spring.version>5.1.19.RELEASE</spring.version>
<spring.boot.version>2.1.18.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
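        <!-- issue #6483 above suggests upgrading druid and the mysql connector
             (below) so that per-checkout validation does not block on driver class
             loading; the versions pinned here predate that change -->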
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>4.1.2</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.2.5</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.9.1</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.1.2</checkstyle.version>
<curator.test>2.12.0</curator.test>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
<java-websocket.version>1.5.1</java-websocket.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.java-websocket</groupId>
<artifactId>Java-WebSocket</artifactId>
<version>${java-websocket.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-standalone-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-registry-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
                        <groupId>org.apache.logging.log4j</groupId>
                        <artifactId>log4j-1.2-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.test}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jacoco</groupId>
<artifactId>org.jacoco.agent</artifactId>
<version>${jacoco.version}</version>
<classifier>runtime</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi-ooxml</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
                        <artifactId>jersey-json</artifactId>
                        <groupId>com.sun.jersey</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
                    <version>${rpm-maven-plugin.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<systemPropertyVariables>
<jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile>
</systemPropertyVariables>
<includes>
<!--registry plugin -->
<include>**/plugin/registry/zookeeper/ZookeeperRegistryTest.java</include>
<!-- API -->
<include>**/api/controller/ProjectControllerTest.java</include>
<include>**/api/controller/QueueControllerTest.java</include>
<include>**/api/configuration/TrafficConfigurationTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/controller/SchedulerControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
                        <include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/interceptor/RateLimitInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessTaskRelationServiceImplTest.java</include>
<include>**/api/service/TaskDefinitionServiceImplTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/service/EnvironmentServiceTest.java</include>
<include>**/api/service/EnvironmentWorkerGroupRelationServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/controller/EnvironmentControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SparkParametersTest.java</include>
<include>**/common/task/SqlParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/datasource/clickhouse/ClickHouseDatasourceProcessorTest.java</include>
<include>**/common/datasource/db2/Db2DatasourceProcessorTest.java</include>
<include>**/common/datasource/hive/HiveDatasourceProcessorTest.java</include>
<include>**/common/datasource/mysql/MysqlDatasourceProcessorTest.java</include>
<include>**/common/datasource/oracle/OracleDatasourceProcessorTest.java</include>
<include>**/common/datasource/postgresql/PostgreSqlDatasourceProcessorTest.java</include>
<include>**/common/datasource/presto/PrestoDatasourceProcessorTest.java</include>
<include>**/common/datasource/spark/SparkDatasourceProcessorTest.java</include>
<include>**/common/datasource/sqlserver/SqlServerDatasourceProcessorTest.java</include>
<include>**/common/datasource/DatasourceUtilTest.java</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/command/log/GetLogBytesRequestCommandTest.java</include>
<include>**/remote/command/log/GetLogBytesResponseCommandTest.java</include>
<include>**/remote/command/log/ViewLogRequestCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/cache/impl/TaskInstanceCacheManagerImplTest.java</include>
<include>**/server/master/config/MasterConfigTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/registry/MasterRegistryClientTest.java</include>
<include>**/server/master/registry/ServerNodeManagerTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/SwitchTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/master/zk/ZKMasterClientTest.java</include>
<include>**/server/registry/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/datax/DataxTaskTest.java</include>
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/processdure/ProcedureTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/PythonCommandExecutorTest.java</include>
<include>**/server/worker/task/TaskParamsTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/task/sql/SqlTaskTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/registry/RegistryClientTest.java</include>
<include>**/service/registry/RegistryPluginTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/service/alert/ProcessAlertManagerTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
                        <!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/plugin/alert/slack/SlackAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/slack/SlackAlertPluginTest.java</include>
<include>**/plugin/alert/slack/SlackSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/spi/plugin/DolphinSchedulerPluginLoaderTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
<include>**/plugin/task/pigeon/PigeonTaskTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<dataFile>${project.build.directory}/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>default-instrument</id>
<goals>
<goal>instrument</goal>
</goals>
</execution>
<execution>
<id>default-restore-instrumented-classes</id>
<goals>
<goal>restore-instrumented-classes</goal>
</goals>
</execution>
<execution>
<id>default-report</id>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.45</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-registry-plugin</module>
<module>dolphinscheduler-task-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-microbench</module>
<module>dolphinscheduler-standalone-server</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,483 | [Bug] [MasterServer] With MySQL 8.0, Druid make thread blocked to wait lock when load class | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
`"MasterEventExecution" #130 daemon prio=5 os_prio=0 tid=0x00007f3590024000 nid=0x355414 waiting for monitor entry [0x00007f35843fe000]
java.lang.Thread.State: BLOCKED (on object monitor)
at java.lang.ClassLoader.loadClass(ClassLoader.java:404)
- waiting to lock <0x0000000740022240> (a java.lang.Object)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:335)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:264)
at com.alibaba.druid.util.Utils.loadClass(Utils.java:212)
at com.alibaba.druid.util.MySqlUtils.getLastPacketReceivedTimeMs(MySqlUtils.java:372)
at com.alibaba.druid.pool.DruidAbstractDataSource.testConnectionInternal(DruidAbstractDataSource.java:1471)
at com.alibaba.druid.pool.DruidDataSource.recycle(DruidDataSource.java:1938)
at com.alibaba.druid.pool.DruidPooledConnection.recycle(DruidPooledConnection.java:324)
at com.alibaba.druid.pool.DruidPooledConnection.close(DruidPooledConnection.java:269)
at org.springframework.jdbc.datasource.DataSourceUtils.doCloseConnection(DataSourceUtils.java:360)
at org.springframework.jdbc.datasource.DataSourceUtils.doReleaseConnection(DataSourceUtils.java:347)
at org.springframework.jdbc.datasource.DataSourceUtils.releaseConnection(DataSourceUtils.java:314)
at org.mybatis.spring.transaction.SpringManagedTransaction.close(SpringManagedTransaction.java:115)
at org.apache.ibatis.executor.BaseExecutor.close(BaseExecutor.java:90)
at org.apache.ibatis.session.defaults.DefaultSqlSession.close(DefaultSqlSession.java:263)
at org.mybatis.spring.SqlSessionUtils.closeSqlSession(SqlSessionUtils.java:195)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:448)
at com.sun.proxy.$Proxy90.selectOne(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.selectOne(SqlSessionTemplate.java:159)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:89)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy95.selectById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.findProcessInstanceById(ProcessService.java:379)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$f0a248d9.findProcessInstanceById(<generated>)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.updateProcessInstanceState(WorkflowExecuteThread.java:1085)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.submitPostNode(WorkflowExecuteThread.java:839)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.taskFinished(WorkflowExecuteThread.java:384)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.taskStateChangeHandler(WorkflowExecuteThread.java:353)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.stateEventHandler(WorkflowExecuteThread.java:302)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.handleEvents(WorkflowExecuteThread.java:250)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.run(WorkflowExecuteThread.java:231)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
branch: dev
database: MySQL 8.0.21
The MasterServer jstack output shows that many threads are blocked waiting for a lock while Druid loads a class.
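For context, here is a minimal, illustrative simulation of that contention — not Druid's actual code, just a sketch under the assumption that repeated `Class.forName` calls for the same missing class name serialize on the class loader's per-name lock, which is what the BLOCKED frames above show:
```java
// Illustrative simulation only — not Druid's code. Many threads calling
// Class.forName for the same (missing) class name contend on the class
// loader's per-name lock object, like the "waiting to lock <0x...>" frames.
public class ClassLoadContention {
    public static void main(String[] args) {
        Runnable recycle = () -> {
            for (int i = 0; i < 1_000; i++) {
                try {
                    // Druid's MySqlUtils.getLastPacketReceivedTimeMs triggers a
                    // driver-class lookup on every connection recycle; with the
                    // MySQL 8 driver the old class name is missing, so the
                    // lookup fails — and repeats — while holding the lock.
                    Class.forName("com.example.missing.DriverInternals");
                } catch (ClassNotFoundException ignored) {
                    // expected: the repeated failing lookup is the hot path
                }
            }
        };
        for (int t = 0; t < 8; t++) {
            new Thread(recycle, "MasterEventExecution-" + t).start();
        }
    }
}
```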
I found that setting the Druid config as follows:
```
spring.datasource.testOnBorrow=false
spring.datasource.testOnReturn=false
```
can work around this problem, but I don't think it's the right way.
See the related Druid issue: https://github.com/alibaba/druid/issues/3808.
Maybe we should upgrade the MySQL connector and Druid versions.
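As a rough sketch of that suggestion: the property names below match the root pom.xml shown earlier in this dump, but the target version numbers are illustrative assumptions, not tested recommendations:
```xml
<!-- hypothetical version bumps in the root pom.xml <properties> block -->
<properties>
    <mysql.connector.version>8.0.16</mysql.connector.version>
    <druid.version>1.2.4</druid.version>
</properties>
```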
### What you expected to happen
Threads should not be blocked by Druid's class loading.
### How to reproduce
Run in complete-data mode, then run jstack on the MasterServer process.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6483 | https://github.com/apache/dolphinscheduler/pull/6484 | eb21a1763e43ee5b1cb418a0835b0bf1d13e295e | c8ee2df2164308a20bb9234ac719c03f4652f433 | "2021-10-10T07:54:04Z" | java | "2021-10-10T16:40:49Z" | tools/dependencies/known-dependencies.txt | HikariCP-3.2.0.jar
activation-1.1.jar
aether-api-1.13.1.jar
aether-connector-asynchttpclient-1.13.1.jar
aether-connector-file-1.13.1.jar
aether-impl-1.13.1.jar
aether-spi-1.13.1.jar
aether-util-1.13.1.jar
animal-sniffer-annotations-1.14.jar
aopalliance-1.0.jar
apache-el-8.5.54.jar
apacheds-i18n-2.0.0-M15.jar
apacheds-kerberos-codec-2.0.0-M15.jar
api-asn1-api-1.0.0-M20.jar
api-util-1.0.0-M20.jar
asm-3.1.jar
asm-6.2.1.jar
aspectjweaver-1.9.6.jar
async-http-client-1.6.5.jar
audience-annotations-0.5.0.jar
avro-1.7.4.jar
aws-java-sdk-1.7.4.jar
bonecp-0.8.0.RELEASE.jar
byte-buddy-1.9.16.jar
checker-compat-qual-2.0.0.jar
classmate-1.4.0.jar
clickhouse-jdbc-0.1.52.jar
commons-email-1.5.jar
commons-cli-1.2.jar
commons-codec-1.11.jar
commons-collections-3.2.2.jar
commons-collections4-4.1.jar
commons-compress-1.19.jar
commons-compress-1.4.1.jar
commons-compiler-3.0.16.jar
commons-configuration-1.10.jar
commons-daemon-1.0.13.jar
commons-beanutils-1.9.4.jar
commons-dbcp-1.4.jar
commons-httpclient-3.0.1.jar
commons-io-2.4.jar
commons-lang-2.6.jar
commons-logging-1.1.1.jar
commons-math3-3.1.1.jar
commons-math3-3.6.1.jar
commons-net-3.1.jar
commons-pool-1.6.jar
cron-utils-5.0.5.jar
curator-client-4.3.0.jar
curator-framework-4.3.0.jar
curator-recipes-4.3.0.jar
curator-test-2.12.0.jar
curvesapi-1.06.jar
datanucleus-api-jdo-4.2.1.jar
datanucleus-core-4.1.6.jar
datanucleus-rdbms-4.1.7.jar
derby-10.14.2.0.jar
error_prone_annotations-2.1.3.jar
druid-1.1.22.jar
gson-2.8.6.jar
guava-24.1-jre.jar
guava-retrying-2.0.0.jar
guice-3.0.jar
guice-servlet-3.0.jar
h2-1.4.200.jar
hadoop-annotations-2.7.3.jar
hadoop-auth-2.7.3.jar
hadoop-aws-2.7.3.jar
hadoop-client-2.7.3.jar
hadoop-common-2.7.3.jar
hadoop-hdfs-2.7.3.jar
hadoop-mapreduce-client-app-2.7.3.jar
hadoop-mapreduce-client-common-2.7.3.jar
hadoop-mapreduce-client-core-2.7.3.jar
hadoop-mapreduce-client-jobclient-2.7.3.jar
hadoop-mapreduce-client-shuffle-2.7.3.jar
hadoop-yarn-api-2.7.3.jar
hadoop-yarn-client-2.7.3.jar
hadoop-yarn-common-2.7.3.jar
hadoop-yarn-server-common-2.7.3.jar
hamcrest-core-1.3.jar
hibernate-validator-6.0.21.Final.jar
hive-common-2.1.0.jar
hive-jdbc-2.1.0.jar
hive-metastore-2.1.0.jar
hive-orc-2.1.0.jar
hive-serde-2.1.0.jar
hive-service-2.1.0.jar
hive-service-rpc-2.1.0.jar
hive-storage-api-2.1.0.jar
htrace-core-3.1.0-incubating.jar
httpclient-4.4.1.jar
httpcore-4.4.1.jar
httpmime-4.5.13.jar
j2objc-annotations-1.1.jar
jackson-annotations-2.10.5.jar
jackson-core-2.10.5.jar
jackson-core-asl-1.9.13.jar
jackson-databind-2.10.5.jar
jackson-datatype-jdk8-2.9.10.jar
jackson-datatype-jsr310-2.9.10.jar
jackson-jaxrs-1.9.13.jar
jackson-mapper-asl-1.9.13.jar
jackson-module-parameter-names-2.9.10.jar
jackson-xc-1.9.13.jar
jamon-runtime-2.3.1.jar
janino-3.0.16.jar
java-xmlbuilder-0.4.jar
javassist-3.26.0-GA.jar
javax.activation-api-1.2.0.jar
javax.annotation-api-1.3.2.jar
javax.inject-1.jar
javax.jdo-3.2.0-m3.jar
javax.mail-1.6.2.jar
javax.servlet-api-3.1.0.jar
javolution-5.5.1.jar
jaxb-api-2.3.1.jar
jaxb-impl-2.2.3-1.jar
jboss-logging-3.3.3.Final.jar
jdo-api-3.0.1.jar
jersey-client-1.9.jar
jersey-core-1.9.jar
jersey-guice-1.9.jar
jersey-json-1.9.jar
jersey-server-1.9.jar
jets3t-0.9.0.jar
jettison-1.1.jar
jetty-6.1.26.jar
jetty-continuation-9.4.33.v20201020.jar
jetty-http-9.4.33.v20201020.jar
jetty-io-9.4.33.v20201020.jar
jetty-security-9.4.33.v20201020.jar
jetty-server-9.4.33.v20201020.jar
jetty-servlet-9.4.33.v20201020.jar
jetty-servlets-9.4.33.v20201020.jar
jetty-util-6.1.26.jar
jetty-util-9.4.33.v20201020.jar
jetty-webapp-9.4.33.v20201020.jar
jetty-xml-9.4.33.v20201020.jar
jline-0.9.94.jar
jna-4.5.2.jar
jna-platform-4.5.2.jar
joda-time-2.10.8.jar
jpam-1.1.jar
jsch-0.1.42.jar
jsp-api-2.1.jar
jsqlparser-2.1.jar
jsr305-1.3.9.jar
jsr305-3.0.0.jar
jta-1.1.jar
jul-to-slf4j-1.7.30.jar
junit-4.12.jar
leveldbjni-all-1.8.jar
libfb303-0.9.3.jar
libthrift-0.9.3.jar
log4j-1.2-api-2.11.2.jar
log4j-1.2.17.jar
logback-classic-1.2.3.jar
logback-core-1.2.3.jar
lz4-1.3.0.jar
mapstruct-1.2.0.Final.jar
maven-aether-provider-3.0.4.jar
maven-artifact-3.0.4.jar
maven-compat-3.0.4.jar
maven-core-3.0.4.jar
maven-embedder-3.0.4.jar
maven-model-3.0.4.jar
maven-model-builder-3.0.4.jar
maven-plugin-api-3.0.4.jar
maven-repository-metadata-3.0.4.jar
maven-settings-3.0.4.jar
maven-settings-builder-3.0.4.jar
mssql-jdbc-6.1.0.jre8.jar
mybatis-3.5.2.jar
mybatis-plus-3.2.0.jar
mybatis-plus-annotation-3.2.0.jar
mybatis-plus-boot-starter-3.2.0.jar
mybatis-plus-core-3.2.0.jar
mybatis-plus-extension-3.2.0.jar
mybatis-spring-2.0.2.jar
netty-3.6.2.Final.jar
netty-all-4.1.53.Final.jar
opencsv-2.3.jar
oshi-core-3.9.1.jar
paranamer-2.3.jar
parquet-hadoop-bundle-1.8.1.jar
plexus-cipher-1.7.jar
plexus-classworlds-2.4.jar
plexus-component-annotations-1.5.5.jar
plexus-container-default-1.5.5.jar
plexus-interpolation-1.14.jar
plexus-sec-dispatcher-1.3.jar
plexus-utils-2.0.6.jar
poi-4.1.2.jar
poi-ooxml-4.1.2.jar
poi-ooxml-schemas-4.1.2.jar
postgresql-42.2.5.jar
presto-jdbc-0.238.1.jar
protobuf-java-2.5.0.jar
protostuff-core-1.7.2.jar
protostuff-runtime-1.7.2.jar
protostuff-api-1.7.2.jar
protostuff-collectionschema-1.7.2.jar
quartz-2.3.0.jar
quartz-jobs-2.3.0.jar
reflections-0.9.12.jar
resolver-1.5.jar
slf4j-api-1.7.5.jar
snakeyaml-1.23.jar
snappy-0.2.jar
snappy-java-1.0.4.1.jar
SparseBitSet-1.2.jar
spring-aop-5.1.19.RELEASE.jar
spring-beans-5.1.19.RELEASE.jar
spring-boot-2.1.18.RELEASE.jar
spring-boot-autoconfigure-2.1.18.RELEASE.jar
spring-boot-starter-2.1.18.RELEASE.jar
spring-boot-starter-aop-2.1.18.RELEASE.jar
spring-boot-starter-jdbc-2.1.18.RELEASE.jar
spring-boot-starter-jetty-2.1.18.RELEASE.jar
spring-boot-starter-json-2.1.18.RELEASE.jar
spring-boot-starter-logging-2.1.18.RELEASE.jar
spring-boot-starter-web-2.1.18.RELEASE.jar
spring-context-5.1.19.RELEASE.jar
spring-core-5.1.19.RELEASE.jar
spring-expression-5.1.19.RELEASE.jar
spring-jcl-5.1.19.RELEASE.jar
spring-jdbc-5.1.19.RELEASE.jar
spring-plugin-core-1.2.0.RELEASE.jar
spring-plugin-metadata-1.2.0.RELEASE.jar
spring-tx-5.1.19.RELEASE.jar
spring-web-5.1.19.RELEASE.jar
spring-webmvc-5.1.19.RELEASE.jar
springfox-core-2.9.2.jar
springfox-schema-2.9.2.jar
springfox-spi-2.9.2.jar
springfox-spring-web-2.9.2.jar
springfox-swagger-common-2.9.2.jar
springfox-swagger-ui-2.9.2.jar
springfox-swagger2-2.9.2.jar
swagger-annotations-1.5.20.jar
swagger-bootstrap-ui-1.9.3.jar
swagger-models-1.5.24.jar
tephra-api-0.6.0.jar
transaction-api-1.1.jar
validation-api-2.0.1.Final.jar
wagon-provider-api-2.2.jar
xbean-reflect-3.4.jar
xercesImpl-2.9.1.jar
xml-apis-1.4.01.jar
xmlbeans-3.1.0.jar
xmlenc-0.52.jar
xz-1.0.jar
zookeeper-3.4.14.jar
Java-WebSocket-1.5.1.jar |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,463 | [Bug] [UI] The front-end verification task name already exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The front-end verification task name already exists
### What you expected to happen
The task name can have the same
### How to reproduce
remove front-end verification in the formModel.vue
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6463 | https://github.com/apache/dolphinscheduler/pull/6464 | c8ee2df2164308a20bb9234ac719c03f4652f433 | 6e70e51e9f730cff8653496a28c76e84823403c7 | "2021-10-08T10:13:22Z" | java | "2021-10-11T02:07:57Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="form-model-wrapper" v-clickoutside="_handleClose">
<div class="title-box">
<span class="name">{{ $t("Current node settings") }}</span>
<span class="go-subtask">
      <!-- This component can't pop up a dialog box itself, so it is handled here -->
<m-log
v-if="type === 'instance' && taskInstance"
:item="backfillItem"
:task-instance-id="taskInstance.id"
>
<template slot="history"
><a href="javascript:" @click="_seeHistory"
><em class="ansicon el-icon-alarm-clock"></em
><em>{{ $t("View history") }}</em></a
></template
>
<template slot="log"
><a href="javascript:"
><em class="ansicon el-icon-document"></em
><em>{{ $t("View log") }}</em></a
></template
>
</m-log>
<a href="javascript:" @click="_goSubProcess" v-if="_isGoSubProcess"
><em class="ansicon ri-node-tree"></em
><em>{{ $t("Enter this child node") }}</em></a
>
</span>
</div>
<div class="content-box" v-if="isContentBox">
<div class="form-model">
<!-- Reference from task -->
<!-- <reference-from-task :taskType="nodeData.taskType" /> -->
<!-- Node name -->
<m-list-box>
<div slot="text">{{ $t("Node name") }}</div>
<div slot="content">
<el-input
type="text"
v-model="name"
size="small"
:disabled="isDetails"
:placeholder="$t('Please enter name (required)')"
maxlength="100"
@blur="_verifName()"
>
</el-input>
</div>
</m-list-box>
<!-- Running sign -->
<m-list-box>
<div slot="text">{{ $t("Run flag") }}</div>
<div slot="content">
<el-radio-group v-model="runFlag" size="small">
<el-radio :label="'YES'" :disabled="isDetails">{{
$t("Normal")
}}</el-radio>
<el-radio :label="'NO'" :disabled="isDetails">{{
$t("Prohibition execution")
}}</el-radio>
</el-radio-group>
</div>
</m-list-box>
<!-- description -->
<m-list-box>
<div slot="text">{{ $t("Description") }}</div>
<div slot="content">
<el-input
:rows="2"
type="textarea"
:disabled="isDetails"
v-model="desc"
:placeholder="$t('Please enter description')"
>
</el-input>
</div>
</m-list-box>
<!-- Task priority -->
<m-list-box>
<div slot="text">{{ $t("Task priority") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<m-priority v-model="taskInstancePriority"></m-priority>
</span>
</div>
</m-list-box>
<!-- Worker group and environment -->
<m-list-box>
<div slot="text">{{ $t("Worker group") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<m-worker-groups v-model="workerGroup"></m-worker-groups>
</span>
<span class="text-b">{{ $t("Environment Name") }}</span>
<m-related-environment
v-model="environmentCode"
:workerGroup="workerGroup"
:isNewCreate="isNewCreate"
v-on:environmentCodeEvent="_onUpdateEnvironmentCode"
></m-related-environment>
</div>
</m-list-box>
<!-- Number of failed retries -->
<m-list-box v-if="nodeData.taskType !== 'SUB_PROCESS'">
<div slot="text">{{ $t("Number of failed retries") }}</div>
<div slot="content">
<m-select-input
v-model="maxRetryTimes"
:list="[0, 1, 2, 3, 4]"
></m-select-input>
<span>({{ $t("Times") }})</span>
<span class="text-b">{{ $t("Failed retry interval") }}</span>
<m-select-input
v-model="retryInterval"
:list="[1, 10, 30, 60, 120]"
></m-select-input>
<span>({{ $t("Minute") }})</span>
</div>
</m-list-box>
<!-- Delay execution time -->
<m-list-box
v-if="
nodeData.taskType !== 'SUB_PROCESS' &&
nodeData.taskType !== 'CONDITIONS' &&
nodeData.taskType !== 'DEPENDENT' &&
nodeData.taskType !== 'SWITCH'
"
>
<div slot="text">{{ $t("Delay execution time") }}</div>
<div slot="content">
<m-select-input
v-model="delayTime"
:list="[0, 1, 5, 10]"
></m-select-input>
<span>({{ $t("Minute") }})</span>
</div>
</m-list-box>
<!-- Branch flow -->
<m-list-box v-if="nodeData.taskType === 'CONDITIONS'">
<div slot="text">{{ $t("State") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<el-select
style="width: 157px"
size="small"
v-model="successNode"
:disabled="true"
>
<el-option
v-for="item in stateList"
:key="item.value"
:value="item.value"
:label="item.label"
></el-option>
</el-select>
</span>
<span class="text-b" style="padding-left: 38px">{{
$t("Branch flow")
}}</span>
<el-select
style="width: 157px"
size="small"
v-model="successBranch"
clearable
:disabled="isDetails"
>
<el-option
v-for="item in postTasks"
:key="item.code"
:value="item.name"
:label="item.name"
></el-option>
</el-select>
</div>
</m-list-box>
<m-list-box v-if="nodeData.taskType === 'CONDITIONS'">
<div slot="text">{{ $t("State") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<el-select
style="width: 157px"
size="small"
v-model="failedNode"
:disabled="true"
>
<el-option
v-for="item in stateList"
:key="item.value"
:value="item.value"
:label="item.label"
></el-option>
</el-select>
</span>
<span class="text-b" style="padding-left: 38px">{{
$t("Branch flow")
}}</span>
<el-select
style="width: 157px"
size="small"
v-model="failedBranch"
clearable
:disabled="isDetails"
>
<el-option
v-for="item in postTasks"
:key="item.code"
:value="item.name"
:label="item.name"
></el-option>
</el-select>
</div>
</m-list-box>
<div v-if="backfillRefresh">
<!-- Task timeout alarm -->
<m-timeout-alarm
v-if="nodeData.taskType !== 'DEPENDENT'"
ref="timeout"
:backfill-item="backfillItem"
@on-timeout="_onTimeout"
>
</m-timeout-alarm>
<!-- Dependent timeout alarm -->
<m-dependent-timeout
v-if="nodeData.taskType === 'DEPENDENT'"
ref="dependentTimeout"
:backfill-item="backfillItem"
@on-timeout="_onDependentTimeout"
>
</m-dependent-timeout>
<!-- shell node -->
<m-shell
v-if="nodeData.taskType === 'SHELL'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SHELL"
:backfill-item="backfillItem"
>
</m-shell>
<!-- sub_process node -->
<m-sub-process
v-if="nodeData.taskType === 'SUB_PROCESS'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
@on-set-process-name="_onSetProcessName"
ref="SUB_PROCESS"
:backfill-item="backfillItem"
>
</m-sub-process>
<!-- procedure node -->
<m-procedure
v-if="nodeData.taskType === 'PROCEDURE'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="PROCEDURE"
:backfill-item="backfillItem"
>
</m-procedure>
<!-- sql node -->
<m-sql
v-if="nodeData.taskType === 'SQL'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SQL"
:create-node-id="nodeData.id"
:backfill-item="backfillItem"
>
</m-sql>
<!-- spark node -->
<m-spark
v-if="nodeData.taskType === 'SPARK'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SPARK"
:backfill-item="backfillItem"
>
</m-spark>
<m-flink
v-if="nodeData.taskType === 'FLINK'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="FLINK"
:backfill-item="backfillItem"
>
</m-flink>
<!-- mr node -->
<m-mr
v-if="nodeData.taskType === 'MR'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="MR"
:backfill-item="backfillItem"
>
</m-mr>
<!-- python node -->
<m-python
v-if="nodeData.taskType === 'PYTHON'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="PYTHON"
:backfill-item="backfillItem"
>
</m-python>
<!-- dependent node -->
<m-dependent
v-if="nodeData.taskType === 'DEPENDENT'"
@on-dependent="_onDependent"
@on-cache-dependent="_onCacheDependent"
ref="DEPENDENT"
:backfill-item="backfillItem"
>
</m-dependent>
<m-http
v-if="nodeData.taskType === 'HTTP'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="HTTP"
:backfill-item="backfillItem"
>
</m-http>
<m-datax
v-if="nodeData.taskType === 'DATAX'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="DATAX"
:backfill-item="backfillItem"
>
</m-datax>
<m-pigeon
v-if="nodeData.taskType === 'PIGEON'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
:backfill-item="backfillItem"
ref="PIGEON">
</m-pigeon>
<m-sqoop
v-if="nodeData.taskType === 'SQOOP'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SQOOP"
:backfill-item="backfillItem"
>
</m-sqoop>
<m-conditions
v-if="nodeData.taskType === 'CONDITIONS'"
ref="CONDITIONS"
@on-dependent="_onDependent"
@on-cache-dependent="_onCacheDependent"
:backfill-item="backfillItem"
:prev-tasks="prevTasks"
>
</m-conditions>
<m-switch
v-if="nodeData.taskType === 'SWITCH'"
ref="SWITCH"
@on-switch-result="_onSwitchResult"
:backfill-item="backfillItem"
:nodeData="nodeData"
></m-switch>
<!-- waterdrop node -->
<m-waterdrop
v-if="nodeData.taskType === 'WATERDROP'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="WATERDROP"
:backfill-item="backfillItem"
>
</m-waterdrop>
</div>
<!-- Pre-tasks in workflow -->
<m-pre-tasks
ref="preTasks"
v-if="['SHELL', 'SUB_PROCESS'].indexOf(nodeData.taskType) > -1"
:code="code"
/>
</div>
</div>
<div class="bottom-box">
<div class="submit" style="background: #fff">
<el-button type="text" size="small" id="cancelBtn">
{{ $t("Cancel") }}
</el-button>
<el-button
type="primary"
size="small"
round
:loading="spinnerLoading"
@click="ok()"
:disabled="isDetails"
>{{ spinnerLoading ? $t("Loading...") : $t("Confirm add") }}
</el-button>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import { mapActions, mapState } from 'vuex'
import mLog from './log'
import mMr from './tasks/mr'
import mSql from './tasks/sql'
import i18n from '@/module/i18n'
import mListBox from './tasks/_source/listBox'
import mShell from './tasks/shell'
import mWaterdrop from './tasks/waterdrop'
import mSpark from './tasks/spark'
import mFlink from './tasks/flink'
import mPython from './tasks/python'
import mProcedure from './tasks/procedure'
import mDependent from './tasks/dependent'
import mHttp from './tasks/http'
import mDatax from './tasks/datax'
import mPigeon from './tasks/pigeon'
import mConditions from './tasks/conditions'
import mSwitch from './tasks/switch.vue'
import mSqoop from './tasks/sqoop'
import mSubProcess from './tasks/sub_process'
import mSelectInput from './_source/selectInput'
import mTimeoutAlarm from './_source/timeoutAlarm'
import mDependentTimeout from './_source/dependentTimeout'
import mWorkerGroups from './_source/workerGroups'
import mRelatedEnvironment from './_source/relatedEnvironment'
import mPreTasks from './tasks/pre_tasks'
import clickoutside from '@/module/util/clickoutside'
import disabledState from '@/module/mixin/disabledState'
import mPriority from '@/module/components/priority/priority'
import { findComponentDownward } from '@/module/util/'
// import ReferenceFromTask from './_source/referenceFromTask.vue'
export default {
name: 'form-model',
data () {
return {
// loading
spinnerLoading: false,
// node name
name: '',
// description
desc: '',
// Node echo data
backfillItem: {},
cacheBackfillItem: {},
// Resource(list)
resourcesList: [],
successNode: 'success',
failedNode: 'failed',
successBranch: '',
failedBranch: '',
conditionResult: {
successNode: [],
failedNode: []
},
switchResult: {},
// dependence
dependence: {},
// cache dependence
cacheDependence: {},
// task code
code: 0,
// Current node params data
params: {},
// Running sign
runFlag: 'YES',
      // Whether the content box is rendered; rendering is deferred until the node data has been backfilled, to avoid echoing stale values into the form
isContentBox: false,
// Number of failed retries
maxRetryTimes: '0',
// Failure retry interval
retryInterval: '1',
// Delay execution time
delayTime: '0',
// Task timeout alarm
timeout: {},
// (For Dependent nodes) Wait start timeout alarm
waitStartTimeout: {},
// Task priority
taskInstancePriority: 'MEDIUM',
// worker group id
workerGroup: 'default',
// selected environment
environmentCode: '',
selectedWorkerGroup: '',
stateList: [
{
value: 'success',
label: `${i18n.$t('Success')}`
},
{
value: 'failed',
label: `${i18n.$t('Failed')}`
}
],
// for CONDITIONS
postTasks: [],
prevTasks: [],
      // refresh part of the formModel after backfillItem is set from outside
backfillRefresh: true,
// whether this is a new Task
isNewCreate: true
}
},
provide () {
return {
formModel: this
}
},
/**
* Click on events that are not generated internally by the component
*/
directives: { clickoutside },
mixins: [disabledState],
props: {
nodeData: Object,
type: {
type: String,
default: ''
}
},
inject: ['dagChart'],
methods: {
...mapActions('dag', ['getTaskInstanceList']),
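    // map a task definition from the store into the shape expected for form backfill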
taskToBackfillItem (task) {
return {
code: task.code,
conditionResult: task.taskParams.conditionResult,
delayTime: task.delayTime,
dependence: task.taskParams.dependence,
desc: task.description,
id: task.id,
maxRetryTimes: task.failRetryTimes,
name: task.name,
params: _.omit(task.taskParams, [
'conditionResult',
'dependence',
'waitStartTimeout'
]),
retryInterval: task.failRetryInterval,
runFlag: task.flag,
taskInstancePriority: task.taskPriority,
timeout: {
interval: task.timeout,
strategy: task.timeoutNotifyStrategy,
enable: task.timeoutFlag === 'OPEN'
},
type: task.taskType,
waitStartTimeout: task.taskParams.waitStartTimeout,
workerGroup: task.workerGroup,
environmentCode: task.environmentCode
}
},
/**
* depend
*/
_onDependent (o) {
this.dependence = Object.assign(this.dependence, {}, o)
},
_onSwitchResult (o) {
this.switchResult = o
},
/**
* cache dependent
*/
_onCacheDependent (o) {
this.cacheDependence = Object.assign(this.cacheDependence, {}, o)
},
/**
* Task timeout alarm
*/
_onTimeout (o) {
this.timeout = Object.assign(this.timeout, {}, o)
},
/**
* Dependent timeout alarm
*/
_onDependentTimeout (o) {
this.timeout = Object.assign(this.timeout, {}, o.waitCompleteTimeout)
this.waitStartTimeout = Object.assign(
this.waitStartTimeout,
{},
o.waitStartTimeout
)
},
/**
* Click external to close the current component
*/
_handleClose () {
// this.close()
},
/**
* Jump to task instance
*/
_seeHistory () {
this.$emit('seeHistory', this.backfillItem.name)
},
/**
* Enter the child node to judge the process instance or the process definition
* @param type = instance
*/
_goSubProcess () {
if (_.isEmpty(this.backfillItem)) {
this.$message.warning(
`${i18n.$t(
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process'
)}`
)
return
}
if (this.router.history.current.name === 'projects-instance-details') {
if (!this.taskInstance) {
this.$message.warning(
`${i18n.$t(
'The task has not been executed and cannot enter the sub-Process'
)}`
)
return
}
this.store
.dispatch('dag/getSubProcessId', { taskId: this.taskInstance.id })
.then((res) => {
this.$emit('onSubProcess', {
subInstanceId: res.data.subProcessInstanceId,
fromThis: this
})
})
.catch((e) => {
this.$message.error(e.msg || '')
})
} else {
const processDefinitionId =
this.backfillItem.params.processDefinitionId
const process = this.processListS.find(
(process) => process.processDefinition.id === processDefinitionId
)
this.$emit('onSubProcess', {
subProcessCode: process.processDefinition.code,
fromThis: this
})
}
},
_onUpdateWorkerGroup (o) {
this.selectedWorkerGroup = o
},
/**
* return params
*/
_onParams (o) {
this.params = Object.assign({}, o)
},
_onUpdateEnvironmentCode (o) {
this.environmentCode = o
},
/**
* _onCacheParams is reserved
*/
_onCacheParams (o) {
this.params = Object.assign(this.params, {}, o)
},
/**
* verification name
*/
_verifName () {
if (!_.trim(this.name)) {
this.$message.warning(`${i18n.$t('Please enter name (required)')}`)
return false
}
if (
this.successBranch !== '' &&
this.successBranch !== null &&
this.successBranch === this.failedBranch
) {
this.$message.warning(
`${i18n.$t(
'Cannot select the same node for successful branch flow and failed branch flow'
)}`
)
return false
}
if (this.name === this.backfillItem.name) {
return true
}
      // Check for duplicate task names against the tasks kept in the DAG store
const tasks = this.store.state.dag.tasks
const task = tasks.find((t) => t.name === this.name)
if (task) {
this.$message.warning(`${i18n.$t('Name already exists')}`)
return false
}
return true
},
_verifWorkGroup () {
let item = this.store.state.security.workerGroupsListAll.find((item) => {
return item.id === this.workerGroup
})
if (item === undefined) {
this.$message.warning(
`${i18n.$t(
'The Worker group no longer exists, please select the correct Worker group!'
)}`
)
return false
}
return true
},
/**
* Global verification procedure
*/
_verification () {
// Verify name
if (!this._verifName()) {
return
}
// verif workGroup
if (!this._verifWorkGroup()) {
return
}
// Verify task alarm parameters
if (this.nodeData.taskType === 'DEPENDENT') {
if (!this.$refs.dependentTimeout._verification()) {
return
}
} else {
if (!this.$refs.timeout._verification()) {
return
}
}
// Verify node parameters
if (!this.$refs[this.nodeData.taskType]._verification()) {
return
}
// set preTask
if (this.$refs.preTasks) {
this.$refs.preTasks.setPreNodes()
}
this.conditionResult.successNode[0] = this.successBranch
this.conditionResult.failedNode[0] = this.failedBranch
this.$emit('addTaskInfo', {
item: {
code: this.nodeData.id,
name: this.name,
description: this.desc,
taskType: this.nodeData.taskType,
taskParams: {
...this.params,
dependence: this.cacheDependence,
conditionResult: this.conditionResult,
waitStartTimeout: this.waitStartTimeout
},
flag: this.runFlag,
taskPriority: this.taskInstancePriority,
workerGroup: this.workerGroup,
failRetryTimes: this.maxRetryTimes,
failRetryInterval: this.retryInterval,
timeoutFlag: this.timeout.enable ? 'OPEN' : 'CLOSE',
timeoutNotifyStrategy: this.timeout.strategy,
timeout: this.timeout.interval || 0,
delayTime: this.delayTime,
environmentCode: this.environmentCode || -1,
status: this.status,
branch: this.branch
},
fromThis: this
})
// set run flag
this._setRunFlag()
// set edge label
this._setEdgeLabel()
},
/**
* Sub-workflow selected node echo name
*/
_onSetProcessName (name) {
this.name = name
},
/**
* set run flag
* TODO
*/
_setRunFlag () {},
_setEdgeLabel () {
if (this.successBranch || this.failedBranch) {
const canvas = findComponentDownward(this.dagChart, 'dag-canvas')
const edges = canvas.getEdges()
const successTask = this.postTasks.find(
(t) => t.name === this.successBranch
)
const failedTask = this.postTasks.find(
(t) => t.name === this.failedBranch
)
const sEdge = edges.find(
(edge) =>
successTask &&
edge.sourceId === this.code &&
edge.targetId === successTask.code
)
const fEdge = edges.find(
(edge) =>
failedTask &&
edge.sourceId === this.code &&
edge.targetId === failedTask.code
)
sEdge && canvas.setEdgeLabel(sEdge.id, this.$t('Success'))
fEdge && canvas.setEdgeLabel(fEdge.id, this.$t('Failed'))
}
},
/**
* Submit verification
*/
ok () {
this._verification()
},
/**
* Close and destroy component and component internal events
*/
close () {
let flag = false
// Delete node without storage
if (!this.backfillItem.name) {
flag = true
}
this.isContentBox = false
      // flag indicates whether the unsaved node should be removed when this panel closes
this.$emit('close', {
item: this.cacheBackfillItem,
flag: flag,
fromThis: this
})
},
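    // echo an existing task's data into the form; an empty object means a brand-new task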
backfill (backfillItem) {
const o = backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.code = o.code
this.name = o.name
this.taskInstancePriority = o.taskInstancePriority
this.runFlag = o.runFlag || 'YES'
this.desc = o.desc
this.maxRetryTimes = o.maxRetryTimes
this.retryInterval = o.retryInterval
this.delayTime = o.delayTime
if (o.conditionResult) {
this.successBranch = o.conditionResult.successNode[0]
this.failedBranch = o.conditionResult.failedNode[0]
}
        // If the task's worker group no longer exists, fall back to the worker group recorded on its latest task instance
        let workerGroupExists = false
        for (
          let i = 0;
          i < this.store.state.security.workerGroupsListAll.length;
          i++
        ) {
          let workerGroup = this.store.state.security.workerGroupsListAll[i].id
          if (o.workerGroup === workerGroup) {
            workerGroupExists = true
            break
          }
        }
        if (!workerGroupExists) {
this.store
.dispatch('dag/getTaskInstanceList', {
pageSize: 10,
pageNo: 1,
processInstanceId: this.nodeData.instanceId,
name: o.name
})
.then((res) => {
this.workerGroup = res.totalList[0].workerGroup
})
} else {
this.workerGroup = o.workerGroup
}
this.environmentCode = o.environmentCode === -1 ? '' : o.environmentCode
this.params = o.params || {}
this.dependence = o.dependence || {}
this.cacheDependence = o.dependence || {}
} else {
this.workerGroup = this.store.state.security.workerGroupsListAll[0].id
}
this.cacheBackfillItem = JSON.parse(JSON.stringify(o))
this.isContentBox = true
}
},
created () {
// Backfill data
let taskList = this.store.state.dag.tasks
let o = {}
if (taskList.length) {
taskList.forEach((task) => {
if (task.code === this.nodeData.id) {
const backfillItem = this.taskToBackfillItem(task)
o = backfillItem
this.backfillItem = backfillItem
this.isNewCreate = false
}
})
}
this.code = this.nodeData.id
this.backfill(o)
if (this.dagChart) {
const canvas = findComponentDownward(this.dagChart, 'dag-canvas')
const postNodes = canvas.getPostNodes(this.code)
const prevNodes = canvas.getPrevNodes(this.code)
const buildTask = (node) => ({
code: node.id,
name: node.data.taskName,
type: node.data.taskType
})
this.postTasks = postNodes.map(buildTask)
this.prevTasks = prevNodes.map(buildTask)
}
},
mounted () {
let self = this
$('#cancelBtn').mousedown(function (event) {
event.preventDefault()
self.close()
})
},
updated () {},
beforeDestroy () {},
destroyed () {},
computed: {
...mapState('dag', ['processListS', 'taskInstances']),
/**
* Child workflow entry show/hide
*/
_isGoSubProcess () {
return this.nodeData.taskType === 'SUB_PROCESS' && this.name
},
taskInstance () {
if (this.taskInstances.length > 0) {
return this.taskInstances.find(
(instance) => instance.taskCode === this.nodeData.id
)
}
return null
}
},
components: {
mListBox,
mMr,
mShell,
mWaterdrop,
mSubProcess,
mProcedure,
mSql,
mLog,
mSpark,
mFlink,
mPython,
mDependent,
mHttp,
mDatax,
mPigeon,
mSqoop,
mConditions,
mSwitch,
mSelectInput,
mTimeoutAlarm,
mDependentTimeout,
mPriority,
mWorkerGroups,
mRelatedEnvironment,
mPreTasks
// ReferenceFromTask
}
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./formModel";
.ans-radio-disabled {
.ans-radio-inner:after {
background-color: #6f8391;
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,474 | [Improvement] [MasterServer] schedule time for process instance optimization | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
When I used arthas to trace methods in complement data mode, I found that ProcessService.constructProcessInstance is slow for three reasons:
- getScheduleTime, which seems better handled in the API layer;
- the complement data handling, which deletes all the valid tasks when complementing data;
- initComplementDataParam, which computes the schedule time again;
### What you expected to happen
constructProcessInstance runs more quickly, without needless DB operations;
### How to reproduce
use arthas to trace ProcessService.constructProcessInstance
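
For reference, a minimal arthas session might look like the following (a sketch assuming arthas-boot is available on the master host; the class and method names are taken from the trace above):

```shell
# attach arthas to the JVM and pick the MasterServer process when prompted
java -jar arthas-boot.jar
# inside the arthas console, trace the slow method and print per-call timings
trace org.apache.dolphinscheduler.service.process.ProcessService constructProcessInstance
```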
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6474 | https://github.com/apache/dolphinscheduler/pull/6477 | ea493534df7b924220c9c1e7f6529f548d3912c9 | e091801e05c8e9243ec96f76a2e7cc1f1f5cecab | "2021-10-09T09:23:27Z" | java | "2021-10-11T07:13:38Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Lists;
import com.google.common.collect.Table;
/**
* master exec thread,split dag
*/
public class WorkflowExecuteThread implements Runnable {
/**
* logger of WorkflowExecuteThread
*/
private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class);
/**
     * running task processors, keyed by task instance id
*/
private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* process instance
*/
private ProcessInstance processInstance;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task queue
*/
private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private ProcessAlertManager processAlertManager;
/**
* the object of DAG
*/
private DAG<String, TaskNode, TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
     * netty executor manager, used to send commands to remote task hosts
*/
private NettyExecutorManager nettyExecutorManager;
private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>();
private List<Date> complementListDate = Lists.newLinkedList();
private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create();
private ProcessDefinition processDefinition;
private String key;
private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList;
/**
     * start flag, true: start nodes have been submitted completely
*/
private boolean isStart = false;
/**
* constructor of WorkflowExecuteThread
*
* @param processInstance processInstance
* @param processService processService
* @param nettyExecutorManager nettyExecutorManager
* @param taskTimeoutCheckList
*/
public WorkflowExecuteThread(ProcessInstance processInstance
, ProcessService processService
, NettyExecutorManager nettyExecutorManager
, ProcessAlertManager processAlertManager
, MasterConfig masterConfig
, ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) {
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = masterConfig;
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyExecutorManager = nettyExecutorManager;
this.processAlertManager = processAlertManager;
this.taskTimeoutCheckList = taskTimeoutCheckList;
}
@Override
public void run() {
try {
startProcess();
handleEvents();
} catch (Exception e) {
logger.error("handler error:", e);
}
}
/**
* the process start nodes are submitted completely.
* @return
*/
public boolean isStart() {
return this.isStart;
}
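    /**
     * poll the state event queue and handle events until the queue is drained
     */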
private void handleEvents() {
while (this.stateEvents.size() > 0) {
try {
StateEvent stateEvent = this.stateEvents.peek();
if (stateEventHandler(stateEvent)) {
this.stateEvents.remove(stateEvent);
}
} catch (Exception e) {
logger.error("state handle error:", e);
}
}
}
public String getKey() {
if (StringUtils.isNotEmpty(key)
|| this.processDefinition == null) {
return key;
}
key = String.format("%d_%d_%d",
this.processDefinition.getCode(),
this.processDefinition.getVersion(),
this.processInstance.getId());
return key;
}
public boolean addStateEvent(StateEvent stateEvent) {
if (processInstance.getId() != stateEvent.getProcessInstanceId()) {
logger.info("state event would be abounded :{}", stateEvent.toString());
return false;
}
this.stateEvents.add(stateEvent);
return true;
}
public int eventSize() {
return this.stateEvents.size();
}
public ProcessInstance getProcessInstance() {
return this.processInstance;
}
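    /**
     * dispatch a single state event to its handler
     *
     * @return true if the event has been consumed and can be removed from the queue
     */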
private boolean stateEventHandler(StateEvent stateEvent) {
logger.info("process event: {}", stateEvent.toString());
if (!checkStateEvent(stateEvent)) {
return false;
}
boolean result = false;
switch (stateEvent.getType()) {
case PROCESS_STATE_CHANGE:
result = processStateChangeHandler(stateEvent);
break;
case TASK_STATE_CHANGE:
result = taskStateChangeHandler(stateEvent);
break;
case PROCESS_TIMEOUT:
result = processTimeout();
break;
case TASK_TIMEOUT:
result = taskTimeout(stateEvent);
break;
default:
break;
}
if (result) {
this.stateEvents.remove(stateEvent);
}
return result;
}
private boolean taskTimeout(StateEvent stateEvent) {
        // the event refers to a task instance that is no longer tracked; nothing to do
        if (!taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) {
            return true;
        }
TaskInstance taskInstance = taskInstanceHashMap
.row(stateEvent.getTaskInstanceId())
.values()
.iterator().next();
if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) {
return true;
}
TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy();
if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) {
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
taskProcessor.action(TaskAction.TIMEOUT);
return false;
} else {
processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine());
return true;
}
}
private boolean processTimeout() {
this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition);
return true;
}
private boolean taskStateChangeHandler(StateEvent stateEvent) {
TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
if (stateEvent.getExecutionStatus().typeIsFinished()) {
taskFinished(task);
} else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) {
ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
iTaskProcessor.run();
if (iTaskProcessor.taskState().typeIsFinished()) {
task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
taskFinished(task);
}
} else {
logger.error("state handler error: {}", stateEvent.toString());
}
return true;
}
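    /**
     * handle a finished task: retry it if possible, record it as complete,
     * submit its post nodes on success, and apply the failure strategy on failure
     */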
private void taskFinished(TaskInstance task) {
logger.info("work flow {} task {} state:{} ",
processInstance.getId(),
task.getId(),
task.getState());
if (task.taskCanRetry()) {
addTaskToStandByList(task);
return;
}
ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId());
completeTaskList.put(Long.toString(task.getTaskCode()), task);
activeTaskProcessorMaps.remove(task.getId());
taskTimeoutCheckList.remove(task.getId());
if (task.getState().typeIsSuccess()) {
processInstance.setVarPool(task.getVarPool());
processService.saveProcessInstance(processInstance);
submitPostNode(Long.toString(task.getTaskCode()));
} else if (task.getState().typeIsFailure()) {
if (task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
submitPostNode(Long.toString(task.getTaskCode()));
} else {
errorTaskList.put(Long.toString(task.getTaskCode()), task);
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
killAllTasks();
}
}
}
this.updateProcessInstanceState();
}
private boolean checkStateEvent(StateEvent stateEvent) {
if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) {
logger.error("mismatch process instance id: {}, state event:{}",
this.processInstance.getId(),
stateEvent.toString());
return false;
}
return true;
}
private boolean processStateChangeHandler(StateEvent stateEvent) {
try {
logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus());
processInstance = processService.findProcessInstanceById(this.processInstance.getId());
if (processComplementData()) {
return true;
}
if (stateEvent.getExecutionStatus().typeIsFinished()) {
endProcess();
}
if (processInstance.getState() == ExecutionStatus.READY_STOP) {
killAllTasks();
}
return true;
} catch (Exception e) {
logger.error("process state change error:", e);
}
return true;
}
private boolean processComplementData() throws Exception {
if (!needComplementProcess()) {
return false;
}
Date scheduleDate = processInstance.getScheduleTime();
if (scheduleDate == null) {
scheduleDate = complementListDate.get(0);
} else if (processInstance.getState().typeIsFinished()) {
endProcess();
if (complementListDate.size() <= 0) {
logger.info("process complement end. process id:{}", processInstance.getId());
return true;
}
int index = complementListDate.indexOf(scheduleDate);
if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) {
logger.info("process complement end. process id:{}", processInstance.getId());
// complement data ends || no success
return true;
}
logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}",
processInstance.getId(),
processInstance.getScheduleTime(),
complementListDate.toString());
scheduleDate = complementListDate.get(index + 1);
//the next process complement
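            // reset the id to 0, presumably so that saveProcessInstance below inserts a new process instance row for the next schedule date instead of updating the finished one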
processInstance.setId(0);
}
processInstance.setScheduleTime(scheduleDate);
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
this.taskInstanceHashMap.clear();
startProcess();
return true;
}
private boolean needComplementProcess() {
        return processInstance.isComplementData()
                && Flag.NO == processInstance.getIsSubProcess();
}
private void startProcess() throws Exception {
if (this.taskInstanceHashMap.size() == 0) {
isStart = false;
buildFlowDag();
initTaskQueue();
submitPostNode(null);
isStart = true;
}
}
/**
* process end handle
*/
private void endProcess() {
this.stateEvents.clear();
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if (processInstance.getState().typeIsWaitingThread()) {
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser);
}
/**
* generate process dag
*
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
if (this.dag != null) {
return;
}
processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
List<TaskNode> taskNodeList =
processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList());
forbiddenTaskList.clear();
taskNodeList.forEach(taskNode -> {
if (taskNode.isForbidden()) {
forbiddenTaskList.put(Long.toString(taskNode.getCode()), taskNode);
}
});
// generate process to get DAG info
List<String> recoveryNodeCodeList = getRecoveryNodeCodeList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(taskNodeList,
startNodeNameList, recoveryNodeCodeList, processInstance.getTaskDependType());
if (processDag == null) {
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue() {
taskFailedSubmit = false;
activeTaskProcessorMaps.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance task : taskInstanceList) {
if (task.isTaskComplete()) {
completeTaskList.put(Long.toString(task.getTaskCode()), task);
}
if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
continue;
}
if (task.getState().typeIsFailure() && !task.taskCanRetry()) {
errorTaskList.put(Long.toString(task.getTaskCode()), task);
}
}
if (processInstance.isComplementData() && complementListDate.size() == 0) {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
if (complementListDate.size() == 0 && needComplementProcess()) {
complementListDate = CronUtils.getSelfFireDateList(start, end, schedules);
logger.info(" process definition code:{} complement data: {}",
processInstance.getProcessDefinitionCode(), complementListDate.toString());
}
}
}
}
/**
* submit task to execute
*
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
try {
ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType());
if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION
&& taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) {
notifyProcessHostUpdate(taskInstance);
}
boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval());
if (submit) {
this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance);
activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor);
taskProcessor.run();
addTimeoutCheck(taskInstance);
TaskDefinition taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
taskInstance.setTaskDefine(taskDefinition);
if (taskProcessor.taskState().typeIsFinished()) {
StateEvent stateEvent = new StateEvent();
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(taskProcessor.taskState());
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
this.stateEvents.add(stateEvent);
}
return taskInstance;
} else {
logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!",
processInstance.getId(), processInstance.getName(),
taskInstance.getId(), taskInstance.getName());
return null;
}
} catch (Exception e) {
logger.error("submit standby task error", e);
return null;
}
}
private void notifyProcessHostUpdate(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getHost())) {
return;
}
try {
HostUpdateCommand hostUpdateCommand = new HostUpdateCommand();
hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort()));
hostUpdateCommand.setTaskInstanceId(taskInstance.getId());
Host host = new Host(taskInstance.getHost());
nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command());
} catch (Exception e) {
logger.error("notify process host update", e);
}
}
private void addTimeoutCheck(TaskInstance taskInstance) {
TaskDefinition taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion()
);
taskInstance.setTaskDefine(taskDefinition);
if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) {
this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
return;
}
if (taskInstance.isDependTask() || taskInstance.isSubProcess()) {
this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
}
}
/**
* find task instance in db.
     * in case the same task is submitted more than once at the same time.
*
* @param taskCode task code
* @param taskVersion task version
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) {
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) {
return taskInstance;
}
}
return null;
}
/**
* encapsulation task
*
* @param processInstance process instance
* @param taskNode taskNode
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion());
if (taskInstance == null) {
taskInstance = new TaskInstance();
taskInstance.setTaskCode(taskNode.getCode());
taskInstance.setTaskDefinitionVersion(taskNode.getVersion());
// task name
taskInstance.setName(taskNode.getName());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance type
taskInstance.setTaskType(taskNode.getType().toUpperCase());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(null);
// task instance flag
taskInstance.setFlag(Flag.YES);
// task dry run flag
taskInstance.setDryRun(processInstance.getDryRun());
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
//set task param
taskInstance.setTaskParams(taskNode.getTaskParams());
// task instance priority
if (taskNode.getTaskInstancePriority() == null) {
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
} else {
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode();
Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? processEnvironmentCode : taskNode.getEnvironmentCode();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
taskInstance.setEnvironmentCode(processEnvironmentCode);
} else {
taskInstance.setWorkerGroup(taskWorkerGroup);
taskInstance.setEnvironmentCode(taskEnvironmentCode);
}
if (!taskInstance.getEnvironmentCode().equals(-1L)) {
Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode());
if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) {
taskInstance.setEnvironmentConfig(environment.getConfig());
}
}
// delay execution time
taskInstance.setDelayTime(taskNode.getDelayTime());
}
return taskInstance;
}
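    /**
     * inherit the var pool from completed upstream tasks:
     * merge every predecessor's output properties into this task's input var pool
     */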
public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) {
Map<String, Property> allProperty = new HashMap<>();
Map<String, TaskInstance> allTaskInstance = new HashMap<>();
if (CollectionUtils.isNotEmpty(preTask)) {
for (String preTaskName : preTask) {
TaskInstance preTaskInstance = completeTaskList.get(preTaskName);
if (preTaskInstance == null) {
continue;
}
String preVarPool = preTaskInstance.getVarPool();
if (StringUtils.isNotEmpty(preVarPool)) {
List<Property> properties = JSONUtils.toList(preVarPool, Property.class);
for (Property info : properties) {
setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info);
}
}
}
if (allProperty.size() > 0) {
taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values()));
}
}
}
private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) {
        //every property inherited here acts as an IN param for this taskInstance
        thisProperty.setDirect(Direct.IN);
        //get the property name from the pre taskInstance
        String proName = thisProperty.getProp();
        //if a previous node already produced a property with the same name
        if (allProperty.containsKey(proName)) {
            //compare the values of the two properties
            Property otherPro = allProperty.get(proName);
            //if this property's value is empty, use the other one no matter whether it is empty or not
            if (StringUtils.isEmpty(thisProperty.getValue())) {
                allProperty.put(proName, otherPro);
                //if both values are non-empty, keep the value from the task that finished earlier
} else if (StringUtils.isNotEmpty(otherPro.getValue())) {
TaskInstance otherTask = allTaskInstance.get(proName);
if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
} else {
allProperty.put(proName, otherPro);
}
} else {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
}
} else {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
}
}
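    /**
     * submit the post nodes of the given parent node:
     * build task instances for the ready successors and move them into the standby queue
     */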
private void submitPostNode(String parentNodeCode) {
Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeList, dag, completeTaskList);
List<TaskInstance> taskInstances = new ArrayList<>();
for (String taskNode : submitTaskNodeList) {
TaskNode taskNodeObject = dag.getNode(taskNode);
if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) {
continue;
}
TaskInstance task = createTaskInstance(processInstance, taskNodeObject);
taskInstances.add(task);
}
// if previous node success , post node submit
for (TaskInstance task : taskInstances) {
if (readyToSubmitTaskQueue.contains(task)) {
continue;
}
if (completeTaskList.containsKey(Long.toString(task.getTaskCode()))) {
logger.info("task {} has already run success", task.getName());
continue;
}
if (task.getState().typeIsPause() || task.getState().typeIsCancel()) {
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
} else {
addTaskToStandByList(task);
}
}
submitStandByTask();
updateProcessInstanceState();
}
/**
* determine whether the dependencies of the task node are complete
*
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskCode) {
Collection<String> startNodes = dag.getBeginNode();
// if vertex,returns true directly
if (startNodes.contains(taskCode)) {
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskCode);
List<String> depCodeList = taskNode.getDepList();
for (String depsNode : depCodeList) {
if (!dag.containsNode(depsNode)
|| forbiddenTaskList.containsKey(depsNode)
|| skipTaskNodeList.containsKey(depsNode)) {
continue;
}
// dependencies must be fully completed
if (!completeTaskList.containsKey(depsNode)) {
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) {
return DependResult.NON_EXEC;
}
// ignore task state if current task is condition
if (taskNode.isConditionsTask()) {
continue;
}
if (!dependTaskSuccess(depsNode, taskCode)) {
return DependResult.FAILED;
}
}
logger.info("taskCode: {} completeDependTaskList: {}", taskCode, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* depend node is completed, but here need check the condition task branch is the next node
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) {
if (dag.getNode(dependNodeName).isConditionsTask()) {
//condition task need check the branch to run
List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList);
if (!nextTaskList.contains(nextNodeName)) {
return false;
}
} else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if (depTaskState.typeIsFailure()) {
return false;
}
}
return true;
}
/**
* query task instance by complete state
*
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) {
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) {
if (entry.getValue().getState() == state) {
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
     * compute the process state while there are ongoing tasks
*
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state) {
if (state == ExecutionStatus.READY_STOP
|| state == ExecutionStatus.READY_PAUSE
|| state == ExecutionStatus.WAITING_THREAD
|| state == ExecutionStatus.DELAY_EXECUTION) {
// if the running task is not completed, the state remains unchanged
return state;
} else {
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
     * whether a failed task exists: submit failure, dependency failure, or execution failure (after retries)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask() {
if (this.taskFailedSubmit) {
return true;
}
if (this.errorTaskList.size() > 0) {
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed() {
if (hasFailedTask()) {
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0;
}
}
return false;
}
/**
* whether task for waiting thread
*
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask() {
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
     * 1. a failed retry task in the standby queue returns FAILURE directly
     * 2. a paused task exists, the complement has not finished, or tasks are pending submission: return PAUSE
     * 3. otherwise return SUCCESS
*
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause() {
if (hasRetryTaskInStandBy()) {
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if (CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskQueue.size() > 0) {
return ExecutionStatus.PAUSE;
} else {
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
*
* @param instance
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(ProcessInstance instance) {
ExecutionStatus state = instance.getState();
if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) {
// active task and retry task exists
return runningState(state);
}
// process failure
if (processFailed()) {
return ExecutionStatus.FAILURE;
}
// waiting thread
if (hasWaitingThreadTask()) {
return ExecutionStatus.WAITING_THREAD;
}
// pause
if (state == ExecutionStatus.READY_PAUSE) {
return processReadyPause();
}
// stop
if (state == ExecutionStatus.READY_STOP) {
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if (CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()) {
return ExecutionStatus.STOP;
} else {
return ExecutionStatus.SUCCESS;
}
}
// success
if (state == ExecutionStatus.RUNNING_EXECUTION) {
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if (readyToSubmitTaskQueue.size() > 0) {
                    //tasks are still pending submission and none of them is a retry, which means a dependency is still being waited on
return ExecutionStatus.RUNNING_EXECUTION;
} else if (CollectionUtils.isNotEmpty(killTasks)) {
// tasks maybe killed manually
return ExecutionStatus.FAILURE;
} else {
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether complement end
*
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if (!processInstance.isComplementData()) {
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ", e);
return false;
}
}
/**
* updateProcessInstance process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = getProcessInstanceState(instance);
if (processInstance.getState() != state) {
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
instance.setState(state);
processService.updateProcessInstance(instance);
processInstance = instance;
StateEvent stateEvent = new StateEvent();
stateEvent.setExecutionStatus(processInstance.getState());
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE);
this.processStateChangeHandler(stateEvent);
}
}
/**
* get task dependency result
*
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance) {
return isTaskDepsComplete(Long.toString(taskInstance.getTaskCode()));
}
/**
* add task to standby list
*
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance) {
logger.info("add task to stand by list: {}", taskInstance.getName());
try {
readyToSubmitTaskQueue.put(taskInstance);
} catch (Exception e) {
logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e);
}
}
/**
* remove task from stand by list
*
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance) {
logger.info("remove task from stand by list, id: {} name:{}",
taskInstance.getId(),
taskInstance.getName());
try {
readyToSubmitTaskQueue.remove(taskInstance);
} catch (Exception e) {
logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}",
taskInstance.getId(),
taskInstance.getName(), e);
}
}
/**
* has retry task in standby
*
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy() {
for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) {
if (iter.next().getState().typeIsFailure()) {
return true;
}
}
return false;
}
/**
* close the on going tasks
*/
private void killAllTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskProcessorMaps.size());
for (int taskId : activeTaskProcessorMaps.keySet()) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskId);
if (taskInstance == null || taskInstance.getState().typeIsFinished()) {
continue;
}
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId);
taskProcessor.action(TaskAction.STOP);
if (taskProcessor.taskState().typeIsFinished()) {
StateEvent stateEvent = new StateEvent();
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(taskProcessor.taskState());
this.addStateEvent(stateEvent);
}
}
}
public boolean workFlowFinish() {
return this.processInstance.getState().typeIsFinished();
}
/**
     * whether the retry interval has elapsed
*
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) {
if (taskInstance.getState() != ExecutionStatus.FAILURE) {
return true;
}
        if (taskInstance.getId() == 0
                || taskInstance.getMaxRetryTimes() == 0
                || taskInstance.getRetryInterval() == 0) {
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
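        // retryInterval is configured in minutes; SEC_2_MINUTES_TIME_UNIT
        // converts it to seconds, e.g. an interval of 2 becomes 120 seconds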
        // return true only when the elapsed time since failure exceeds the retry interval
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask() {
try {
int length = readyToSubmitTaskQueue.size();
for (int i = 0; i < length; i++) {
TaskInstance task = readyToSubmitTaskQueue.peek();
if (task == null) {
continue;
}
                // stop retrying the task if a forced success happened
if (task.taskCanRetry()) {
TaskInstance retryTask = processService.findTaskInstanceById(task.getId());
if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) {
task.setState(retryTask.getState());
logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName());
removeTaskFromStandbyList(task);
completeTaskList.put(Long.toString(task.getTaskCode()), task);
submitPostNode(Long.toString(task.getTaskCode()));
continue;
}
}
                // init varPool only if this task is running for the first time
if (task.isFirstRun()) {
                    // get the previous tasks and merge their varPool into this task
Set<String> preTask = dag.getPreviousNodes(Long.toString(task.getTaskCode()));
getPreVarPool(task, preTask);
}
DependResult dependResult = getDependResultForTask(task);
if (DependResult.SUCCESS == dependResult) {
if (retryTaskIntervalOverTime(task)) {
TaskInstance taskInstance = submitTaskExec(task);
if (taskInstance == null) {
this.taskFailedSubmit = true;
} else {
removeTaskFromStandbyList(task);
}
}
} else if (DependResult.FAILED == dependResult) {
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(Long.toString(task.getTaskCode()), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult);
} else if (DependResult.NON_EXEC == dependResult) {
                    // for some reason (the dependent task was paused/stopped) this task will not be submitted
removeTaskFromStandbyList(task);
logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult);
}
}
} catch (Exception e) {
logger.error("submit standby task error", e);
}
}
/**
* get recovery task instance
*
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId) {
        if (StringUtils.isEmpty(taskId)) {
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if (task == null) {
logger.error("start node id cannot be found: {}", taskId);
} else {
return task;
}
} catch (Exception e) {
logger.error("get recovery task instance failed ", e);
}
return null;
}
/**
* get start task instance list
*
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam) {
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) {
String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for (String nodeId : idList) {
TaskInstance task = getRecoveryTaskInstance(nodeId);
if (task != null) {
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
*
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam) {
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap == null) {
return startNodeNameList;
}
if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) {
startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
     * generate the start node code list by parsing the command param;
     * if "StartNodeIdList" exists in the command param, return those task codes
*
* @return recovery node code list
*/
private List<String> getRecoveryNodeCodeList() {
List<String> recoveryNodeCodeList = new ArrayList<>();
if (CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeCodeList.add(Long.toString(task.getTaskCode()));
}
}
return recoveryNodeCodeList;
}
/**
* generate flow dag
*
* @param totalTaskNodeList total task node list
* @param startNodeNameList start node name list
* @param recoveryNodeCodeList recovery node code list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList,
List<String> startNodeNameList,
List<String> recoveryNodeCodeList,
TaskDependType depNodeType) throws Exception {
return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,474 | [Improvement] [MasterServer] schedule time for process instance optimization | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
When I used arthas to trace methods in complement data mode, I found that ProcessService.constructProcessInstance is slow because of three points:
- getScheduleTime, which seems better handled in the api module;
- complement data handling, which deletes all the valid tasks when complementing data;
- initComplementDataParam, which computes the schedule time again;
### What you expected to happen
Constructing the process instance should be quicker, without needless DB operations;
### How to reproduce
use arthas to trace ProcessService.constructProcessInstance
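For example, after attaching arthas to the master server JVM, a trace along these lines reproduces the measurement (a sketch; the class and method names are taken from this repository):
```
trace org.apache.dolphinscheduler.service.process.ProcessService constructProcessInstance
```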
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6474 | https://github.com/apache/dolphinscheduler/pull/6477 | ea493534df7b924220c9c1e7f6529f548d3912c9 | e091801e05c8e9243ec96f76a2e7cc1f1f5cecab | "2021-10-09T09:23:27Z" | java | "2021-10-11T07:13:38Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.facebook.presto.jdbc.internal.guava.collect.Lists;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * process related dao that groups the mappers used in this service.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessDefinitionLogMapper processDefineLogMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
@Autowired
private TaskDefinitionMapper taskDefinitionMapper;
@Autowired
private TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
private ProcessTaskRelationLogMapper processTaskRelationLogMapper;
@Autowired
private EnvironmentMapper environmentMapper;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
*
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = Exception.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
this.commandMapper.deleteById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
@Transactional(rollbackFor = Exception.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
this.commandMapper.deleteById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
     * @return whether there are enough threads
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
*
* @return command
*/
public Command findOneCommand() {
return commandMapper.getOneToRun();
}
/**
* get command page
*
     * @param pageSize page size
     * @param pageNumber page number
     * @return command list of the requested page
*/
public List<Command> findCommandPage(int pageSize, int pageNumber) {
return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize);
}
/**
* check the input command exists in queue list
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
CommandType commandType = command.getCommandType();
if (cmdTypeMap.containsKey(commandType)) {
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.error("process define not exists");
return new ArrayList<>();
}
List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
return new ArrayList<>(taskDefinitionLogs);
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* find process define by code and version.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
if (processDefinition == null || processDefinition.getVersion() != version) {
processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version);
if (processDefinition != null) {
processDefinition.setId(0);
}
}
return processDefinition;
}
/**
* find process define by code.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) {
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
try (LogClientService logClient = new LogClientService()) {
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
// compatible old version
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip, port, taskLogPath);
}
}
}
/**
* calculate sub process number in the process define.
*
* @param processDefinitionCode processDefinitionCode
* @return process thread num count
*/
private Integer workProcessThreadNumCount(long processDefinitionCode) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinition.getId(), ids);
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskDefinition taskNode : taskNodeList) {
String parameter = taskNode.getTaskParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
/**
* create recovery waiting thread command when thread pool is not enough for the process instance.
     * a sub work process instance does not need a recovery command.
     * create the recovery waiting thread command and delete the origin command at the same time.
     * if the recovery command already exists, only update the update_time field
*
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // a sub process does not need to create a wait command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinition().getCode(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getEnvironmentCode(),
processInstance.getProcessInstancePriority(),
processInstance.getDryRun()
);
saveCommand(command);
return;
}
        // only refresh the update time if the current command is already a recover-from-waiting command
if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
if (scheduleTime == null
&& cmdParam != null
&& cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules);
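            // the first fire date of the definition's schedules inside
            // [start, end] becomes the complement run's schedule time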
if (complementDateList.size() > 0) {
scheduleTime = complementDateList.get(0);
} else {
logger.error("set scheduler time error: complement date list is empty, command: {}",
command.toString());
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setProcessDefinitionCode(processDefinition.getCode());
processInstance.setProcessDefinitionVersion(processDefinition.getVersion());
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
processInstance.setDryRun(command.getDryRun());
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if (scheduleTime != null) {
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
// reset global params while there are start parameters
setGlobalParamIfCommanded(processDefinition, cmdParam);
        // curing global params: resolve built-in parameter expressions against the schedule time
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? -1 : command.getEnvironmentCode());
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
Map<String, String> fatherParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
fatherParamMap = JSONUtils.toMap(fatherParamJson);
}
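        // params inherited from the parent instance override start params
        // that share the same key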
startParamMap.putAll(fatherParamMap);
// set start param into global params
if (startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
/**
* get process tenant
     * if there is a tenant id in the definition, use the definition's tenant;
     * if there is no tenant id in the definition, or that tenant does not exist,
     * use the definition creator's tenant.
*
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* get an environment
     * use the code of the environment to find an environment.
*
* @param environmentCode environmentCode
* @return Environment
*/
public Environment findEnvironmentByCode(Long environmentCode) {
Environment environment = null;
if (environmentCode >= 0) {
environment = environmentMapper.queryByEnvironmentCode(environmentCode);
}
return environment;
}
/**
* check command parameters is valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host) {
ProcessInstance processInstance;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam);
if (processDefinition == null) {
logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
return null;
}
if (cmdParam != null) {
int processInstanceId = 0;
// recover from failure or pause tasks
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
// sub process map
String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
// waiting thread command
String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return processInstance;
}
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
// reset global params while repeat running is needed by cmdParam
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
processInstance.setProcessDefinition(processDefinition);
}
//reset command parameter
if (processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
} else {
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
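        // runStatus defaults to RUNNING and is only overridden below for
        // fault tolerance recovery, which resumes from the saved state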
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* get process definition by command
* If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
* Otherwise, get the latest version of ProcessDefinition
*
* @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
if (cmdParam != null) {
int processInstanceId = 0;
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
}
if (processInstanceId != 0) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return null;
}
return processDefineLogMapper.queryByDefinitionCodeAndVersion(
processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
}
}
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* return complement data if the process start with complement data
*
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules);
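        // a sub process inherits its schedule time from its parent, so only
        // a top-level complement instance takes the first fire date here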
if (complementDate.size() > 0
&& Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementDate.get(0));
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters.
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters
*
* @param subProcessInstance subProcessInstance
*/
public void setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
        // copy the parent instance's user-defined params to the sub process
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
}
/**
* join parent global params into sub process.
     * only keys not already present in the sub process global params are joined.
*
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
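        // otherwise reset the task state so that it can be scheduled again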
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
     * retry submitting the task to the db until it succeeds or the retries run out
     *
     * @param taskInstance task instance
     * @param commitRetryTimes max times to retry the commit
     * @param commitInterval interval between retries, in milliseconds
     * @return the committed task instance, or null if the commit never succeeded
*/
public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
int retryTimes = 1;
boolean submitDB = false;
TaskInstance task = null;
while (retryTimes <= commitRetryTimes) {
try {
if (!submitDB) {
// submit task to db
task = submitTask(taskInstance);
if (task != null && task.getId() != 0) {
submitDB = true;
break;
}
}
if (!submitDB) {
logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes);
}
Thread.sleep(commitInterval);
} catch (Exception e) {
logger.error("task commit to mysql failed", e);
}
retryTimes += 1;
}
return task;
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
     * repeat running does not generate a new sub process instance
* set map {parent instance id, task instance id, 0(child instance id)}
*
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
        // check whether the sub work flow already exists first
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // fault tolerance recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
     * complement data needs to pass the parent's parameters down to the child.
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if (parentProcessInstance.isComplementData()) {
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
if (fatherParams.size() != 0) {
cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
public Map<String, String> getGlobalParamMap(String globalParams) {
List<Property> propList;
Map<String, String> globalParamMap = new HashMap<>();
if (StringUtils.isNotEmpty(globalParams)) {
propList = JSONUtils.toList(globalParams, Property.class);
globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
}
return globalParamMap;
}
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId);
Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
Map<String, String> fatherParams = new HashMap<>();
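        // pass down the parent's global value for every local param declared
        // on the sub-process task definition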
if (CollectionUtils.isNotEmpty(allParam)) {
for (Property info : allParam) {
fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
}
}
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
processDefinition.getCode(),
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
task.getEnvironmentCode(),
parentProcessInstance.getProcessInstancePriority(),
parentProcessInstance.getDryRun()
);
}
/**
* initialize sub work flow state
* child instance state would be initialized when 'recovery from pause/stop/failure'
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
/**
* get sub work flow command type
     * if the child instance exists: child command = father command
     * if the child instance does not exist: child command = the first command in the father's history
*/
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionCode childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
parentProcessInstance.getProcessDefinitionVersion());
ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
     * submit task to db
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
                    // mark the failed task invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
                    // create a new task instance for the retry
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
/**
* get submit task instance state by the work process state
     * the task state cannot be modified when it is running, delayed or killed,
     * because such a task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
     * if none of the above is satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
        // return pause/stop if the process instance state is ready pause / ready stop,
        // otherwise return submit success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
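
    // Illustrative walk-through (added for clarity, derived from the branches above):
    //   task = RUNNING_EXECUTION / DELAY_EXECUTION / KILL   -> keep the task state (already in the queue)
    //   task = SUBMITTED_SUCCESS, process = READY_PAUSE     -> PAUSE
    //   task = SUBMITTED_SUCCESS, process = READY_STOP      -> KILL
    //   task = SUBMITTED_SUCCESS, checkProcessStrategy false -> KILL
    //   otherwise                                           -> SUBMITTED_SUCCESS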
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE
&& task.getRetryTimes() >= task.getMaxRetryTimes()) {
return false;
}
}
return true;
}
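
    // Illustrative example (added for clarity): with a failure strategy other than CONTINUE,
    // if any sibling task in the same process instance is in FAILURE state and has exhausted
    // its retries, this method returns false and the newly submitted task is killed instead
    // of being queued.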
/**
     * insert or update work process instance into the database
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* find task instance by id
*
* @param taskId task id
     * @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
     * package task instance, associate processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return null;
}
setTaskInstanceDetail(taskInstance);
return taskInstance;
}
/**
     * package task instance, associate processInstance and processDefine
     *
     * @param taskInstance taskInstance
*/
public void setTaskInstanceDetail(TaskInstance taskInstance) {
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
updateTaskDefinitionResources(taskDefinition);
taskInstance.setTaskDefine(taskDefinition);
}
/**
* Update {@link ResourceInfo} information in {@link TaskDefinition}
*
* @param taskDefinition the given {@link TaskDefinition}
*/
private void updateTaskDefinitionResources(TaskDefinition taskDefinition) {
Map<String, Object> taskParameters = JSONUtils.parseObject(
taskDefinition.getTaskParams(),
new TypeReference<Map<String, Object>>() { });
if (taskParameters != null) {
// if contains mainJar field, query resource from database
// Flink, Spark, MR
if (taskParameters.containsKey("mainJar")) {
Object mainJarObj = taskParameters.get("mainJar");
ResourceInfo mainJar = JSONUtils.parseObject(
JSONUtils.toJsonString(mainJarObj),
ResourceInfo.class);
ResourceInfo resourceInfo = updateResourceInfo(mainJar);
if (resourceInfo != null) {
taskParameters.put("mainJar", resourceInfo);
}
}
// update resourceList information
if (taskParameters.containsKey("resourceList")) {
String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList"));
List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class);
List<ResourceInfo> updatedResourceInfos = resourceInfos
.stream()
.map(this::updateResourceInfo)
.filter(Objects::nonNull)
.collect(Collectors.toList());
taskParameters.put("resourceList", updatedResourceInfos);
}
// set task parameters
taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters));
}
}
/**
* update {@link ResourceInfo} by given original ResourceInfo
*
* @param res origin resource info
* @return {@link ResourceInfo}
*/
private ResourceInfo updateResourceInfo(ResourceInfo res) {
ResourceInfo resourceInfo = null;
        // only process the resource if it is not null and refers to a valid resource id
if (res != null) {
int resourceId = res.getId();
if (resourceId <= 0) {
logger.error("invalid resourceId, {}", resourceId);
return null;
}
resourceInfo = new ResourceInfo();
// get resource from database, only one resource should be returned
Resource resource = getResourceById(resourceId);
resourceInfo.setId(resourceId);
resourceInfo.setRes(resource.getFileName());
resourceInfo.setResourceName(resource.getFullName());
if (logger.isInfoEnabled()) {
logger.info("updated resource info {}",
JSONUtils.toJsonString(resourceInfo));
}
}
return resourceInfo;
}
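
    // Illustrative example with a hypothetical resource (added for clarity): for res = {"id": 3},
    // if resource 3 in the database has fileName "udf.jar" and fullName "/lib/udf.jar", the
    // returned info is {"id": 3, "res": "udf.jar", "resourceName": "/lib/udf.jar"};
    // a null res or an id <= 0 yields null.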
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
int count = 0;
if (processInstanceMap != null) {
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
*
     * @param taskInstance taskInstance
     * @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
/**
* change task state
*
     * @param taskInstance taskInstance
     * @param state state
     * @param endTime endTime
     * @param processId processId
     * @param appIds appIds
* @param taskInstId taskInstId
* @param varPool varPool
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(taskInstance);
saveTaskInstance(taskInstance);
}
/**
* for show in page of taskInstance
*
* @param taskInstance
*/
public void changeOutParam(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getVarPool())) {
return;
}
List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class);
if (CollectionUtils.isEmpty(properties)) {
return;
}
        // if the result has more than one line, just take the first one
Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() {});
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> outProperty = new HashMap<>();
for (Property info : properties) {
if (info.getDirect() == Direct.OUT) {
outProperty.put(info.getProp(), info.getValue());
}
}
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
info.setValue(outProperty.get(paramName));
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams));
}
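
    // Illustrative example with hypothetical values (added for clarity): given
    //   varPool               = [{"prop":"result","direct":"OUT","type":"VARCHAR","value":"123"}]
    //   taskParams.localParams = [{"prop":"result","direct":"OUT","type":"VARCHAR","value":""}]
    // changeOutParam copies the OUT value "123" from the var pool into the matching local
    // param, so the task instance page shows the value the task actually produced.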
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionCode
*
* @param processDefinitionCode processDefinitionCode
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
//1 update processInstance host is null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
//2 insert into recover command
Command cmd = new Command();
cmd.setProcessDefinitionCode(processDefinition.getCode());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
        // normalize to a full name so the tenant code can still be resolved for resources created by older versions
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
return StringUtils.EMPTY;
}
int userId = resourceList.get(0).getUserId();
User user = userMapper.selectById(userId);
if (Objects.isNull(user)) {
return StringUtils.EMPTY;
}
Tenant tenant = tenantMapper.selectById(user.getTenantId());
if (Objects.isNull(tenant)) {
return StringUtils.EMPTY;
}
return tenant.getTenantCode();
}
/**
* find schedule list by process define codes.
*
* @param codes codes
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineCode(long[] codes) {
return scheduleMapper.selectAllByProcessDefineArray(codes);
}
/**
* find last scheduler process instance in the date interval
*
* @param definitionCode definitionCode
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionCode process definition code
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionCode process definition code
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionCode,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
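
    // Illustrative fallback chain (added for clarity): the task's own worker group is used if
    // set, otherwise the process instance's worker group, otherwise Constants.DEFAULT_WORKER_GROUP.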
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
     * list unauthorized entities (resource files, data sources or udf functions)
     *
     * @param userId user id
     * @param needChecks the entity ids or names to check
     * @return unauthorized entity list
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks);
addAuthorizedResources(ownUdfResources, userId);
Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks);
addAuthorizedResources(ownResources, userId);
Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
return "";
}
ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
if (definition == null) {
return "";
}
return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId());
}
/**
* switch process definition version to process definition log version
*/
public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) {
if (null == processDefinition || null == processDefinitionLog) {
return Constants.DEFINITION_FAILURE;
}
processDefinitionLog.setId(processDefinition.getId());
processDefinitionLog.setReleaseState(ReleaseState.OFFLINE);
processDefinitionLog.setFlag(Flag.YES);
int result = processDefineMapper.updateById(processDefinitionLog);
if (result > 0) {
result = switchProcessTaskRelationVersion(processDefinitionLog);
if (result <= 0) {
return Constants.DEFINITION_FAILURE;
}
}
return result;
}
public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
if (!processTaskRelationList.isEmpty()) {
processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode());
}
List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
return processTaskRelationMapper.batchInsert(processTaskRelationLogList);
}
/**
* get resource ids
*
* @param taskDefinition taskDefinition
* @return resource ids
*/
public String getResourceIds(TaskDefinition taskDefinition) {
Set<Integer> resourceIds = null;
AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams());
if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
            resourceIds = params.getResourceFilesList()
                    .stream()
.filter(t -> t.getId() != 0)
.map(ResourceInfo::getId)
.collect(Collectors.toSet());
}
if (CollectionUtils.isEmpty(resourceIds)) {
return StringUtils.EMPTY;
}
return StringUtils.join(resourceIds, ",");
}
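
    // Illustrative example with hypothetical ids (added for clarity): for task params containing
    //   "resourceList": [{"id": 1}, {"id": 7}, {"id": 0}]
    // the ids 1 and 7 are collected (id 0 is filtered out) and the method returns "1,7".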
public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) {
Date now = new Date();
List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>();
List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>();
for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
taskDefinitionLog.setProjectCode(projectCode);
taskDefinitionLog.setUpdateTime(now);
taskDefinitionLog.setOperateTime(now);
taskDefinitionLog.setOperator(operator.getId());
taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog));
if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) {
TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper
.queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion());
if (definitionCodeAndVersion != null) {
if (!taskDefinitionLog.equals(definitionCodeAndVersion)) {
taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId());
Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode());
taskDefinitionLog.setVersion(version + 1);
taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime());
updateTaskDefinitionLogs.add(taskDefinitionLog);
}
continue;
}
}
taskDefinitionLog.setUserId(operator.getId());
taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
taskDefinitionLog.setCreateTime(now);
if (taskDefinitionLog.getCode() == 0) {
try {
taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
logger.error("Task code get error, ", e);
return Constants.DEFINITION_FAILURE;
}
}
newTaskDefinitionLogs.add(taskDefinitionLog);
}
int insertResult = 0;
int updateResult = 0;
for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) {
TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode());
if (task == null) {
newTaskDefinitionLogs.add(taskDefinitionToUpdate);
} else {
insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate);
taskDefinitionToUpdate.setId(task.getId());
updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate);
}
}
if (!newTaskDefinitionLogs.isEmpty()) {
updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs);
insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs);
}
return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS;
}
/**
* save processDefinition (including create or update processDefinition)
*/
public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) {
ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition);
Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode());
int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1;
processDefinitionLog.setVersion(insertVersion);
processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE);
processDefinitionLog.setOperator(operator.getId());
processDefinitionLog.setOperateTime(processDefinition.getUpdateTime());
int insertLog = processDefineLogMapper.insert(processDefinitionLog);
int result;
if (0 == processDefinition.getId()) {
result = processDefineMapper.insert(processDefinitionLog);
} else {
processDefinitionLog.setId(processDefinition.getId());
result = processDefineMapper.updateById(processDefinitionLog);
}
return (insertLog & result) > 0 ? insertVersion : 0;
}
/**
* save task relations
*/
public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion,
List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null;
if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) {
taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog));
}
Date now = new Date();
for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
processTaskRelationLog.setProjectCode(projectCode);
processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode);
processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion);
if (taskDefinitionLogMap != null) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode());
if (taskDefinitionLog != null) {
processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion());
}
processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion());
}
processTaskRelationLog.setCreateTime(now);
processTaskRelationLog.setUpdateTime(now);
processTaskRelationLog.setOperator(operator.getId());
processTaskRelationLog.setOperateTime(now);
}
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
if (!processTaskRelationList.isEmpty()) {
Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet());
Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet());
if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) {
return Constants.EXIT_CODE_SUCCESS;
}
processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode);
}
int result = processTaskRelationMapper.batchInsert(taskRelationList);
int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList);
return (result & resultLog) > 0 ? Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
}
public boolean isTaskOnline(long taskCode) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
if (!processTaskRelationList.isEmpty()) {
Set<Long> processDefinitionCodes = processTaskRelationList
.stream()
.map(ProcessTaskRelation::getProcessDefinitionCode)
.collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes);
// check process definition is already online
for (ProcessDefinition processDefinition : processDefinitionList) {
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
return true;
}
}
}
return false;
}
/**
* Generate the DAG Graph based on the process definition id
*
* @param processDefinition process definition
* @return dag graph
*/
public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations));
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
/**
* generate DagData
*/
public DagData genDagData(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations);
List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream()
.map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class))
.collect(Collectors.toList());
return new DagData(processDefinition, processTaskRelations, taskDefinitions);
}
public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) {
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPreTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
}
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
}
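
    // Illustrative example with hypothetical codes (added for clarity): the relations
    //   (pre=0, post=100, postVersion=1) and (pre=100, preVersion=1, post=200, postVersion=2)
    // yield the definition set {(100, v1), (200, v2)}; a pre task code of 0 marks a DAG entry task.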
/**
* find task definition by code and version
*/
public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) {
return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion);
}
/**
* find process task relation list by projectCode and processDefinitionCode
*/
public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) {
return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
}
/**
* add authorized resources
*
* @param ownResources own resources
* @param userId userId
*/
private void addAuthorizedResources(List<Resource> ownResources, int userId) {
List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
ownResources.addAll(relationResources);
}
/**
     * used temporarily, until taskNode is refactored
*/
public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, List<Long>> taskCodeMap = new HashMap<>();
for (ProcessTaskRelation processTaskRelation : taskRelationList) {
taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> {
if (v == null) {
v = new ArrayList<>();
}
if (processTaskRelation.getPreTaskCode() != 0L) {
v.add(processTaskRelation.getPreTaskCode());
}
return v;
});
}
if (CollectionUtils.isEmpty(taskDefinitionLogs)) {
taskDefinitionLogs = genTaskDefineList(taskRelationList);
}
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
List<TaskNode> taskNodeList = new ArrayList<>();
for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey());
if (taskDefinitionLog != null) {
TaskNode taskNode = new TaskNode();
taskNode.setCode(taskDefinitionLog.getCode());
taskNode.setVersion(taskDefinitionLog.getVersion());
taskNode.setName(taskDefinitionLog.getName());
taskNode.setDesc(taskDefinitionLog.getDescription());
taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase());
taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes());
taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval());
Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams());
taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT)));
taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE)));
taskParamsMap.remove(Constants.CONDITION_RESULT);
taskParamsMap.remove(Constants.DEPENDENCE);
taskNode.setParams(JSONUtils.toJsonString(taskParamsMap));
taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority());
taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup());
taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode());
taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN,
taskDefinitionLog.getTimeoutNotifyStrategy(),
taskDefinitionLog.getTimeout())));
taskNode.setDelayTime(taskDefinitionLog.getDelayTime());
taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList())));
taskNodeList.add(taskNode);
}
}
return taskNodeList;
}
public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) {
HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>();
        // find the parent process instance and parent task of this sub-process
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId);
if (processInstanceMap == null) {
return processTaskMap;
}
ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
if (fatherProcess != null) {
processTaskMap.put(fatherProcess, fatherTask);
}
return processTaskMap;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,478 | [Bug] [API & SERVICE] Missing history data in complement data mode | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
1. I have an hourly workflow; the crontab is "0 0 * * * ? *".
2. I ran a complement (backfill) of history data, with a date range from 00:00 to 03:59.
<img width="646" alt="aba7791b47ff5b28599a8f6885eeb52" src="https://user-images.githubusercontent.com/50513095/136656656-e0730af4-d6bc-4835-956a-4881a639230e.png">
3. The history run for 2021-10-09 03:00:00 was not triggered.
![image](https://user-images.githubusercontent.com/50513095/136656707-89cbc19e-e022-4f3c-b1e5-c62c8a14ea7d.png)
### What you expected to happen
I expect the run for 2021-10-09 03:00:00 to be executed, as shown by the sketch below.
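
As a quick cross-check, a minimal Quartz sketch (Quartz is the cron engine DolphinScheduler builds on; the cron expression and dates come from this report, while the boundary handling is only an assumption about where the run gets lost) shows that 03:00 belongs to the expected trigger set:

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import org.quartz.CronExpression;

public class ExpectedFireTimes {
    public static void main(String[] args) throws Exception {
        SimpleDateFormat f = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        CronExpression cron = new CronExpression("0 0 * * * ? *");
        Date start = f.parse("2021-10-09 00:00:00");
        Date end = f.parse("2021-10-09 03:59:59");
        // walk forward from just before the window start, collecting fire times <= end
        Date t = cron.getNextValidTimeAfter(new Date(start.getTime() - 1000));
        while (t != null && !t.after(end)) {
            System.out.println(f.format(t)); // prints 00:00, 01:00, 02:00 and 03:00
            t = cron.getNextValidTimeAfter(t);
        }
    }
}
```

If the complement logic drops the last window (for example by treating the end of the range as exclusive), the 03:00 run above is exactly the one that disappears.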
### How to reproduce
<img width="646" alt="fd5559070e8152804b241e41a2938b3" src="https://user-images.githubusercontent.com/50513095/136656799-6a19fe41-300e-4ea1-b27b-a680233a76a6.png">
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6478 | https://github.com/apache/dolphinscheduler/pull/6491 | e091801e05c8e9243ec96f76a2e7cc1f1f5cecab | 88690462b4b93f8df01ba1952374cdf00c02eda5 | "2021-10-09T11:53:38Z" | java | "2021-10-11T12:03:25Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.MAX_TASK_TIMEOUT;
import com.fasterxml.jackson.core.type.TypeReference;
import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* executor service impl
*/
@Service
public class ExecutorServiceImpl extends BaseServiceImpl implements ExecutorService {
private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceImpl.class);
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
@Autowired
private MonitorService monitorService;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private ProcessService processService;
@Autowired
StateEventCallbackService stateEventCallbackService;
/**
* execute process instance
*
* @param loginUser login user
* @param projectCode project code
* @param processDefinitionCode process definition code
* @param cronTime cron time
* @param commandType command type
* @param failureStrategy failure strategy
     * @param startNodeList start node list
* @param taskDependType node dependency type
* @param warningType warning type
* @param warningGroupId notify group id
* @param processInstancePriority process instance priority
* @param workerGroup worker group name
* @param environmentCode environment code
* @param runMode run mode
* @param timeout timeout
* @param startParams the global param values which pass to new process instance
* @param expectedParallelismNumber the expected parallelism number when execute complement in parallel mode
     * @param dryRun whether to start the process instance as a dry run
     * @return execute result code
*/
@Override
public Map<String, Object> execProcessInstance(User loginUser, long projectCode,
long processDefinitionCode, String cronTime, CommandType commandType,
FailureStrategy failureStrategy, String startNodeList,
TaskDependType taskDependType, WarningType warningType, int warningGroupId,
RunMode runMode,
                                                   Priority processInstancePriority, String workerGroup, Long environmentCode, Integer timeout,
Map<String, String> startParams, Integer expectedParallelismNumber,
int dryRun) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
// timeout is invalid
if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
putMsg(result, Status.TASK_TIMEOUT_PARAMS_ERROR);
return result;
}
// check process define release state
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode);
result = checkProcessDefinitionValid(processDefinition, processDefinitionCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
if (!checkTenantSuitable(processDefinition)) {
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
return result;
}
// check master exists
if (!checkMasterExists(result)) {
return result;
}
/**
* create command
*/
int create = this.createCommand(commandType, processDefinition.getCode(),
taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
warningGroupId, runMode, processInstancePriority, workerGroup, environmentCode, startParams, expectedParallelismNumber, dryRun);
if (create > 0) {
processDefinition.setWarningGroupId(warningGroupId);
processDefinitionMapper.updateById(processDefinition);
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.START_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* check whether master exists
*
* @param result result
* @return master exists return true , otherwise return false
*/
private boolean checkMasterExists(Map<String, Object> result) {
// check master server exists
List<Server> masterServers = monitorService.getServerListFromRegistry(true);
// no master
if (masterServers.isEmpty()) {
putMsg(result, Status.MASTER_NOT_EXISTS);
return false;
}
return true;
}
/**
* check whether the process definition can be executed
*
* @param processDefinition process definition
* @param processDefineCode process definition code
* @return check result code
*/
@Override
public Map<String, Object> checkProcessDefinitionValid(ProcessDefinition processDefinition, long processDefineCode) {
Map<String, Object> result = new HashMap<>();
if (processDefinition == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineCode);
} else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
// check process definition online
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefineCode);
} else {
result.put(Constants.STATUS, Status.SUCCESS);
}
return result;
}
/**
* do action to process instance:pause, stop, repeat, recover from pause, recover from stop
*
* @param loginUser login user
* @param projectCode project code
* @param processInstanceId process instance id
* @param executeType execute type
* @return execute result code
*/
@Override
public Map<String, Object> execute(User loginUser, long projectCode, Integer processInstanceId, ExecuteType executeType) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
// check master exists
if (!checkMasterExists(result)) {
return result;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
}
ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
if (executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE) {
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionCode());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
}
result = checkExecuteType(processInstance, executeType);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
if (!checkTenantSuitable(processDefinition)) {
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
}
//get the startParams user specified at the first starting while repeat running is needed
Map<String, Object> commandMap = JSONUtils.parseObject(processInstance.getCommandParam(), new TypeReference<Map<String, Object>>() {});
String startParams = null;
if (MapUtils.isNotEmpty(commandMap) && executeType == ExecuteType.REPEAT_RUNNING) {
Object startParamsJson = commandMap.get(Constants.CMD_PARAM_START_PARAMS);
if (startParamsJson != null) {
startParams = startParamsJson.toString();
}
}
switch (executeType) {
case REPEAT_RUNNING:
result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), CommandType.REPEAT_RUNNING, startParams);
break;
case RECOVER_SUSPENDED_PROCESS:
result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), CommandType.RECOVER_SUSPENDED_PROCESS, startParams);
break;
case START_FAILURE_TASK_PROCESS:
result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), CommandType.START_FAILURE_TASK_PROCESS, startParams);
break;
case STOP:
if (processInstance.getState() == ExecutionStatus.READY_STOP) {
putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
} else {
result = updateProcessInstancePrepare(processInstance, CommandType.STOP, ExecutionStatus.READY_STOP);
}
break;
case PAUSE:
if (processInstance.getState() == ExecutionStatus.READY_PAUSE) {
putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
} else {
result = updateProcessInstancePrepare(processInstance, CommandType.PAUSE, ExecutionStatus.READY_PAUSE);
}
break;
default:
logger.error("unknown execute type : {}", executeType);
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type");
break;
}
return result;
}
/**
* check tenant suitable
*
* @param processDefinition process definition
* @return true if tenant suitable, otherwise return false
*/
private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(),
processDefinition.getUserId());
return tenant != null;
}
/**
* Check the state of process instance and the type of operation match
*
* @param processInstance process instance
* @param executeType execute type
* @return check result code
*/
private Map<String, Object> checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) {
Map<String, Object> result = new HashMap<>();
ExecutionStatus executionStatus = processInstance.getState();
boolean checkResult = false;
switch (executeType) {
case PAUSE:
case STOP:
if (executionStatus.typeIsRunning()) {
checkResult = true;
}
break;
case REPEAT_RUNNING:
if (executionStatus.typeIsFinished()) {
checkResult = true;
}
break;
case START_FAILURE_TASK_PROCESS:
if (executionStatus.typeIsFailure()) {
checkResult = true;
}
break;
case RECOVER_SUSPENDED_PROCESS:
if (executionStatus.typeIsPause() || executionStatus.typeIsCancel()) {
checkResult = true;
}
break;
default:
break;
}
if (!checkResult) {
putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString());
} else {
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* prepare to update process instance command type and status
*
* @param processInstance process instance
* @param commandType command type
* @param executionStatus execute status
* @return update result
*/
private Map<String, Object> updateProcessInstancePrepare(ProcessInstance processInstance, CommandType commandType, ExecutionStatus executionStatus) {
Map<String, Object> result = new HashMap<>();
processInstance.setCommandType(commandType);
processInstance.addHistoryCmd(commandType);
processInstance.setState(executionStatus);
int update = processService.updateProcessInstance(processInstance);
// determine whether the process is normal
if (update > 0) {
String host = processInstance.getHost();
String address = host.split(":")[0];
int port = Integer.parseInt(host.split(":")[1]);
StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand(
processInstance.getId(), 0, processInstance.getState(), processInstance.getId(), 0
);
stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command());
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
}
return result;
}
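
    // Illustrative example with a hypothetical host (added for clarity): if
    // processInstance.getHost() returns "192.168.1.5:5678", the state-change callback is sent
    // to address "192.168.1.5" on port 5678, i.e. to the master that runs this instance.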
/**
* insert command, used in the implementation of the page, re run, recovery (pause / failure) execution
*
* @param loginUser login user
* @param instanceId instance id
* @param processDefinitionCode process definition code
* @param commandType command type
* @return insert result code
*/
private Map<String, Object> insertCommand(User loginUser, Integer instanceId, long processDefinitionCode, CommandType commandType, String startParams) {
Map<String, Object> result = new HashMap<>();
//To add startParams only when repeat running is needed
Map<String, Object> cmdParam = new HashMap<>();
cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, instanceId);
if (!StringUtils.isEmpty(startParams)) {
cmdParam.put(CMD_PARAM_START_PARAMS, startParams);
}
Command command = new Command();
command.setCommandType(commandType);
command.setProcessDefinitionCode(processDefinitionCode);
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setExecutorId(loginUser.getId());
if (!processService.verifyIsNeedCreateCommand(command)) {
putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND, processDefinitionCode);
return result;
}
int create = processService.createCommand(command);
if (create > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* check if sub processes are offline before starting process definition
*
* @param processDefinitionCode process definition code
* @return check result code
*/
@Override
public Map<String, Object> startCheckByProcessDefinedCode(long processDefinitionCode) {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode);
if (processDefinition == null) {
logger.error("process definition is not found");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "processDefinitionCode");
return result;
}
List<Integer> ids = new ArrayList<>();
processService.recurseFindSubProcessId(processDefinition.getId(), ids);
Integer[] idArray = ids.toArray(new Integer[ids.size()]);
if (!ids.isEmpty()) {
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(idArray);
if (processDefinitionList != null) {
for (ProcessDefinition processDefinitionTmp : processDefinitionList) {
/**
                     * if any sub process definition is not online, return directly
*/
if (processDefinitionTmp.getReleaseState() != ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinitionTmp.getName());
logger.info("not release process definition id: {} , name : {}",
processDefinitionTmp.getId(), processDefinitionTmp.getName());
return result;
}
}
}
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* create command
*
* @param commandType commandType
* @param processDefineCode processDefineCode
* @param nodeDep nodeDep
* @param failureStrategy failureStrategy
* @param startNodeList startNodeList
* @param schedule schedule
* @param warningType warningType
* @param executorId executorId
* @param warningGroupId warningGroupId
* @param runMode runMode
* @param processInstancePriority processInstancePriority
* @param workerGroup workerGroup
* @param environmentCode environmentCode
* @return command id
*/
private int createCommand(CommandType commandType, long processDefineCode,
TaskDependType nodeDep, FailureStrategy failureStrategy,
String startNodeList, String schedule, WarningType warningType,
int executorId, int warningGroupId,
RunMode runMode, Priority processInstancePriority, String workerGroup, Long environmentCode,
Map<String, String> startParams, Integer expectedParallelismNumber, int dryRun) {
// instantiate a command instance
Command command = new Command();
Map<String, String> cmdParam = new HashMap<>();
if (commandType == null) {
command.setCommandType(CommandType.START_PROCESS);
} else {
command.setCommandType(commandType);
}
command.setProcessDefinitionCode(processDefineCode);
if (nodeDep != null) {
command.setTaskDependType(nodeDep);
}
if (failureStrategy != null) {
command.setFailureStrategy(failureStrategy);
}
if (!StringUtils.isEmpty(startNodeList)) {
cmdParam.put(CMD_PARAM_START_NODE_NAMES, startNodeList);
}
if (warningType != null) {
command.setWarningType(warningType);
}
if (startParams != null && startParams.size() > 0) {
cmdParam.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams));
}
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setExecutorId(executorId);
command.setWarningGroupId(warningGroupId);
command.setProcessInstancePriority(processInstancePriority);
command.setWorkerGroup(workerGroup);
command.setEnvironmentCode(environmentCode);
command.setDryRun(dryRun);
Date start = null;
Date end = null;
if (!StringUtils.isEmpty(schedule)) {
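// schedule arrives as "start,end", e.g. "2020-01-01 00:00:00,2020-01-31 23:00:00"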
String[] interval = schedule.split(",");
if (interval.length == 2) {
start = DateUtils.getScheduleDate(interval[0]);
end = DateUtils.getScheduleDate(interval[1]);
if (start.after(end)) {
logger.info("complement data error, wrong date start:{} and end date:{} ",
start, end
);
return 0;
}
}
}
// determine whether to complement
if (commandType == CommandType.COMPLEMENT_DATA) {
if (start == null || end == null) {
return 0;
}
return createComplementCommandList(start, end, runMode, command, expectedParallelismNumber);
} else {
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
return processService.createCommand(command);
}
}
/**
* create complement command
* the generated date range is left-closed, right-open: [start, end)
*
* @param start start date of the range (inclusive)
* @param end end date of the range (exclusive)
* @param runMode serial or parallel run mode
* @return the number of created commands
*/
private int createComplementCommandList(Date start, Date end, RunMode runMode, Command command, Integer expectedParallelismNumber) {
int createCount = 0;
runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
switch (runMode) {
case RUN_MODE_SERIAL: {
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
createCount = processService.createCommand(command);
break;
}
case RUN_MODE_PARALLEL: {
LinkedList<Date> listDate = new LinkedList<>();
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
listDate.addAll(CronUtils.getSelfFireDateList(start, end, schedules));
createCount = listDate.size();
if (!CollectionUtils.isEmpty(listDate)) {
if (expectedParallelismNumber != null && expectedParallelismNumber != 0) {
createCount = Math.min(listDate.size(), expectedParallelismNumber);
}
logger.info("In parallel mode, current expectedParallelismNumber:{}", createCount);
int chunkSize = listDate.size() / createCount;
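// e.g. with 31 fire dates and expectedParallelismNumber = 15:
// chunkSize = 31 / 15 = 2, giving ranges [0,2], [2,4] ... [28,30];
// note that adjacent ranges share their boundary index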
for (int i = 0; i < createCount; i++) {
int rangeStart = i == 0 ? i : (i * chunkSize);
int rangeEnd = i == createCount - 1 ? listDate.size() - 1
: rangeStart + chunkSize;
if (rangeEnd == listDate.size()) {
rangeEnd = listDate.size() - 1;
}
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(listDate.get(rangeStart)));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(listDate.get(rangeEnd)));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
processService.createCommand(command);
}
}
break;
}
default:
break;
}
logger.info("create complement command count: {}", createCount);
return createCount;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,478 | [Bug] [API & SERVICE] Missing history data in complement data mode | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
1. I have an hourly workflow; the crontab is "0 0 * * * ? *".
2. I complement (backfill) history data for the range 00:00 to 03:59.
<img width="646" alt="aba7791b47ff5b28599a8f6885eeb52" src="https://user-images.githubusercontent.com/50513095/136656656-e0730af4-d6bc-4835-956a-4881a639230e.png">
3. The run for 2021-10-09 03:00:00 is never triggered.
![image](https://user-images.githubusercontent.com/50513095/136656707-89cbc19e-e022-4f3c-b1e5-c62c8a14ea7d.png)
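A minimal reproduction sketch of the boundary handling (assuming direct access to the service module's `CronUtils` and `DateUtils`; dates are illustrative):

```java
import java.util.Date;
import java.util.List;

import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;

public class ComplementBoundaryRepro {
    public static void main(String[] args) {
        Date start = DateUtils.stringToDate("2021-10-09 00:00:00");
        Date end = DateUtils.stringToDate("2021-10-09 03:00:00");
        // getSelfFireDateList stops as soon as the next fire time is after or equal to `end`,
        // so the 03:00:00 trigger of "0 0 * * * ? *" is never returned
        List<Date> fires = CronUtils.getSelfFireDateList(start, end, "0 0 * * * ? *");
        fires.forEach(System.out::println); // prints only the 01:00:00 and 02:00:00 fire times
    }
}
```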
### What you expected to happen
I expect the run for 2021-10-09 03:00:00 to be executed.
### How to reproduce
<img width="646" alt="fd5559070e8152804b241e41a2938b3" src="https://user-images.githubusercontent.com/50513095/136656799-6a19fe41-300e-4ea1-b27b-a680233a76a6.png">
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6478 | https://github.com/apache/dolphinscheduler/pull/6491 | e091801e05c8e9243ec96f76a2e7cc1f1f5cecab | 88690462b4b93f8df01ba1952374cdf00c02eda5 | "2021-10-09T11:53:38Z" | java | "2021-10-11T12:03:25Z" | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.ExecutorServiceImpl;
import org.apache.dolphinscheduler.api.service.impl.ProjectServiceImpl;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
/**
* executor service test
*/
@RunWith(MockitoJUnitRunner.Silent.class)
public class ExecutorServiceTest {
@InjectMocks
private ExecutorServiceImpl executorService;
@Mock
private ProcessService processService;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Mock
private ProjectMapper projectMapper;
@Mock
private ProjectServiceImpl projectService;
@Mock
private MonitorService monitorService;
private int processDefinitionId = 1;
private long processDefinitionCode = 1L;
private int processInstanceId = 1;
private int tenantId = 1;
private int userId = 1;
private ProcessDefinition processDefinition = new ProcessDefinition();
private ProcessInstance processInstance = new ProcessInstance();
private User loginUser = new User();
private long projectCode = 1L;
private String projectName = "projectName";
private Project project = new Project();
private String cronTime;
@Before
public void init() {
// user
loginUser.setId(userId);
// processDefinition
processDefinition.setId(processDefinitionId);
processDefinition.setReleaseState(ReleaseState.ONLINE);
processDefinition.setTenantId(tenantId);
processDefinition.setUserId(userId);
processDefinition.setVersion(1);
processDefinition.setCode(1L);
// processInstance
processInstance.setId(processInstanceId);
processInstance.setState(ExecutionStatus.FAILURE);
processInstance.setExecutorId(userId);
processInstance.setTenantId(tenantId);
processInstance.setProcessDefinitionVersion(1);
processInstance.setProcessDefinitionCode(1L);
// project
project.setCode(projectCode);
project.setName(projectName);
// cronRangeTime
cronTime = "2020-01-01 00:00:00,2020-01-31 23:00:00";
// mock
Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(project);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(checkProjectAndAuth());
Mockito.when(processDefinitionMapper.queryByCode(processDefinitionCode)).thenReturn(processDefinition);
Mockito.when(processService.getTenantForProcess(tenantId, userId)).thenReturn(new Tenant());
Mockito.when(processService.createCommand(any(Command.class))).thenReturn(1);
Mockito.when(monitorService.getServerListFromRegistry(true)).thenReturn(getMasterServersList());
Mockito.when(processService.findProcessInstanceDetailById(processInstanceId)).thenReturn(processInstance);
Mockito.when(processService.findProcessDefinition(1L, 1)).thenReturn(processDefinition);
}
/**
* not complement
*/
@Test
public void testNoComplement() {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode)).thenReturn(zeroSchedulerList());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, cronTime, CommandType.START_PROCESS,
null, null,
null, null, 0,
RunMode.RUN_MODE_SERIAL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 10, null, 0, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(1)).createCommand(any(Command.class));
}
/**
* start process with a start-node list
*/
@Test
public void testComplementWithStartNodeList() {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode)).thenReturn(zeroSchedulerList());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, cronTime, CommandType.START_PROCESS,
null, "n1,n2",
null, null, 0,
RunMode.RUN_MODE_SERIAL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 110, null, 0, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(1)).createCommand(any(Command.class));
}
/**
* date error
*/
@Test
public void testDateError() {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode)).thenReturn(zeroSchedulerList());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, "2020-01-31 23:00:00,2020-01-01 00:00:00", CommandType.COMPLEMENT_DATA,
null, null,
null, null, 0,
RunMode.RUN_MODE_SERIAL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 110, null, 0, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(Status.START_PROCESS_INSTANCE_ERROR, result.get(Constants.STATUS));
verify(processService, times(0)).createCommand(any(Command.class));
}
/**
* serial
*/
@Test
public void testSerial() {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode)).thenReturn(zeroSchedulerList());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, cronTime, CommandType.COMPLEMENT_DATA,
null, null,
null, null, 0,
RunMode.RUN_MODE_SERIAL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 110, null, 0, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(1)).createCommand(any(Command.class));
}
/**
* without schedule
*/
@Test
public void testParallelWithOutSchedule() {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode)).thenReturn(zeroSchedulerList());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, cronTime, CommandType.COMPLEMENT_DATA,
null, null,
null, null, 0,
RunMode.RUN_MODE_PARALLEL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 110, null, 0, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(31)).createCommand(any(Command.class));
}
/**
* with schedule
*/
@Test
public void testParallelWithSchedule() {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode)).thenReturn(oneSchedulerList());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, cronTime, CommandType.COMPLEMENT_DATA,
null, null,
null, null, 0,
RunMode.RUN_MODE_PARALLEL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 110, null, 15, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(15)).createCommand(any(Command.class));
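// the every-other-day cron "0 0 0 1/2 * ?" fires 16 times in January 2020
// (days 1, 3, ..., 31); createCount is capped at expectedParallelismNumber = 15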
}
@Test
public void testNoMasterServers() {
Mockito.when(monitorService.getServerListFromRegistry(true)).thenReturn(new ArrayList<>());
Map<String, Object> result = executorService.execProcessInstance(loginUser, projectCode,
processDefinitionCode, cronTime, CommandType.COMPLEMENT_DATA,
null, null,
null, null, 0,
RunMode.RUN_MODE_PARALLEL,
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 100L, 110, null, 0, Constants.DRY_RUN_FLAG_NO);
Assert.assertEquals(result.get(Constants.STATUS), Status.MASTER_NOT_EXISTS);
}
@Test
public void testExecuteRepeatRunning() {
Mockito.when(processService.verifyIsNeedCreateCommand(any(Command.class))).thenReturn(true);
Map<String, Object> result = executorService.execute(loginUser, projectCode, processInstanceId, ExecuteType.REPEAT_RUNNING);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testStartCheckByProcessDefinedCode() {
List<Integer> ids = new ArrayList<>();
ids.add(1);
Mockito.doNothing().when(processService).recurseFindSubProcessId(1, ids);
List<ProcessDefinition> processDefinitionList = new ArrayList<>();
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setId(1);
processDefinition.setReleaseState(ReleaseState.ONLINE);
processDefinitionList.add(processDefinition);
Mockito.when(processDefinitionMapper.queryDefinitionListByIdList(new Integer[ids.size()]))
.thenReturn(processDefinitionList);
Map<String, Object> result = executorService.startCheckByProcessDefinedCode(1L);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
private List<Server> getMasterServersList() {
List<Server> masterServerList = new ArrayList<>();
Server masterServer1 = new Server();
masterServer1.setId(1);
masterServer1.setHost("192.168.220.188");
masterServer1.setPort(1121);
masterServerList.add(masterServer1);
Server masterServer2 = new Server();
masterServer2.setId(2);
masterServer2.setHost("192.168.220.189");
masterServer2.setPort(1122);
masterServerList.add(masterServer2);
return masterServerList;
}
private List<Schedule> zeroSchedulerList() {
return Collections.emptyList();
}
private List<Schedule> oneSchedulerList() {
List<Schedule> schedulerList = new LinkedList<>();
Schedule schedule = new Schedule();
schedule.setCrontab("0 0 0 1/2 * ?");
schedulerList.add(schedule);
return schedulerList;
}
private Map<String, Object> checkProjectAndAuth() {
Map<String, Object> result = new HashMap<>();
result.put(Constants.STATUS, Status.SUCCESS);
return result;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,478 | [Bug] [API & SERVICE] Missing history data in complement data mode | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
1. I have an hourly workflow; the crontab is "0 0 * * * ? *".
2. I complement (backfill) history data for the range 00:00 to 03:59.
<img width="646" alt="aba7791b47ff5b28599a8f6885eeb52" src="https://user-images.githubusercontent.com/50513095/136656656-e0730af4-d6bc-4835-956a-4881a639230e.png">
3. The run for 2021-10-09 03:00:00 is never triggered.
![image](https://user-images.githubusercontent.com/50513095/136656707-89cbc19e-e022-4f3c-b1e5-c62c8a14ea7d.png)
### What you expected to happen
I expect the run for 2021-10-09 03:00:00 to be executed.
### How to reproduce
<img width="646" alt="fd5559070e8152804b241e41a2938b3" src="https://user-images.githubusercontent.com/50513095/136656799-6a19fe41-300e-4ea1-b27b-a680233a76a6.png">
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6478 | https://github.com/apache/dolphinscheduler/pull/6491 | e091801e05c8e9243ec96f76a2e7cc1f1f5cecab | 88690462b4b93f8df01ba1952374cdf00c02eda5 | "2021-10-09T11:53:38Z" | java | "2021-10-11T12:03:25Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/cron/CronUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.quartz.cron;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.day;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.hour;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.min;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.month;
import static org.apache.dolphinscheduler.service.quartz.cron.CycleFactory.week;
import static com.cronutils.model.CronType.QUARTZ;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.cronutils.model.Cron;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;
/**
* cron utils
*/
public class CronUtils {
private CronUtils() {
throw new IllegalStateException("CronUtils class");
}
private static final Logger logger = LoggerFactory.getLogger(CronUtils.class);
private static final CronParser QUARTZ_CRON_PARSER = new CronParser(CronDefinitionBuilder.instanceDefinitionFor(QUARTZ));
/**
* parse to cron
*
* @param cronExpression cron expression, never null
* @return Cron instance, corresponding to cron expression received
*/
public static Cron parse2Cron(String cronExpression) {
return QUARTZ_CRON_PARSER.parse(cronExpression);
}
/**
* build a new CronExpression based on the string cronExpression
*
* @param cronExpression String representation of the cron expression the new object should represent
* @return CronExpression
* @throws ParseException if the string expression cannot be parsed into a valid
*/
public static CronExpression parse2CronExpression(String cronExpression) throws ParseException {
return new CronExpression(cronExpression);
}
/**
* get max cycle
*
* @param cron cron
* @return CycleEnum
*/
public static CycleEnum getMaxCycle(Cron cron) {
return min(cron).addCycle(hour(cron)).addCycle(day(cron)).addCycle(week(cron)).addCycle(month(cron)).getCycle();
}
/**
* get min cycle
*
* @param cron cron
* @return CycleEnum
*/
public static CycleEnum getMiniCycle(Cron cron) {
return min(cron).addCycle(hour(cron)).addCycle(day(cron)).addCycle(week(cron)).addCycle(month(cron)).getMiniCycle();
}
/**
* get max cycle
*
* @param crontab crontab
* @return CycleEnum
*/
public static CycleEnum getMaxCycle(String crontab) {
return getMaxCycle(parse2Cron(crontab));
}
/**
* gets all scheduled fire times in a period, without self-dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cronExpression cronExpression
* @return date list
*/
public static List<Date> getFireDateList(Date startTime, Date endTime, CronExpression cronExpression) {
List<Date> dateList = new ArrayList<>();
while (Stopper.isRunning()) {
startTime = cronExpression.getNextValidTimeAfter(startTime);
if (startTime.after(endTime)) {
break;
}
dateList.add(startTime);
}
return dateList;
}
/**
* gets at most fireTimes scheduled fire times in a period, based on self-dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cronExpression cronExpression
* @param fireTimes fireTimes
* @return date list
*/
public static List<Date> getSelfFireDateList(Date startTime, Date endTime, CronExpression cronExpression, int fireTimes) {
List<Date> dateList = new ArrayList<>();
while (fireTimes > 0) {
startTime = cronExpression.getNextValidTimeAfter(startTime);
if (startTime.after(endTime) || startTime.equals(endTime)) {
break;
}
dateList.add(startTime);
fireTimes--;
}
return dateList;
}
/**
* gets all scheduled times for a period of time based on self dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cronExpression cronExpression
* @return date list
*/
public static List<Date> getSelfFireDateList(Date startTime, Date endTime, CronExpression cronExpression) {
List<Date> dateList = new ArrayList<>();
while (Stopper.isRunning()) {
startTime = cronExpression.getNextValidTimeAfter(startTime);
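// a fire time exactly equal to endTime is excluded (the range is end-exclusive);
// this boundary behaviour is what issue #6478 reports for complement data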
if (startTime.after(endTime) || startTime.equals(endTime)) {
break;
}
dateList.add(startTime);
}
return dateList;
}
/**
* gets all scheduled fire times in a period based on self-dependency;
* if the schedule list is empty, a default one-day schedule is used
*/
public static List<Date> getSelfFireDateList(final Date startTime, final Date endTime, final List<Schedule> schedules) {
List<Date> result = new ArrayList<>();
if (startTime.equals(endTime)) {
result.add(startTime);
return result;
}
Date from = new Date(startTime.getTime() - Constants.SECOND_TIME_MILLIS);
Date to = new Date(endTime.getTime() - Constants.SECOND_TIME_MILLIS);
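// shift both bounds back by one second: getNextValidTimeAfter() is exclusive,
// so startTime itself becomes eligible while endTime stays excluded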
List<Schedule> listSchedule = new ArrayList<>(schedules);
if (CollectionUtils.isEmpty(listSchedule)) {
Schedule schedule = new Schedule();
schedule.setCrontab(Constants.DEFAULT_CRON_STRING);
listSchedule.add(schedule);
}
for (Schedule schedule : listSchedule) {
result.addAll(CronUtils.getSelfFireDateList(from, to, schedule.getCrontab()));
}
return result;
}
/**
* gets all scheduled times for a period of time based on self dependency
*
* @param startTime startTime
* @param endTime endTime
* @param cron cron
* @return date list
*/
public static List<Date> getSelfFireDateList(Date startTime, Date endTime, String cron) {
CronExpression cronExpression = null;
try {
cronExpression = parse2CronExpression(cron);
} catch (ParseException e) {
logger.error(e.getMessage(), e);
return Collections.emptyList();
}
return getSelfFireDateList(startTime, endTime, cronExpression);
}
/**
* get expiration time
*
* @param startTime startTime
* @param cycleEnum cycleEnum
* @return date
*/
public static Date getExpirationTime(Date startTime, CycleEnum cycleEnum) {
Date maxExpirationTime = null;
Date startTimeMax = null;
try {
startTimeMax = getEndTime(startTime);
Calendar calendar = Calendar.getInstance();
calendar.setTime(startTime);
switch (cycleEnum) {
case HOUR:
calendar.add(Calendar.HOUR, 1);
break;
case DAY:
calendar.add(Calendar.DATE, 1);
break;
case WEEK:
calendar.add(Calendar.DATE, 1);
break;
case MONTH:
calendar.add(Calendar.DATE, 1);
break;
default:
logger.error("Dependent process definition's cycleEnum is {},not support!!", cycleEnum);
break;
}
maxExpirationTime = calendar.getTime();
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return DateUtils.compare(startTimeMax, maxExpirationTime) ? maxExpirationTime : startTimeMax;
}
/**
* get the end of the day (23:59:59.999) for the given date
*
* @return date
*/
private static Date getEndTime(Date date) {
Calendar end = new GregorianCalendar();
end.setTime(date);
end.set(Calendar.HOUR_OF_DAY, 23);
end.set(Calendar.MINUTE, 59);
end.set(Calendar.SECOND, 59);
end.set(Calendar.MILLISECOND, 999);
return end.getTime();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,502 | [Bug] [MasterServer] cmdParam NPE when constructProcessInstance on SCHEDULER mode | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
```
[ERROR] 2021-10-11 23:38:00.433 org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService:[211] - scan command error
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.service.process.ProcessService.constructProcessInstance(ProcessService.java:804)
at org.apache.dolphinscheduler.service.process.ProcessService.handleCommand(ProcessService.java:216)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$c9f32f1c.handleCommand(<generated>)
at org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService.scheduleProcess(MasterSchedulerService.java:189)
at org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService.run(MasterSchedulerService.java:169)
```
### What you expected to happen
not NPE when run
### How to reproduce
run some task
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6502 | https://github.com/apache/dolphinscheduler/pull/6504 | db04a5b04df8aa5754741c6dd456579d743af1e9 | fea7874f95028325f52300949295260cafab4a64 | "2021-10-12T02:35:18Z" | java | "2021-10-12T06:42:49Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.facebook.presto.jdbc.internal.guava.collect.Lists;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* process relative dao that some mappers in this.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessDefinitionLogMapper processDefineLogMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
@Autowired
private TaskDefinitionMapper taskDefinitionMapper;
@Autowired
private TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
private ProcessTaskRelationLogMapper processTaskRelationLogMapper;
@Autowired
private EnvironmentMapper environmentMapper;
/**
* handle Command (construct ProcessInstance from Command), wrapped in a transaction
*
* @param logger logger
* @param host host
* @param command found command
* @param processDefinitionCacheMaps cache of process definitions keyed by "code-version"
* @return process instance
*/
public ProcessInstance handleCommand(Logger logger, String host, Command command, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
ProcessInstance processInstance = constructProcessInstance(command, host, processDefinitionCacheMaps);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
this.commandMapper.deleteById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
this.commandMapper.deleteById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* get command page
*
* @param pageSize
* @param pageNumber
* @return
*/
public List<Command> findCommandPage(int pageSize, int pageNumber) {
return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize);
}
/**
* check whether the input command already exists in the queue
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
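// for these recovery-style command types, at most one queued command
// per process instance should exist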
CommandType commandType = command.getCommandType();
if (cmdTypeMap.containsKey(commandType)) {
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.error("process define not exists");
return new ArrayList<>();
}
List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
return new ArrayList<>(taskDefinitionLogs);
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* find process define by code and version.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
if (processDefinition == null || processDefinition.getVersion() != version) {
processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version);
if (processDefinition != null) {
processDefinition.setId(0);
}
}
return processDefinition;
}
/**
* find process define by code.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) {
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
try (LogClientService logClient = new LogClientService()) {
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
// compatible with old versions
ip = taskInstance.getHost();
}
// remove task log from the logger server
logClient.removeTaskLog(ip, port, taskLogPath);
}
}
}
/**
* calculate the number of sub processes in the process definition.
*
* @param processDefinitionCode processDefinitionCode
* @return process thread num count
*/
private Integer workProcessThreadNumCount(long processDefinitionCode) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinition.getId(), ids);
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskDefinition taskNode : taskNodeList) {
String parameter = taskNode.getTaskParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
/**
* create a recovery-waiting-thread command when the thread pool is not enough for the process instance.
* sub process instances do not need a recovery command.
* the recovery-waiting-thread command is created and the origin command deleted at the same time.
* if the recovery command already exists, only its update_time field is updated.
*
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
// a sub process does not need to create a wait command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinition().getCode(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getEnvironmentCode(),
processInstance.getProcessInstancePriority(),
processInstance.getDryRun(),
processInstance.getId(),
processInstance.getProcessDefinitionVersion()
);
saveCommand(command);
return;
}
// update the command time if the current command is recovered from waiting
if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
if (scheduleTime == null
&& cmdParam != null
&& cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules);
if (complementDateList.size() > 0) {
scheduleTime = complementDateList.get(0);
} else {
logger.error("set scheduler time error: complement date list is empty, command: {}",
command.toString());
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setProcessDefinitionCode(processDefinition.getCode());
processInstance.setProcessDefinitionVersion(processDefinition.getVersion());
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
//processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
processInstance.setDryRun(command.getDryRun());
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
// reset global params while there are start parameters
setGlobalParamIfCommanded(processDefinition, cmdParam);
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? -1 : command.getEnvironmentCode());
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
Map<String, String> fatherParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
fatherParamMap = JSONUtils.toMap(fatherParamJson);
}
startParamMap.putAll(fatherParamMap);
// set start param into global params
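// e.g. a definition param {"dt": ""} combined with start params {"dt": "2021-10-09"}
// ends up as {"dt": "2021-10-09"} (illustrative values)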
if (startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
/**
* get process tenant.
* if there is a tenant id in the definition, use the tenant of the definition;
* if there is no tenant id in the definition, or the tenant does not exist,
* use the definition creator's tenant.
*
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* get an environment
* use the environment code to find an environment.
*
* @param environmentCode environmentCode
* @return Environment
*/
public Environment findEnvironmentByCode(Long environmentCode) {
Environment environment = null;
if (environmentCode >= 0) {
environment = environmentMapper.queryByEnvironmentCode(environmentCode);
}
return environment;
}
/**
* check whether command parameters are valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @param processDefinitionCacheMaps cache of process definitions keyed by "code-version"
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
ProcessInstance processInstance;
ProcessDefinition processDefinition;
CommandType commandType = command.getCommandType();
String key = String.format("%d-%d", command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
if (processDefinitionCacheMaps.containsKey(key)) {
processDefinition = processDefinitionCacheMaps.get(key);
} else {
processDefinition = this.findProcessDefinition(command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
if (processDefinition != null) {
processDefinitionCacheMaps.put(key, processDefinition);
}
}
if (processDefinition == null) {
logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
return null;
}
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
int processInstanceId = command.getProcessInstanceId();
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return processInstance;
}
}
if (cmdParam != null) {
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
// reset global params when cmdParam indicates repeat running
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
processInstance.setProcessDefinition(processDefinition);
}
//reset command parameter
if (processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data if id is not null
if (processInstance.getId() != 0) {
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
}
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* get process definition by command
* If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
* Otherwise, get the latest version of ProcessDefinition
*
* @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
if (cmdParam != null) {
int processInstanceId = 0;
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
}
if (processInstanceId != 0) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return null;
}
return processDefineLogMapper.queryByDefinitionCodeAndVersion(
processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
}
}
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
     * return COMPLEMENT_DATA if the process started with complement data
*
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules);
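        // for a non-sub process, take the first fire date within [start, end] as the schedule time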
if (complementDate.size() > 0
&& Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementDate.get(0));
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
     * set sub work process parameters.
     * handle the sub work process instance, update the relation table and command parameters,
     * set the sub work process flag, and extend the parent work process command parameters
*
* @param subProcessInstance subProcessInstance
*/
public void setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
        // copy the parent instance's user-defined params to the sub process
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
}
/**
     * join parent global params into the sub process.
     * only keys that are not already present in the sub process global params are joined.
*
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* retry submit task to db
*
     * @param taskInstance task instance
     * @param commitRetryTimes max retry times to commit the task to db
     * @param commitInterval interval between retries in milliseconds
     * @return the task instance persisted to db, or null if the commit never succeeded
*/
public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
int retryTimes = 1;
boolean submitDB = false;
TaskInstance task = null;
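        // retry the db commit up to commitRetryTimes, sleeping commitInterval ms between attempts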
while (retryTimes <= commitRetryTimes) {
try {
if (!submitDB) {
// submit task to db
task = submitTask(taskInstance);
if (task != null && task.getId() != 0) {
submitDB = true;
break;
}
}
if (!submitDB) {
logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes);
}
Thread.sleep(commitInterval);
} catch (Exception e) {
logger.error("task commit to mysql failed", e);
}
retryTimes += 1;
}
return task;
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
     * consider one case:
     * repeat running does not generate a new sub process instance
* set map {parent instance id, task instance id, 0(child instance id)}
*
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
//check create sub work flow firstly
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // fault-tolerance recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
* complement data needs transform parent parameter to child.
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if (parentProcessInstance.isComplementData()) {
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
if (fatherParams.size() != 0) {
cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
public Map<String, String> getGlobalParamMap(String globalParams) {
List<Property> propList;
Map<String, String> globalParamMap = new HashMap<>();
if (StringUtils.isNotEmpty(globalParams)) {
propList = JSONUtils.toList(globalParams, Property.class);
globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
}
return globalParamMap;
}
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId);
Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
Map<String, String> fatherParams = new HashMap<>();
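        // pass down only those parent global params that the sub process declares in its local params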
if (CollectionUtils.isNotEmpty(allParam)) {
for (Property info : allParam) {
fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
}
}
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
int subProcessInstanceId = childInstance == null ? 0 : childInstance.getId();
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
processDefinition.getCode(),
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
task.getEnvironmentCode(),
parentProcessInstance.getProcessInstancePriority(),
parentProcessInstance.getDryRun(),
subProcessInstanceId,
parentProcessInstance.getProcessDefinitionVersion()
);
}
/**
* initialize sub work flow state
* child instance state would be initialized when 'recovery from pause/stop/failure'
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
/**
* get sub work flow command type
     * child instance exists: child command = fatherCommand
     * child instance does not exist: child command = fatherCommand[0]
*/
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
     * @param childDefinitionCode childDefinitionCode
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
parentProcessInstance.getProcessDefinitionVersion());
ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
     * submit the task instance to the database
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
                    // create a new task instance; resetting the id to 0 below makes saveTaskInstance insert a new row
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
/**
* get submit task instance state by the work process state
     * the task state cannot be modified when the task is running/killed/submitted successfully,
     * because the task instance already exists in the task queue.
     * return pause if the work process state is ready pause
     * return stop if the work process state is ready stop
     * if none of the above is satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
        // return pause/stop if the process instance state is ready pause/stop,
        // otherwise return submit success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE
&& task.getRetryTimes() >= task.getMaxRetryTimes()) {
return false;
}
}
return true;
}
/**
     * insert or update the work process instance to the database
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* find task instance by id
*
* @param taskId task id
     * @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
* package task instance,associate processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return null;
}
setTaskInstanceDetail(taskInstance);
return taskInstance;
}
/**
* package task instance,associate processInstance and processDefine
*
* @param taskInstance taskInstance
* @return task instance
*/
public void setTaskInstanceDetail(TaskInstance taskInstance) {
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
updateTaskDefinitionResources(taskDefinition);
taskInstance.setTaskDefine(taskDefinition);
}
/**
* Update {@link ResourceInfo} information in {@link TaskDefinition}
*
* @param taskDefinition the given {@link TaskDefinition}
*/
private void updateTaskDefinitionResources(TaskDefinition taskDefinition) {
Map<String, Object> taskParameters = JSONUtils.parseObject(
taskDefinition.getTaskParams(),
new TypeReference<Map<String, Object>>() { });
if (taskParameters != null) {
// if contains mainJar field, query resource from database
// Flink, Spark, MR
if (taskParameters.containsKey("mainJar")) {
Object mainJarObj = taskParameters.get("mainJar");
ResourceInfo mainJar = JSONUtils.parseObject(
JSONUtils.toJsonString(mainJarObj),
ResourceInfo.class);
ResourceInfo resourceInfo = updateResourceInfo(mainJar);
if (resourceInfo != null) {
taskParameters.put("mainJar", resourceInfo);
}
}
// update resourceList information
if (taskParameters.containsKey("resourceList")) {
String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList"));
List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class);
List<ResourceInfo> updatedResourceInfos = resourceInfos
.stream()
.map(this::updateResourceInfo)
.filter(Objects::nonNull)
.collect(Collectors.toList());
taskParameters.put("resourceList", updatedResourceInfos);
}
// set task parameters
taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters));
}
}
/**
* update {@link ResourceInfo} by given original ResourceInfo
*
* @param res origin resource info
* @return {@link ResourceInfo}
*/
private ResourceInfo updateResourceInfo(ResourceInfo res) {
ResourceInfo resourceInfo = null;
        // proceed only when the resource info is not null and carries a valid resource id
if (res != null) {
int resourceId = res.getId();
if (resourceId <= 0) {
logger.error("invalid resourceId, {}", resourceId);
return null;
}
resourceInfo = new ResourceInfo();
// get resource from database, only one resource should be returned
Resource resource = getResourceById(resourceId);
resourceInfo.setId(resourceId);
resourceInfo.setRes(resource.getFileName());
resourceInfo.setResourceName(resource.getFullName());
if (logger.isInfoEnabled()) {
logger.info("updated resource info {}",
JSONUtils.toJsonString(resourceInfo));
}
}
return resourceInfo;
}
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
int count = 0;
if (processInstanceMap != null) {
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
*
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
/**
* change task state
*
* @param state state
* @param endTime endTime
* @param taskInstId taskInstId
* @param varPool varPool
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(taskInstance);
saveTaskInstance(taskInstance);
}
/**
* for show in page of taskInstance
*
* @param taskInstance
*/
public void changeOutParam(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getVarPool())) {
return;
}
List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class);
if (CollectionUtils.isEmpty(properties)) {
return;
}
        // if the result has more than one line, just take the first one
Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() {});
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> outProperty = new HashMap<>();
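        // collect the values of OUT-direction properties produced in the var pool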
for (Property info : properties) {
if (info.getDirect() == Direct.OUT) {
outProperty.put(info.getProp(), info.getValue());
}
}
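        // write the collected values back into the task's OUT-direction local params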
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
info.setValue(outProperty.get(paramName));
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams));
}
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionCode
*
* @param processDefinitionCode processDefinitionCode
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
        // 1. reset the processInstance host to null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
        // 2. insert a recover command
Command cmd = new Command();
cmd.setProcessDefinitionCode(processDefinition.getCode());
cmd.setProcessDefinitionVersion(processDefinition.getVersion());
cmd.setProcessInstanceId(processInstance.getId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
        // normalize the resource name so the tenant code can still be queried for data created by older versions
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
return StringUtils.EMPTY;
}
int userId = resourceList.get(0).getUserId();
User user = userMapper.selectById(userId);
if (Objects.isNull(user)) {
return StringUtils.EMPTY;
}
Tenant tenant = tenantMapper.selectById(user.getTenantId());
if (Objects.isNull(tenant)) {
return StringUtils.EMPTY;
}
return tenant.getTenantCode();
}
/**
* find schedule list by process define codes.
*
* @param codes codes
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineCode(long[] codes) {
return scheduleMapper.selectAllByProcessDefineArray(codes);
}
/**
* find last scheduler process instance in the date interval
*
* @param definitionCode definitionCode
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionCode process definition code
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionCode process definition code
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionCode,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
     * list unauthorized entities (resource files, data sources, or udf functions)
     *
     * @param userId user id
     * @param needChecks the ids or names that need to be checked
     * @param authorizationType authorization type
     * @return unauthorized entity list
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks);
addAuthorizedResources(ownUdfResources, userId);
Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks);
addAuthorizedResources(ownResources, userId);
Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
return "";
}
ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
if (definition == null) {
return "";
}
return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId());
}
/**
* switch process definition version to process definition log version
*/
public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) {
if (null == processDefinition || null == processDefinitionLog) {
return Constants.DEFINITION_FAILURE;
}
processDefinitionLog.setId(processDefinition.getId());
processDefinitionLog.setReleaseState(ReleaseState.OFFLINE);
processDefinitionLog.setFlag(Flag.YES);
int result = processDefineMapper.updateById(processDefinitionLog);
if (result > 0) {
result = switchProcessTaskRelationVersion(processDefinitionLog);
if (result <= 0) {
return Constants.DEFINITION_FAILURE;
}
}
return result;
}
public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
if (!processTaskRelationList.isEmpty()) {
processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode());
}
List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
return processTaskRelationMapper.batchInsert(processTaskRelationLogList);
}
/**
* get resource ids
*
* @param taskDefinition taskDefinition
* @return resource ids
*/
public String getResourceIds(TaskDefinition taskDefinition) {
Set<Integer> resourceIds = null;
AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams());
if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
resourceIds = params.getResourceFilesList().
stream()
.filter(t -> t.getId() != 0)
.map(ResourceInfo::getId)
.collect(Collectors.toSet());
}
if (CollectionUtils.isEmpty(resourceIds)) {
return StringUtils.EMPTY;
}
return StringUtils.join(resourceIds, ",");
}
public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) {
Date now = new Date();
List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>();
List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>();
for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
taskDefinitionLog.setProjectCode(projectCode);
taskDefinitionLog.setUpdateTime(now);
taskDefinitionLog.setOperateTime(now);
taskDefinitionLog.setOperator(operator.getId());
taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog));
if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) {
TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper
.queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion());
if (definitionCodeAndVersion != null) {
if (!taskDefinitionLog.equals(definitionCodeAndVersion)) {
taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId());
Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode());
taskDefinitionLog.setVersion(version + 1);
taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime());
updateTaskDefinitionLogs.add(taskDefinitionLog);
}
continue;
}
}
taskDefinitionLog.setUserId(operator.getId());
taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
taskDefinitionLog.setCreateTime(now);
if (taskDefinitionLog.getCode() == 0) {
try {
taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
logger.error("Task code get error, ", e);
return Constants.DEFINITION_FAILURE;
}
}
newTaskDefinitionLogs.add(taskDefinitionLog);
}
int insertResult = 0;
int updateResult = 0;
for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) {
TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode());
if (task == null) {
newTaskDefinitionLogs.add(taskDefinitionToUpdate);
} else {
insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate);
taskDefinitionToUpdate.setId(task.getId());
updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate);
}
}
if (!newTaskDefinitionLogs.isEmpty()) {
updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs);
insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs);
}
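        // success (1) only when the bitwise AND of the insert and update counts is positive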
return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS;
}
/**
* save processDefinition (including create or update processDefinition)
*/
public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) {
ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition);
Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode());
int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1;
processDefinitionLog.setVersion(insertVersion);
processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE);
processDefinitionLog.setOperator(operator.getId());
processDefinitionLog.setOperateTime(processDefinition.getUpdateTime());
int insertLog = processDefineLogMapper.insert(processDefinitionLog);
int result;
if (0 == processDefinition.getId()) {
result = processDefineMapper.insert(processDefinitionLog);
} else {
processDefinitionLog.setId(processDefinition.getId());
result = processDefineMapper.updateById(processDefinitionLog);
}
return (insertLog & result) > 0 ? insertVersion : 0;
}
/**
* save task relations
*/
public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion,
List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null;
if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) {
taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog));
}
Date now = new Date();
for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
processTaskRelationLog.setProjectCode(projectCode);
processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode);
processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion);
if (taskDefinitionLogMap != null) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode());
if (taskDefinitionLog != null) {
processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion());
}
processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion());
}
processTaskRelationLog.setCreateTime(now);
processTaskRelationLog.setUpdateTime(now);
processTaskRelationLog.setOperator(operator.getId());
processTaskRelationLog.setOperateTime(now);
}
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
if (!processTaskRelationList.isEmpty()) {
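            // compare by hashCode: if the stored relations already equal the incoming ones, there is nothing to update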
Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet());
Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet());
if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) {
return Constants.EXIT_CODE_SUCCESS;
}
processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode);
}
int result = processTaskRelationMapper.batchInsert(taskRelationList);
int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList);
return (result & resultLog) > 0 ? Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
}
public boolean isTaskOnline(long taskCode) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
if (!processTaskRelationList.isEmpty()) {
Set<Long> processDefinitionCodes = processTaskRelationList
.stream()
.map(ProcessTaskRelation::getProcessDefinitionCode)
.collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes);
// check process definition is already online
for (ProcessDefinition processDefinition : processDefinitionList) {
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
return true;
}
}
}
return false;
}
/**
* Generate the DAG Graph based on the process definition id
*
* @param processDefinition process definition
* @return dag graph
*/
public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations));
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
/**
* generate DagData
*/
public DagData genDagData(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations);
List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream()
.map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class))
.collect(Collectors.toList());
return new DagData(processDefinition, processTaskRelations, taskDefinitions);
}
public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) {
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPreTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
}
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
}
/**
* find task definition by code and version
*/
public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) {
return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion);
}
/**
* find process task relation list by projectCode and processDefinitionCode
*/
public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) {
return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
}
/**
* add authorized resources
*
* @param ownResources own resources
* @param userId userId
*/
private void addAuthorizedResources(List<Resource> ownResources, int userId) {
List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
ownResources.addAll(relationResources);
}
/**
* Use temporarily before refactoring taskNode
*/
public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, List<Long>> taskCodeMap = new HashMap<>();
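        // map each post-task code to the list of its predecessor task codes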
for (ProcessTaskRelation processTaskRelation : taskRelationList) {
taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> {
if (v == null) {
v = new ArrayList<>();
}
if (processTaskRelation.getPreTaskCode() != 0L) {
v.add(processTaskRelation.getPreTaskCode());
}
return v;
});
}
if (CollectionUtils.isEmpty(taskDefinitionLogs)) {
taskDefinitionLogs = genTaskDefineList(taskRelationList);
}
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
List<TaskNode> taskNodeList = new ArrayList<>();
for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey());
if (taskDefinitionLog != null) {
TaskNode taskNode = new TaskNode();
taskNode.setCode(taskDefinitionLog.getCode());
taskNode.setVersion(taskDefinitionLog.getVersion());
taskNode.setName(taskDefinitionLog.getName());
taskNode.setDesc(taskDefinitionLog.getDescription());
taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase());
taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes());
taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval());
Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams());
taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT)));
taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE)));
taskParamsMap.remove(Constants.CONDITION_RESULT);
taskParamsMap.remove(Constants.DEPENDENCE);
taskNode.setParams(JSONUtils.toJsonString(taskParamsMap));
taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority());
taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup());
taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode());
taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN,
taskDefinitionLog.getTimeoutNotifyStrategy(),
taskDefinitionLog.getTimeout())));
taskNode.setDelayTime(taskDefinitionLog.getDelayTime());
taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList())));
taskNodeList.add(taskNode);
}
}
return taskNodeList;
}
public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) {
HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>();
        // find the parent process instance and parent task of this sub-process, if any
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId);
if (processInstanceMap == null) {
return processTaskMap;
}
ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
if (fatherProcess != null) {
processTaskMap.put(fatherProcess, fatherTask);
}
return processTaskMap;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,497 | [Bug] [Task] Shell task can not use user defined environment correctly | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch:dev
When submitting a Shell task, the task cannot correctly obtain and set user-defined environment variables
### What you expected to happen
The shell task should obtain the user-defined environment when created and write the user-defined env config to the `command file` correctly
### How to reproduce
* create a new env
* create a task with the newly created env
* execute the task
* see task log, the env is not used correctly
### Anything else
# How often does problem occur?
always
# Why this problem occur ?
* When a task is submitted, the worker obtains a `Command` from the DB and gets a `TaskExecutionContext` from the command.
* The `TaskExecutionContext` is then transformed into a `TaskRequest`. `TaskExecutionContext` contains a field named `environmentConfig` that stores the user-defined env, but `TaskRequest` does **NOT** contain that field, so the user-defined env is lost (a hedged sketch of a possible fix follows below).
* When a `ShellCommandExecutor` is created, it only gets DolphinScheduler's system env file and executes a `source` command to set the env, but it does **NOT** set the user-defined env.
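A minimal sketch of one possible direction for a fix, assuming hypothetical names (`TaskRequestWithEnv` and its `environmentConfig` field stand in for the field that `TaskRequest` is missing); it only illustrates the intended data flow and is not the actual patch:

```java
// Hypothetical sketch (not the real patch): carry the user-defined env on the request
// and append it to the generated command file after the system env file is sourced.
public class EnvPropagationSketch {

    // stand-in for the transport object; TaskRequest already has envFile,
    // environmentConfig is the field this report says is missing
    static class TaskRequestWithEnv {
        private String envFile;
        private String environmentConfig;

        String getEnvFile() { return envFile; }
        void setEnvFile(String envFile) { this.envFile = envFile; }
        String getEnvironmentConfig() { return environmentConfig; }
        void setEnvironmentConfig(String cfg) { this.environmentConfig = cfg; }
    }

    // mirrors how the worker builds the shell command file
    static String buildCommandFileContent(TaskRequestWithEnv request, String execCommand) {
        StringBuilder sb = new StringBuilder("#!/bin/sh\n");
        if (request.getEnvFile() != null) {
            sb.append("source ").append(request.getEnvFile()).append("\n");
        }
        // write the user-defined env after the system env so task-level settings win
        if (request.getEnvironmentConfig() != null) {
            sb.append(request.getEnvironmentConfig()).append("\n");
        }
        return sb.append(execCommand).toString();
    }

    public static void main(String[] args) {
        TaskRequestWithEnv request = new TaskRequestWithEnv();
        request.setEnvFile("/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh");
        request.setEnvironmentConfig("export MY_VAR=from-task-env");
        System.out.println(buildCommandFileContent(request, "echo $MY_VAR"));
    }
}
```

Writing the user-defined config after `source`-ing the system env file means task-level variables override the defaults, which matches the expectation described above.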
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6497 | https://github.com/apache/dolphinscheduler/pull/6509 | fea7874f95028325f52300949295260cafab4a64 | a80fca70f7f325aad8eda2fdc1fe64b2a7d2f04f | "2021-10-11T09:10:35Z" | java | "2021-10-12T09:47:04Z" | dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/task/request/TaskRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.spi.task.request;
import org.apache.dolphinscheduler.spi.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.spi.task.Property;
import java.util.Date;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonFormat;
/**
 * task transport object passed between master and worker
*/
public class TaskRequest {
/**
* task id
*/
private int taskInstanceId;
/**
* task name
*/
private String taskName;
/**
* task first submit time.
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date firstSubmitTime;
/**
* task start time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date startTime;
/**
* task type
*/
private String taskType;
/**
* host
*/
private String host;
/**
* task execute path
*/
private String executePath;
/**
* log path
*/
private String logPath;
/**
* task json
*/
private String taskJson;
/**
* processId
*/
private int processId;
/**
* appIds
*/
private String appIds;
/**
* process instance id
*/
private int processInstanceId;
/**
* process instance schedule time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date scheduleTime;
/**
* process instance global parameters
*/
private String globalParams;
/**
* execute user id
*/
private int executorId;
/**
* command type if complement
*/
private int cmdTypeIfComplement;
/**
* tenant code
*/
private String tenantCode;
/**
* task queue
*/
private String queue;
/**
* process define id
*/
private int processDefineId;
/**
* project id
*/
private int projectId;
/**
* taskParams
*/
private String taskParams;
/**
* envFile
*/
private String envFile;
/**
* definedParams
*/
private Map<String, String> definedParams;
/**
* task AppId
*/
private String taskAppId;
/**
* task timeout strategy
*/
private TaskTimeoutStrategy taskTimeoutStrategy;
/**
* task timeout
*/
private int taskTimeout;
/**
* worker group
*/
private String workerGroup;
/**
* delay execution time.
*/
private int delayTime;
/**
* Task Logger name should be like: Task-{processDefinitionId}-{processInstanceId}-{taskInstanceId}
*/
private String taskLogName;
public String getTaskLogName() {
return taskLogName;
}
public void setTaskLogName(String taskLogName) {
this.taskLogName = taskLogName;
}
/**
* resources full name and tenant code
*/
private Map<String, String> resources;
private Map<String, Property> paramsMap;
/**
* sql TaskExecutionContext
*/
private SQLTaskExecutionContext sqlTaskExecutionContext;
/**
* datax TaskExecutionContext
*/
private DataxTaskExecutionContext dataxTaskExecutionContext;
/**
* procedure TaskExecutionContext
*/
private ProcedureTaskExecutionContext procedureTaskExecutionContext;
/**
* sqoop TaskExecutionContext
*/
private SqoopTaskExecutionContext sqoopTaskExecutionContext;
public Map<String, String> getResources() {
return resources;
}
public void setResources(Map<String, String> resources) {
this.resources = resources;
}
public Map<String, Property> getParamsMap() {
return paramsMap;
}
public void setParamsMap(Map<String, Property> paramsMap) {
this.paramsMap = paramsMap;
}
public int getTaskInstanceId() {
return taskInstanceId;
}
public void setTaskInstanceId(int taskInstanceId) {
this.taskInstanceId = taskInstanceId;
}
public String getTaskName() {
return taskName;
}
public void setTaskName(String taskName) {
this.taskName = taskName;
}
public Date getFirstSubmitTime() {
return firstSubmitTime;
}
public void setFirstSubmitTime(Date firstSubmitTime) {
this.firstSubmitTime = firstSubmitTime;
}
public Date getStartTime() {
return startTime;
}
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
public String getTaskType() {
return taskType;
}
public void setTaskType(String taskType) {
this.taskType = taskType;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public String getExecutePath() {
return executePath;
}
public void setExecutePath(String executePath) {
this.executePath = executePath;
}
public String getLogPath() {
return logPath;
}
public void setLogPath(String logPath) {
this.logPath = logPath;
}
public String getTaskJson() {
return taskJson;
}
public void setTaskJson(String taskJson) {
this.taskJson = taskJson;
}
public int getProcessId() {
return processId;
}
public void setProcessId(int processId) {
this.processId = processId;
}
public String getAppIds() {
return appIds;
}
public void setAppIds(String appIds) {
this.appIds = appIds;
}
public int getProcessInstanceId() {
return processInstanceId;
}
public void setProcessInstanceId(int processInstanceId) {
this.processInstanceId = processInstanceId;
}
public Date getScheduleTime() {
return scheduleTime;
}
public void setScheduleTime(Date scheduleTime) {
this.scheduleTime = scheduleTime;
}
public String getGlobalParams() {
return globalParams;
}
public void setGlobalParams(String globalParams) {
this.globalParams = globalParams;
}
public int getExecutorId() {
return executorId;
}
public void setExecutorId(int executorId) {
this.executorId = executorId;
}
public int getCmdTypeIfComplement() {
return cmdTypeIfComplement;
}
public void setCmdTypeIfComplement(int cmdTypeIfComplement) {
this.cmdTypeIfComplement = cmdTypeIfComplement;
}
public String getTenantCode() {
return tenantCode;
}
public void setTenantCode(String tenantCode) {
this.tenantCode = tenantCode;
}
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
public int getProcessDefineId() {
return processDefineId;
}
public void setProcessDefineId(int processDefineId) {
this.processDefineId = processDefineId;
}
public int getProjectId() {
return projectId;
}
public void setProjectId(int projectId) {
this.projectId = projectId;
}
public String getTaskParams() {
return taskParams;
}
public void setTaskParams(String taskParams) {
this.taskParams = taskParams;
}
public String getEnvFile() {
return envFile;
}
public void setEnvFile(String envFile) {
this.envFile = envFile;
}
public Map<String, String> getDefinedParams() {
return definedParams;
}
public void setDefinedParams(Map<String, String> definedParams) {
this.definedParams = definedParams;
}
public String getTaskAppId() {
return taskAppId;
}
public void setTaskAppId(String taskAppId) {
this.taskAppId = taskAppId;
}
public TaskTimeoutStrategy getTaskTimeoutStrategy() {
return taskTimeoutStrategy;
}
public void setTaskTimeoutStrategy(TaskTimeoutStrategy taskTimeoutStrategy) {
this.taskTimeoutStrategy = taskTimeoutStrategy;
}
public int getTaskTimeout() {
return taskTimeout;
}
public void setTaskTimeout(int taskTimeout) {
this.taskTimeout = taskTimeout;
}
public String getWorkerGroup() {
return workerGroup;
}
public void setWorkerGroup(String workerGroup) {
this.workerGroup = workerGroup;
}
public int getDelayTime() {
return delayTime;
}
public void setDelayTime(int delayTime) {
this.delayTime = delayTime;
}
public SQLTaskExecutionContext getSqlTaskExecutionContext() {
return sqlTaskExecutionContext;
}
public void setSqlTaskExecutionContext(SQLTaskExecutionContext sqlTaskExecutionContext) {
this.sqlTaskExecutionContext = sqlTaskExecutionContext;
}
public DataxTaskExecutionContext getDataxTaskExecutionContext() {
return dataxTaskExecutionContext;
}
public void setDataxTaskExecutionContext(DataxTaskExecutionContext dataxTaskExecutionContext) {
this.dataxTaskExecutionContext = dataxTaskExecutionContext;
}
public SqoopTaskExecutionContext getSqoopTaskExecutionContext() {
return sqoopTaskExecutionContext;
}
public void setSqoopTaskExecutionContext(SqoopTaskExecutionContext sqoopTaskExecutionContext) {
this.sqoopTaskExecutionContext = sqoopTaskExecutionContext;
}
public ProcedureTaskExecutionContext getProcedureTaskExecutionContext() {
return procedureTaskExecutionContext;
}
public void setProcedureTaskExecutionContext(ProcedureTaskExecutionContext procedureTaskExecutionContext) {
this.procedureTaskExecutionContext = procedureTaskExecutionContext;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,497 | [Bug] [Task] Shell task can not use user defined environment correctly | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch:dev
When submitting a Shell task, the task cannot correctly obtain and set user-defined environment variables
### What you expected to happen
The shell task should obtain the user-defined environment when created and write the user-defined env config to the `command file` correctly
### How to reproduce
* create a new env
* create a task with the newly created env
* execute the task
* see task log, the env is not used correctly
### Anything else
# How often does problem occur?
always
# Why this problem occur ?
* When a task is submitted, the worker obtains a `Command` from the DB and gets a `TaskExecutionContext` from the command.
* The `TaskExecutionContext` is then transformed into a `TaskRequest`. `TaskExecutionContext` contains a field named `environmentConfig` that stores the user-defined env, but `TaskRequest` does **NOT** contain that field, so the user-defined env is lost.
* When a `ShellCommandExecutor` is created, it only gets DolphinScheduler's system env file and executes a `source` command to set the env, but it does **NOT** set the user-defined env.
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6497 | https://github.com/apache/dolphinscheduler/pull/6509 | fea7874f95028325f52300949295260cafab4a64 | a80fca70f7f325aad8eda2fdc1fe64b2a7d2f04f | "2021-10-11T09:10:35Z" | java | "2021-10-12T09:47:04Z" | dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/ShellCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.api;
import org.apache.dolphinscheduler.plugin.task.util.OSUtils;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Consumer;
import org.slf4j.Logger;
/**
* shell command executor
*/
public class ShellCommandExecutor extends AbstractCommandExecutor {
/**
* For Unix-like, using sh
*/
private static final String SH = "sh";
/**
* For Windows, using cmd.exe
*/
private static final String CMD = "cmd.exe";
/**
* constructor
*
* @param logHandler logHandler
* @param taskRequest taskRequest
* @param logger logger
*/
public ShellCommandExecutor(Consumer<LinkedBlockingQueue<String>> logHandler,
TaskRequest taskRequest,
Logger logger) {
super(logHandler, taskRequest, logger);
}
public ShellCommandExecutor(LinkedBlockingQueue<String> logBuffer) {
super(logBuffer);
}
@Override
protected String buildCommandFilePath() {
// command file
return String.format("%s/%s.%s"
, taskRequest.getExecutePath()
, taskRequest.getTaskAppId()
, OSUtils.isWindows() ? "bat" : "command");
}
/**
* create command file if not exists
*
* @param execCommand exec command
* @param commandFile command file
* @throws IOException io exception
*/
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("tenantCode user:{}, task dir:{}", taskRequest.getTenantCode(),
taskRequest.getTaskAppId());
// create if non existence
if (!Files.exists(Paths.get(commandFile))) {
logger.info("create command file:{}", commandFile);
StringBuilder sb = new StringBuilder();
if (OSUtils.isWindows()) {
sb.append("@echo off\n");
sb.append("cd /d %~dp0\n");
if (taskRequest.getEnvFile() != null) {
sb.append("call ").append(taskRequest.getEnvFile()).append("\n");
}
} else {
sb.append("#!/bin/sh\n");
sb.append("BASEDIR=$(cd `dirname $0`; pwd)\n");
sb.append("cd $BASEDIR\n");
if (taskRequest.getEnvFile() != null) {
sb.append("source ").append(taskRequest.getEnvFile()).append("\n");
}
}
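            // per issue #6497 above: only the system env file is sourced here; the task's
            // user-defined environment config is never written into the command file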
sb.append(execCommand);
logger.info("command : {}", sb);
// write data to file
FileUtils.writeStringToFile(new File(commandFile), sb.toString(), StandardCharsets.UTF_8);
}
}
@Override
protected String commandInterpreter() {
return OSUtils.isWindows() ? CMD : SH;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,497 | [Bug] [Task] Shell task can not use user defined environment correctly | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch:dev
When submitting a Shell task, the task cannot correctly obtain and set user-defined environment variables
### What you expected to happen
The shell task should obtain the user-defined environment when created and write the user-defined env config to the `command file` correctly
### How to reproduce
* create a new env
* create a task with the newly created env
* execute the task
* see task log, the env is not used correctly
### Anything else
# How often does problem occur?
always
# Why this problem occur ?
* When a task is submitted, the worker obtains a `Command` from the DB and gets a `TaskExecutionContext` from the command.
* The `TaskExecutionContext` is then transformed into a `TaskRequest`. `TaskExecutionContext` contains a field named `environmentConfig` that stores the user-defined env, but `TaskRequest` does **NOT** contain that field, so the user-defined env is lost.
* When a `ShellCommandExecutor` is created, it only gets DolphinScheduler's system env file and executes a `source` command to set the env, but it does **NOT** set the user-defined env.
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6497 | https://github.com/apache/dolphinscheduler/pull/6509 | fea7874f95028325f52300949295260cafab4a64 | a80fca70f7f325aad8eda2fdc1fe64b2a7d2f04f | "2021-10-11T09:10:35Z" | java | "2021-10-12T09:47:04Z" | dolphinscheduler-task-plugin/dolphinscheduler-task-python/src/main/java/org/apache/dolphinscheduler/plugin/task/python/PythonCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.python;
import org.apache.dolphinscheduler.plugin.task.api.AbstractCommandExecutor;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import org.apache.commons.io.FileUtils;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* python command executor
*/
public class PythonCommandExecutor extends AbstractCommandExecutor {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(PythonCommandExecutor.class);
/**
* python
*/
public static final String PYTHON = "python";
private static final Pattern PYTHON_PATH_PATTERN = Pattern.compile("/bin/python[\\d.]*$");
/**
* constructor
*
* @param logHandler log handler
* @param taskRequest TaskRequest
* @param logger logger
*/
public PythonCommandExecutor(Consumer<LinkedBlockingQueue<String>> logHandler,
TaskRequest taskRequest,
Logger logger) {
super(logHandler, taskRequest, logger);
}
/**
* build command file path
*
* @return command file path
*/
@Override
protected String buildCommandFilePath() {
return String.format("%s/py_%s.command", taskRequest.getExecutePath(), taskRequest.getTaskAppId());
}
/**
* create command file if not exists
*
* @param execCommand exec command
* @param commandFile command file
* @throws IOException io exception
*/
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("tenantCode :{}, task dir:{}", taskRequest.getTenantCode(), taskRequest.getExecutePath());
if (!Files.exists(Paths.get(commandFile))) {
logger.info("generate command file:{}", commandFile);
StringBuilder sb = new StringBuilder();
sb.append("#-*- encoding=utf8 -*-\n");
sb.append("\n\n");
sb.append(execCommand);
logger.info(sb.toString());
// write data to file
FileUtils.writeStringToFile(new File(commandFile),
sb.toString(),
StandardCharsets.UTF_8);
}
}
/**
* get the absolute path of the Python command
     * <p>
     * note: the PYTHON_HOME configured in the env file referenced by dolphinscheduler.env.path
     * (see common.properties) is the absolute path of the Python executable, not PYTHON_HOME itself.
     * <p>
     * for example:
     * if your Python installation lives in /opt/python3.7/,
     * you must set PYTHON_HOME=/opt/python3.7/python in that env file.
*
* @param envPath env path
* @return python home
*/
private static String getPythonHome(String envPath) {
        StringBuilder sb = new StringBuilder();
        try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(envPath), StandardCharsets.UTF_8))) {
String line;
while ((line = br.readLine()) != null) {
if (line.contains(PythonConstants.PYTHON_HOME)) {
sb.append(line);
break;
}
}
String result = sb.toString();
if (StringUtils.isEmpty(result)) {
return null;
}
String[] arrs = result.split(PythonConstants.EQUAL_SIGN);
if (arrs.length == 2) {
return arrs[1];
}
} catch (IOException e) {
logger.error("read file failure", e);
}
return null;
}
/**
* Gets the command path to which Python can execute
* @return python command path
*/
@Override
protected String commandInterpreter() {
String pythonHome = getPythonHome(taskRequest.getEnvFile());
return getPythonCommand(pythonHome);
}
/**
* get python command
*
* @param pythonHome python home
* @return python command
*/
public static String getPythonCommand(String pythonHome) {
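        // resolution order: empty -> bare "python" from PATH; an existing file, or any path
        // matching /bin/python[x.y] -> use as-is; otherwise treat it as an install dir and append /bin/python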
if (StringUtils.isEmpty(pythonHome)) {
return PYTHON;
}
File file = new File(pythonHome);
if (file.exists() && file.isFile()) {
return pythonHome;
}
if (PYTHON_PATH_PATTERN.matcher(pythonHome).find()) {
return pythonHome;
}
return Paths.get(pythonHome, "/bin/python").toString();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,514 | [Bug] [Task] Task load class fail | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
![image](https://user-images.githubusercontent.com/16631152/136983008-88168ba2-8c03-40ee-a246-b7dae3bfd4fc.png)
### What you expected to happen
normal run
### How to reproduce
run `ShellCommandExecutor` when a Commons-Lang class is used
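A hypothetical, standalone probe (not DolphinScheduler code; commons-lang 2.x coordinates assumed) to check whether the class the log complains about is visible to the running classloader:

```java
// If the plugin classpath lacks commons-lang, Class.forName throws
// ClassNotFoundException, which shows up as NoClassDefFoundError at the
// call sites inside the task code.
public class CommonsLangVisibilityCheck {
    public static void main(String[] args) {
        try {
            Class.forName("org.apache.commons.lang.StringUtils", false,
                    Thread.currentThread().getContextClassLoader());
            System.out.println("commons-lang is visible to this classloader");
        } catch (ClassNotFoundException e) {
            System.out.println("commons-lang is missing: " + e.getMessage());
        }
    }
}
```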
### Anything else
no
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6514 | https://github.com/apache/dolphinscheduler/pull/6513 | a80fca70f7f325aad8eda2fdc1fe64b2a7d2f04f | bddf3e6f2c811c3ae6623bb588414173f0132c57 | "2021-10-12T15:16:03Z" | java | "2021-10-13T02:30:49Z" | dolphinscheduler-task-plugin/dolphinscheduler-task-api/pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>dolphinscheduler-task-plugin</artifactId>
<groupId>org.apache.dolphinscheduler</groupId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<packaging>jar</packaging>
<artifactId>dolphinscheduler-task-api</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<scope>provided</scope>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty.aggregate</groupId>
<artifactId>jetty-all</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-json</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-jvm</artifactId>
</exclusion>
<exclusion>
<groupId>com.github.joshelser</groupId>
<artifactId>dropwizard-metrics-hadoop-metrics2-reporter</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
</exclusion>
<exclusion>
<artifactId>log4j-slf4j-impl</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
<exclusion>
<artifactId>javax.servlet</artifactId>
<groupId>org.eclipse.jetty.orbit</groupId>
</exclusion>
<exclusion>
<artifactId>servlet-api-2.5</artifactId>
<groupId>org.mortbay.jetty</groupId>
</exclusion>
<exclusion>
<artifactId>jasper-runtime</artifactId>
<groupId>tomcat</groupId>
</exclusion>
<exclusion>
<artifactId>slider-core</artifactId>
<groupId>org.apache.slider</groupId>
</exclusion>
<exclusion>
<artifactId>hbase-server</artifactId>
<groupId>org.apache.hbase</groupId>
</exclusion>
<exclusion>
<artifactId>jersey-client</artifactId>
<groupId>com.sun.jersey</groupId>
</exclusion>
<exclusion>
<artifactId>jersey-core</artifactId>
<groupId>com.sun.jersey</groupId>
</exclusion>
<exclusion>
<artifactId>jersey-json</artifactId>
<groupId>com.sun.jersey</groupId>
</exclusion>
<exclusion>
<artifactId>jersey-server</artifactId>
<groupId>com.sun.jersey</groupId>
</exclusion>
<exclusion>
<artifactId>jersey-guice</artifactId>
<groupId>com.sun.jersey.contribs</groupId>
</exclusion>
<exclusion>
<artifactId>hbase-common</artifactId>
<groupId>org.apache.hbase</groupId>
</exclusion>
<exclusion>
<artifactId>hbase-hadoop2-compat</artifactId>
<groupId>org.apache.hbase</groupId>
</exclusion>
<exclusion>
<artifactId>hbase-client</artifactId>
<groupId>org.apache.hbase</groupId>
</exclusion>
<exclusion>
<artifactId>hbase-hadoop-compat</artifactId>
<groupId>org.apache.hbase</groupId>
</exclusion>
<exclusion>
<artifactId>tephra-hbase-compat-1.0</artifactId>
<groupId>co.cask.tephra</groupId>
</exclusion>
<exclusion>
<artifactId>jaxb-api</artifactId>
<groupId>javax.xml.bind</groupId>
</exclusion>
<exclusion>
<artifactId>hive-llap-client</artifactId>
<groupId>org.apache.hive</groupId>
</exclusion>
<exclusion>
<artifactId>hive-llap-common</artifactId>
<groupId>org.apache.hive</groupId>
</exclusion>
<exclusion>
<artifactId>hive-llap-server</artifactId>
<groupId>org.apache.hive</groupId>
</exclusion>
<exclusion>
<artifactId>tephra-core</artifactId>
<groupId>co.cask.tephra</groupId>
</exclusion>
<exclusion>
<artifactId>ant</artifactId>
<groupId>ant</groupId>
</exclusion>
<exclusion>
<artifactId>stringtemplate</artifactId>
<groupId>org.antlr</groupId>
</exclusion>
<exclusion>
<artifactId>antlr-runtime</artifactId>
<groupId>org.antlr</groupId>
</exclusion>
<exclusion>
<artifactId>hive-shims</artifactId>
<groupId>org.apache.hive</groupId>
</exclusion>
<exclusion>
<artifactId>jsp-api</artifactId>
<groupId>javax.servlet</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-api</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-core</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-web</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
<exclusion>
<artifactId>jasper-compiler</artifactId>
<groupId>tomcat</groupId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
<finalName>dolphinscheduler-task-api-${project.version}</finalName>
</build>
</project> |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,514 | [Bug] [Task] Task load class fail | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
![image](https://user-images.githubusercontent.com/16631152/136983008-88168ba2-8c03-40ee-a246-b7dae3bfd4fc.png)
### What you expected to happen
normal run
### How to reproduce
run `ShellCommandExecutor` when a Commons-Lang class is used
### Anything else
no
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6514 | https://github.com/apache/dolphinscheduler/pull/6513 | a80fca70f7f325aad8eda2fdc1fe64b2a7d2f04f | bddf3e6f2c811c3ae6623bb588414173f0132c57 | "2021-10-12T15:16:03Z" | java | "2021-10-13T02:30:49Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>2.0.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<zookeeper.version>3.4.14</zookeeper.version>
<spring.version>5.1.19.RELEASE</spring.version>
<spring.boot.version>2.1.18.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.2.4</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>8.0.16</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>4.1.2</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.2.5</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.9.1</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.1.2</checkstyle.version>
<curator.test>2.12.0</curator.test>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
<java-websocket.version>1.5.1</java-websocket.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.java-websocket</groupId>
<artifactId>Java-WebSocket</artifactId>
<version>${java-websocket.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-standalone-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-registry-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>log4j-1.2-api</groupId>
<artifactId>org.apache.logging.log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.test}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jacoco</groupId>
<artifactId>org.jacoco.agent</artifactId>
<version>${jacoco.version}</version>
<classifier>runtime</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi-ooxml</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<systemPropertyVariables>
<jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile>
</systemPropertyVariables>
<includes>
<!--registry plugin -->
<include>**/plugin/registry/zookeeper/ZookeeperRegistryTest.java</include>
<!-- API -->
<include>**/api/controller/ProjectControllerTest.java</include>
<include>**/api/controller/QueueControllerTest.java</include>
<include>**/api/configuration/TrafficConfigurationTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/controller/SchedulerControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
<include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/interceptor/RateLimitInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessTaskRelationServiceImplTest.java</include>
<include>**/api/service/TaskDefinitionServiceImplTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/service/EnvironmentServiceTest.java</include>
<include>**/api/service/EnvironmentWorkerGroupRelationServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/controller/EnvironmentControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SparkParametersTest.java</include>
<include>**/common/task/SqlParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/datasource/clickhouse/ClickHouseDatasourceProcessorTest.java</include>
<include>**/common/datasource/db2/Db2DatasourceProcessorTest.java</include>
<include>**/common/datasource/hive/HiveDatasourceProcessorTest.java</include>
<include>**/common/datasource/mysql/MysqlDatasourceProcessorTest.java</include>
<include>**/common/datasource/oracle/OracleDatasourceProcessorTest.java</include>
<include>**/common/datasource/postgresql/PostgreSqlDatasourceProcessorTest.java</include>
<include>**/common/datasource/presto/PrestoDatasourceProcessorTest.java</include>
<include>**/common/datasource/spark/SparkDatasourceProcessorTest.java</include>
<include>**/common/datasource/sqlserver/SqlServerDatasourceProcessorTest.java</include>
<include>**/common/datasource/DatasourceUtilTest.java</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/command/log/GetLogBytesRequestCommandTest.java</include>
<include>**/remote/command/log/GetLogBytesResponseCommandTest.java</include>
<include>**/remote/command/log/ViewLogRequestCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/cache/impl/TaskInstanceCacheManagerImplTest.java</include>
<include>**/server/master/config/MasterConfigTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/registry/MasterRegistryClientTest.java</include>
<include>**/server/master/registry/ServerNodeManagerTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/SwitchTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/master/zk/ZKMasterClientTest.java</include>
<include>**/server/registry/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/datax/DataxTaskTest.java</include>
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/processdure/ProcedureTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/PythonCommandExecutorTest.java</include>
<include>**/server/worker/task/TaskParamsTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/task/sql/SqlTaskTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/registry/RegistryClientTest.java</include>
<include>**/service/registry/RegistryPluginTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/service/alert/ProcessAlertManagerTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/plugin/alert/slack/SlackAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/slack/SlackAlertPluginTest.java</include>
<include>**/plugin/alert/slack/SlackSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/spi/plugin/DolphinSchedulerPluginLoaderTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
<include>**/plugin/task/pigeon/PigeonTaskTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<dataFile>${project.build.directory}/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>default-instrument</id>
<goals>
<goal>instrument</goal>
</goals>
</execution>
<execution>
<id>default-restore-instrumented-classes</id>
<goals>
<goal>restore-instrumented-classes</goal>
</goals>
</execution>
<execution>
<id>default-report</id>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.45</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-registry-plugin</module>
<module>dolphinscheduler-task-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-microbench</module>
<module>dolphinscheduler-standalone-server</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,471 | [Feature][Master] Cache Process definition in master | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
In order to reduce the number of DB queries, cache process definitions in the master.
processDefinitionCacheMaps: {key: code-version, value: processDefinition}
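A minimal sketch of the proposed cache, assuming the "code-version" key format described above (`ProcessDefinitionCache` and its `get` method are illustrative names, not the final implementation; `findProcessDefinition` is the existing DB lookup in `ProcessService`):
```java
import java.util.HashMap;

// Minimal sketch: cache process definitions per command scan, keyed by "code-version".
public class ProcessDefinitionCache {
    private final HashMap<String, ProcessDefinition> cache = new HashMap<>();

    public ProcessDefinition get(long code, int version, ProcessService processService) {
        String key = String.format("%d-%d", code, version);
        ProcessDefinition definition = cache.get(key);
        if (definition == null) {
            // query the DB only on a cache miss
            definition = processService.findProcessDefinition(code, version);
            if (definition != null) {
                cache.put(key, definition);
            }
        }
        return definition;
    }
}
```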
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6471 | https://github.com/apache/dolphinscheduler/pull/6511 | bddf3e6f2c811c3ae6623bb588414173f0132c57 | cea8ae69916039b55e84482b5c0703f3c62ef9c5 | "2021-10-09T06:08:19Z" | java | "2021-10-13T03:36:27Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.facebook.presto.jdbc.internal.guava.collect.Lists;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* process related dao: aggregates the mappers used to handle commands, process instances and task instances.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessDefinitionLogMapper processDefineLogMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
@Autowired
private TaskDefinitionMapper taskDefinitionMapper;
@Autowired
private TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
private ProcessTaskRelationLogMapper processTaskRelationLogMapper;
@Autowired
private EnvironmentMapper environmentMapper;
/**
* handle Command (construct ProcessInstance from Command), wrapped in a transaction
*
* @param logger logger
* @param host host
* @param command found command
* @param processDefinitionCacheMaps cache of process definitions, keyed by "code-version"
* @return process instance
*/
public ProcessInstance handleCommand(Logger logger, String host, Command command, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
ProcessInstance processInstance = constructProcessInstance(command, host, processDefinitionCacheMaps);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
this.commandMapper.deleteById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
this.commandMapper.deleteById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* get command page
*
* @param pageSize
* @param pageNumber
* @return
*/
public List<Command> findCommandPage(int pageSize, int pageNumber) {
return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize);
}
/**
* check the input command exists in queue list
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
CommandType commandType = command.getCommandType();
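// recover-type commands are deduplicated: skip creation when a queued command of these types already targets the same process instance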
if (cmdTypeMap.containsKey(commandType)) {
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.error("process define not exists");
return new ArrayList<>();
}
List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
return new ArrayList<>(taskDefinitionLogs);
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* find process define by code and version.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
if (processDefinition == null || processDefinition.getVersion() != version) {
processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version);
if (processDefinition != null) {
processDefinition.setId(0);
}
}
return processDefinition;
}
/**
* find process define by code.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) {
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
try (LogClientService logClient = new LogClientService()) {
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
// compatible old version
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip, port, taskLogPath);
}
}
}
/**
* calculate the number of sub processes in the process definition.
*
* @param processDefinitionCode processDefinitionCode
* @return process thread num count
*/
private Integer workProcessThreadNumCount(long processDefinitionCode) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinition.getId(), ids);
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskDefinition taskNode : taskNodeList) {
String parameter = taskNode.getTaskParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
/**
* create recovery waiting thread command when thread pool is not enough for the process instance.
* a sub work process instance does not need to create a recovery command.
* create the recovery waiting thread command and delete the origin command at the same time.
* if the recovery command already exists, only update the field update_time.
*
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
// sub process does not need to create a wait command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinition().getCode(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getEnvironmentCode(),
processInstance.getProcessInstancePriority(),
processInstance.getDryRun(),
processInstance.getId(),
processInstance.getProcessDefinitionVersion()
);
saveCommand(command);
return;
}
// update the command time if the current command is recovered from waiting thread
if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
if (scheduleTime == null
&& cmdParam != null
&& cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules);
if (complementDateList.size() > 0) {
scheduleTime = complementDateList.get(0);
} else {
logger.error("set scheduler time error: complement date list is empty, command: {}",
command.toString());
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setProcessDefinitionCode(processDefinition.getCode());
processInstance.setProcessDefinitionVersion(processDefinition.getVersion());
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
//processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
processInstance.setDryRun(command.getDryRun());
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
// reset global params while there are start parameters
setGlobalParamIfCommanded(processDefinition, cmdParam);
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? -1 : command.getEnvironmentCode());
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
Map<String, String> fatherParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
fatherParamMap = JSONUtils.toMap(fatherParamJson);
}
startParamMap.putAll(fatherParamMap);
// set start param into global params
if (startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
/**
* get process tenant
* if there is a tenant id in the definition, use the tenant of the definition.
* if there is no tenant id in the definition or the tenant does not exist,
* use the definition creator's tenant.
*
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* get an environment
* use the code of the environment to find an environment.
*
* @param environmentCode environmentCode
* @return Environment
*/
public Environment findEnvironmentByCode(Long environmentCode) {
Environment environment = null;
if (environmentCode >= 0) {
environment = environmentMapper.queryByEnvironmentCode(environmentCode);
}
return environment;
}
/**
* check command parameters is valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @param processDefinitionCacheMaps cache of process definitions, keyed by "code-version"
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
ProcessInstance processInstance;
ProcessDefinition processDefinition;
CommandType commandType = command.getCommandType();
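// check the per-scan cache first; the "code-version" key keeps each definition version as a separate entry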
String key = String.format("%d-%d", command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
if (processDefinitionCacheMaps.containsKey(key)) {
processDefinition = processDefinitionCacheMaps.get(key);
} else {
processDefinition = this.findProcessDefinition(command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
if (processDefinition != null) {
processDefinitionCacheMaps.put(key, processDefinition);
}
}
if (processDefinition == null) {
logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
return null;
}
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
int processInstanceId = command.getProcessInstanceId();
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return processInstance;
}
}
if (cmdParam != null) {
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
// reset global params while repeat running is needed by cmdParam
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
processInstance.setProcessDefinition(processDefinition);
}
//reset command parameter
if (processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complementing data, if the instance id is not 0
if (processInstance.getId() != 0) {
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
}
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* get process definition by command
* If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
* Otherwise, get the latest version of ProcessDefinition
*
* @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
if (cmdParam != null) {
int processInstanceId = 0;
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
}
if (processInstanceId != 0) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return null;
}
return processDefineLogMapper.queryByDefinitionCodeAndVersion(
processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
}
}
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* return COMPLEMENT_DATA if the process started with complement data, otherwise the command's own type
*
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules);
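// use the first fire date in the complement range as the initial schedule time; sub processes keep their already-set schedule time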
if (complementDate.size() > 0
&& Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementDate.get(0));
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters:
* handle the sub work process instance, update the relation table and command parameters,
* set the sub work process flag and extend the parent work process command parameters
*
* @param subProcessInstance subProcessInstance
*/
public void setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user-defined params to the sub process
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
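// the command param also encodes the parent/child relation; sync the sub process id into the instance map table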
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
}
/**
* join parent global params into sub process.
* only keys that are not already present in the sub process global params are joined.
*
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* retry submitting the task to the database
*
* @param taskInstance task instance
* @param commitRetryTimes max number of commit retries
* @param commitInterval interval between retries in milliseconds
* @return the persisted task instance, or null if every retry failed
*/
public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
int retryTimes = 1;
boolean submitDB = false;
TaskInstance task = null;
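// retry the DB submission up to commitRetryTimes, sleeping commitInterval ms between attempts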
while (retryTimes <= commitRetryTimes) {
try {
if (!submitDB) {
// submit task to db
task = submitTask(taskInstance);
if (task != null && task.getId() != 0) {
submitDB = true;
break;
}
}
if (!submitDB) {
logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes);
}
Thread.sleep(commitInterval);
} catch (Exception e) {
logger.error("task commit to mysql failed", e);
}
retryTimes += 1;
}
return task;
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map;
* repeat running does not generate a new sub process instance;
* set map {parent instance id, task instance id, 0 (child instance id)}
*
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
//check create sub work flow firstly
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
// fault-tolerance recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
* complement data needs to transform parent parameters to the child.
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if (parentProcessInstance.isComplementData()) {
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
if (fatherParams.size() != 0) {
cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
public Map<String, String> getGlobalParamMap(String globalParams) {
List<Property> propList;
Map<String, String> globalParamMap = new HashMap<>();
if (StringUtils.isNotEmpty(globalParams)) {
propList = JSONUtils.toList(globalParams, Property.class);
globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
}
return globalParamMap;
}
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId);
Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
Map<String, String> fatherParams = new HashMap<>();
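// map each declared local param of the sub-process task to the parent's global value so the child inherits it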
if (CollectionUtils.isNotEmpty(allParam)) {
for (Property info : allParam) {
fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
}
}
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
int subProcessInstanceId = childInstance == null ? 0 : childInstance.getId();
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
processDefinition.getCode(),
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
task.getEnvironmentCode(),
parentProcessInstance.getProcessInstancePriority(),
parentProcessInstance.getDryRun(),
subProcessInstanceId,
parentProcessInstance.getProcessDefinitionVersion()
);
}
/**
* initialize sub work flow state
* the child instance state is initialized when recovering from pause/stop/failure
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
/**
* get sub work flow command type
* child instance exist: child command = fatherCommand
* child instance not exists: child command = fatherCommand[0]
*/
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionCode childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
parentProcessInstance.getProcessDefinitionVersion());
ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
* submit task to mysql
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
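// a failed task (other than a sub process) is invalidated and re-inserted as a fresh attempt, unless the workflow is about to stop or pause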
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
// create a new task instance
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
/**
* get submit task instance state by the work process state
* the task state cannot be modified when it is running / killed / submitted successfully,
* because the task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
* if all of above are not satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
// return pause/stop if the process instance state is ready pause / ready stop
// or return submit success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE
&& task.getRetryTimes() >= task.getMaxRetryTimes()) {
return false;
}
}
return true;
}
/**
* insert or update work process instance to data base
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* find task instance by id
*
* @param taskId task id
* @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
* package the task instance, associating its processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return null;
}
setTaskInstanceDetail(taskInstance);
return taskInstance;
}
/**
* package the task instance, associating its processInstance and processDefine
*
* @param taskInstance taskInstance
* @return task instance
*/
public void setTaskInstanceDetail(TaskInstance taskInstance) {
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
updateTaskDefinitionResources(taskDefinition);
taskInstance.setTaskDefine(taskDefinition);
}
/**
* Update {@link ResourceInfo} information in {@link TaskDefinition}
*
* @param taskDefinition the given {@link TaskDefinition}
*/
private void updateTaskDefinitionResources(TaskDefinition taskDefinition) {
Map<String, Object> taskParameters = JSONUtils.parseObject(
taskDefinition.getTaskParams(),
new TypeReference<Map<String, Object>>() { });
if (taskParameters != null) {
// if contains mainJar field, query resource from database
// Flink, Spark, MR
if (taskParameters.containsKey("mainJar")) {
Object mainJarObj = taskParameters.get("mainJar");
ResourceInfo mainJar = JSONUtils.parseObject(
JSONUtils.toJsonString(mainJarObj),
ResourceInfo.class);
ResourceInfo resourceInfo = updateResourceInfo(mainJar);
if (resourceInfo != null) {
taskParameters.put("mainJar", resourceInfo);
}
}
// update resourceList information
if (taskParameters.containsKey("resourceList")) {
String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList"));
List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class);
List<ResourceInfo> updatedResourceInfos = resourceInfos
.stream()
.map(this::updateResourceInfo)
.filter(Objects::nonNull)
.collect(Collectors.toList());
taskParameters.put("resourceList", updatedResourceInfos);
}
// set task parameters
taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters));
}
}
/**
* update {@link ResourceInfo} by given original ResourceInfo
*
* @param res origin resource info
* @return {@link ResourceInfo}
*/
private ResourceInfo updateResourceInfo(ResourceInfo res) {
ResourceInfo resourceInfo = null;
// only if mainJar is not null and does not contain the "resourceName" field
if (res != null) {
int resourceId = res.getId();
if (resourceId <= 0) {
logger.error("invalid resourceId, {}", resourceId);
return null;
}
resourceInfo = new ResourceInfo();
// get resource from database, only one resource should be returned
Resource resource = getResourceById(resourceId);
resourceInfo.setId(resourceId);
resourceInfo.setRes(resource.getFileName());
resourceInfo.setResourceName(resource.getFullName());
if (logger.isInfoEnabled()) {
logger.info("updated resource info {}",
JSONUtils.toJsonString(resourceInfo));
}
}
return resourceInfo;
}
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
if (processInstanceMap == null) {
return 0;
}
return processInstanceMapMapper.insert(processInstanceMap);
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
*
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
/**
* change task state
*
* @param state state
* @param endTime endTime
* @param taskInstId taskInstId
* @param varPool varPool
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(taskInstance);
saveTaskInstance(taskInstance);
}
/**
* for display on the task instance page: write OUT params from the var pool back into the task params
*
* @param taskInstance
*/
public void changeOutParam(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getVarPool())) {
return;
}
List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class);
if (CollectionUtils.isEmpty(properties)) {
return;
}
// if the result has more than one line, just take the first
Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() {});
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> outProperty = new HashMap<>();
for (Property info : properties) {
if (info.getDirect() == Direct.OUT) {
outProperty.put(info.getProp(), info.getValue());
}
}
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
info.setValue(outProperty.get(paramName));
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams));
}
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionCode
*
* @param processDefinitionCode processDefinitionCode
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
// 1. reset the processInstance host to null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
// 2. insert a recover command
Command cmd = new Command();
cmd.setProcessDefinitionCode(processDefinition.getCode());
cmd.setProcessDefinitionVersion(processDefinition.getVersion());
cmd.setProcessInstanceId(processInstance.getId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
// normalize the resource name with a leading slash so the tenant code can still be resolved for data saved by older versions
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
return StringUtils.EMPTY;
}
int userId = resourceList.get(0).getUserId();
User user = userMapper.selectById(userId);
if (Objects.isNull(user)) {
return StringUtils.EMPTY;
}
Tenant tenant = tenantMapper.selectById(user.getTenantId());
if (Objects.isNull(tenant)) {
return StringUtils.EMPTY;
}
return tenant.getTenantCode();
}
/**
* find schedule list by process define codes.
*
* @param codes codes
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineCode(long[] codes) {
return scheduleMapper.selectAllByProcessDefineArray(codes);
}
/**
* find last scheduler process instance in the date interval
*
* @param definitionCode definitionCode
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionCode process definition code
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionCode process definition code
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionCode,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* list unauthorized udf function
*
* @param userId user id
* @param needChecks data source id array
* @return unauthorized udf function list
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
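// start from everything that needs checking and remove whatever the user is authorized for; the remainder is unauthorized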
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks);
addAuthorizedResources(ownUdfResources, userId);
Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks);
addAuthorizedResources(ownResources, userId);
Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
return "";
}
ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
if (definition == null) {
return "";
}
return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId());
}
/**
* switch process definition version to process definition log version
*/
public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) {
if (null == processDefinition || null == processDefinitionLog) {
return Constants.DEFINITION_FAILURE;
}
processDefinitionLog.setId(processDefinition.getId());
processDefinitionLog.setReleaseState(ReleaseState.OFFLINE);
processDefinitionLog.setFlag(Flag.YES);
int result = processDefineMapper.updateById(processDefinitionLog);
if (result > 0) {
result = switchProcessTaskRelationVersion(processDefinitionLog);
if (result <= 0) {
return Constants.DEFINITION_FAILURE;
}
}
return result;
}
public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
if (!processTaskRelationList.isEmpty()) {
processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode());
}
List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
return processTaskRelationMapper.batchInsert(processTaskRelationLogList);
}
/**
* get resource ids
*
* @param taskDefinition taskDefinition
* @return resource ids
*/
public String getResourceIds(TaskDefinition taskDefinition) {
Set<Integer> resourceIds = null;
AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams());
if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
resourceIds = params.getResourceFilesList().
stream()
.filter(t -> t.getId() != 0)
.map(ResourceInfo::getId)
.collect(Collectors.toSet());
}
if (CollectionUtils.isEmpty(resourceIds)) {
return StringUtils.EMPTY;
}
return StringUtils.join(resourceIds, ",");
}
public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) {
Date now = new Date();
List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>();
List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>();
for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
taskDefinitionLog.setProjectCode(projectCode);
taskDefinitionLog.setUpdateTime(now);
taskDefinitionLog.setOperateTime(now);
taskDefinitionLog.setOperator(operator.getId());
taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog));
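// a task that already carries a code and version may be persisted; bump the version only when its definition actually changed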
if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) {
TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper
.queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion());
if (definitionCodeAndVersion != null) {
if (!taskDefinitionLog.equals(definitionCodeAndVersion)) {
taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId());
Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode());
taskDefinitionLog.setVersion(version + 1);
taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime());
updateTaskDefinitionLogs.add(taskDefinitionLog);
}
continue;
}
}
taskDefinitionLog.setUserId(operator.getId());
taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
taskDefinitionLog.setCreateTime(now);
if (taskDefinitionLog.getCode() == 0) {
try {
taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
logger.error("Task code get error, ", e);
return Constants.DEFINITION_FAILURE;
}
}
newTaskDefinitionLogs.add(taskDefinitionLog);
}
int insertResult = 0;
int updateResult = 0;
for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) {
TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode());
if (task == null) {
newTaskDefinitionLogs.add(taskDefinitionToUpdate);
} else {
insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate);
taskDefinitionToUpdate.setId(task.getId());
updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate);
}
}
if (!newTaskDefinitionLogs.isEmpty()) {
updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs);
insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs);
}
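// note: (insertResult & updateResult) is a bitwise AND of the two counts, so e.g. 1 & 2 evaluates to 0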
return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS;
}
/**
* save processDefinition (including create or update processDefinition)
*/
public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) {
ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition);
Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode());
int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1;
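// every save writes a new row to the definition log; the definition table itself keeps only the latest version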
processDefinitionLog.setVersion(insertVersion);
processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE);
processDefinitionLog.setOperator(operator.getId());
processDefinitionLog.setOperateTime(processDefinition.getUpdateTime());
int insertLog = processDefineLogMapper.insert(processDefinitionLog);
int result;
if (0 == processDefinition.getId()) {
result = processDefineMapper.insert(processDefinitionLog);
} else {
processDefinitionLog.setId(processDefinition.getId());
result = processDefineMapper.updateById(processDefinitionLog);
}
return (insertLog & result) > 0 ? insertVersion : 0;
}
/**
* save task relations
*/
public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion,
List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null;
if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) {
taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog));
}
Date now = new Date();
for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
processTaskRelationLog.setProjectCode(projectCode);
processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode);
processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion);
if (taskDefinitionLogMap != null) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode());
if (taskDefinitionLog != null) {
processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion());
}
processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion());
}
processTaskRelationLog.setCreateTime(now);
processTaskRelationLog.setUpdateTime(now);
processTaskRelationLog.setOperator(operator.getId());
processTaskRelationLog.setOperateTime(now);
}
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
if (!processTaskRelationList.isEmpty()) {
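// skip rewriting relations when the stored set equals the incoming one (compared via hashCode)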
Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet());
Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet());
if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) {
return Constants.EXIT_CODE_SUCCESS;
}
processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode);
}
int result = processTaskRelationMapper.batchInsert(taskRelationList);
int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList);
return (result & resultLog) > 0 ? Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
}
public boolean isTaskOnline(long taskCode) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
if (!processTaskRelationList.isEmpty()) {
Set<Long> processDefinitionCodes = processTaskRelationList
.stream()
.map(ProcessTaskRelation::getProcessDefinitionCode)
.collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes);
// check process definition is already online
for (ProcessDefinition processDefinition : processDefinitionList) {
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
return true;
}
}
}
return false;
}
/**
* Generate the DAG Graph based on the process definition id
*
* @param processDefinition process definition
* @return dag graph
*/
public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations));
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
/**
* generate DagData
*/
public DagData genDagData(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations);
List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream()
.map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class))
.collect(Collectors.toList());
return new DagData(processDefinition, processTaskRelations, taskDefinitions);
}
public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) {
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPreTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
}
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
}
/**
* find task definition by code and version
*/
public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) {
return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion);
}
/**
* find process task relation list by projectCode and processDefinitionCode
*/
public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) {
return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
}
/**
* add authorized resources
*
* @param ownResources own resources
* @param userId userId
*/
private void addAuthorizedResources(List<Resource> ownResources, int userId) {
List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
ownResources.addAll(relationResources);
}
/**
* Used temporarily until taskNode is refactored
*/
public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, List<Long>> taskCodeMap = new HashMap<>();
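// build postTaskCode -> list of preTaskCodes from the relation edges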
for (ProcessTaskRelation processTaskRelation : taskRelationList) {
taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> {
if (v == null) {
v = new ArrayList<>();
}
if (processTaskRelation.getPreTaskCode() != 0L) {
v.add(processTaskRelation.getPreTaskCode());
}
return v;
});
}
if (CollectionUtils.isEmpty(taskDefinitionLogs)) {
taskDefinitionLogs = genTaskDefineList(taskRelationList);
}
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
List<TaskNode> taskNodeList = new ArrayList<>();
for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey());
if (taskDefinitionLog != null) {
TaskNode taskNode = new TaskNode();
taskNode.setCode(taskDefinitionLog.getCode());
taskNode.setVersion(taskDefinitionLog.getVersion());
taskNode.setName(taskDefinitionLog.getName());
taskNode.setDesc(taskDefinitionLog.getDescription());
taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase());
taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes());
taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval());
Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams());
taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT)));
taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE)));
taskParamsMap.remove(Constants.CONDITION_RESULT);
taskParamsMap.remove(Constants.DEPENDENCE);
taskNode.setParams(JSONUtils.toJsonString(taskParamsMap));
taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority());
taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup());
taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode());
taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN,
taskDefinitionLog.getTimeoutNotifyStrategy(),
taskDefinitionLog.getTimeout())));
taskNode.setDelayTime(taskDefinitionLog.getDelayTime());
taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList())));
taskNodeList.add(taskNode);
}
}
return taskNodeList;
}
public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) {
HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>();
//find sub tasks
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId);
if (processInstanceMap == null) {
return processTaskMap;
}
ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
if (fatherProcess != null) {
processTaskMap.put(fatherProcess, fatherTask);
}
return processTaskMap;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,523 | [Bug] [API] Failed to authorize resource | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Failed to authorize resource
### What you expected to happen
Failed to authorize resource
### How to reproduce
authorize resource
report 'authorize-resource-tree' failed
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6523 | https://github.com/apache/dolphinscheduler/pull/6524 | 50f1766903f27ce0960e133b30cf394187e8c5c1 | 8606cbd493eba950d3e5b95d8c8f891e669a48e8 | "2021-10-13T15:33:27Z" | java | "2021-10-13T16:31:23Z" | dolphinscheduler-ui/src/js/conf/home/store/security/actions.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io from '@/module/io'
export default {
/**
* verify Name
* @param alertgroup/verifyGroupName
* @param users/verifyUserName
* @param tenant/verifyTenantCode
*/
verifyName ({ state }, payload) {
const o = {
user: {
param: {
userName: payload.userName
},
api: 'users/verify-user-name'
},
tenant: {
param: {
tenantCode: payload.tenantCode
},
api: 'tenants/verify-code'
},
alertgroup: {
param: {
groupName: payload.groupName
},
api: 'alert-groups/verify-name'
},
alarmInstance: {
param: {
alertInstanceName: payload.instanceName
},
api: 'alert-plugin-instances/verify-name'
}
}
return new Promise((resolve, reject) => {
io.get(o[payload.type].api, o[payload.type].param, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Create user
* @param "userName":string
* @param "userPassword": string
* @param "tenantId":int
* @param "email":string
* @param "phone":string
*/
createUser ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('users/create', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Verify that the username exists
* @param userName
*/
verifyUserName ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('users/verify-user-name', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Paging query user list
* @param "pageNo":int,
* @param "searchVal":string,
* @param "pageSize":int
*/
getUsersListP ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('users/list-paging', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* user list except admin
*/
getUsersList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('users/list', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* user all list
*/
getUsersAll ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('users/list-all', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Update user
* @param "id":int,
* @param "userName":string,
* @param "userPassword": string,
* @param "tenantId":int,
* @param "email":string,
* @param "phone":string
*/
updateUser ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('users/update', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* delete users
* @param "id":int
*/
deleteUser ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('users/delete', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Obtain authorized and unauthorized items
*/
getAuthList ({ state }, payload) {
const o = {
type: payload.type,
category: payload.category
}
const param = {}
// Manage user
if (o.type === 'user') {
param.alertgroupId = payload.id
} else {
param.userId = payload.id
}
// Unauthorized items
const p1 = new Promise((resolve, reject) => {
io.get(`${o.category}/unauth-${o.type}`, param, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
// Authorized items
const p2 = new Promise((resolve, reject) => {
io.get(`${o.category}/authed-${o.type}`, param, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
return new Promise((resolve, reject) => {
Promise.all([p1, p2]).then(a => {
resolve(a)
}).catch(e => {
reject(e)
})
})
},
getResourceList ({ state }, payload) {
const o = {
type: payload.type,
category: payload.category
}
const param = {}
// Manage user
if (o.type === 'user') {
param.alertgroupId = payload.id
} else {
param.userId = payload.id
}
// Full resource tree available for authorization
const p1 = new Promise((resolve, reject) => {
io.get(`${o.category}/authorize-resource-tree`, param, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
// Resources already authorized to the user
const p2 = new Promise((resolve, reject) => {
io.get(`${o.category}/authed-${o.type}`, param, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
return new Promise((resolve, reject) => {
Promise.all([p1, p2]).then(a => {
resolve(a)
}).catch(e => {
reject(e)
})
})
},
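/*
 * Descriptive note (not original code): both Promise.all helpers above resolve
 * with a two-element array: getAuthList with [unauthorizedItems, authorizedItems],
 * getResourceList with [resourceTree, authorizedResources]. Callers therefore
 * destructure the result, e.g. const [tree, authed] = await dispatch(...).
 */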
/**
* Authorization [project, resource, data source]
* @param Project,Resources,Datasource
*/
grantAuthorization ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post(payload.api, payload.param, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Query user details
* @param "userId":int
*/
getUsersDetails ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('users/select-by-id', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Tenant list - pagination
*/
getTenantListP ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('tenants', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Tenant list - no paging
*/
getTenantList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('tenants/list', payload, res => {
const list = res.data
list.unshift({
id: -1,
tenantCode: 'default'
})
state.tenantAllList = list
resolve(list)
}).catch(e => {
reject(e)
})
})
},
/**
* Queue list
*/
getQueueList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('queues/list', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Create tenant (posts to the tenants endpoint)
*/
createQueue ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('tenants', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Update tenant (puts to the tenants endpoint)
*/
updateQueue ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`tenants/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Delete tenant (deletes via the tenants endpoint)
*/
deleteQueue ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`tenants/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* queryAlertGroupListPaging
*/
queryAlertGroupListPaging ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('alert-groups', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* queryAlertPluginInstanceListPaging
*/
queryAlertPluginInstanceListPaging ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('alert-plugin-instances', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* queryUiPlugins
*/
getPlugins ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('ui-plugins/query-by-type', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* queryUiPluginById
*/
getUiPluginById ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`ui-plugins/${payload.pluginId}`, payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* queryAll alert-plugin-instance
*/
queryAllAlertPluginInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('alert-plugin-instances/list', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Alarm group list
*/
getAlertgroup ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('alert-groups/list', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Create an alarm group.
*/
createAlertgrou ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('alert-groups', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* create alert plugin instance operation
*/
createAlertPluginInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('alert-plugin-instances', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* update alert plugin instance operation
*/
updateAlertPluginInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`alert-plugin-instances/${payload.alertPluginInstanceId}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* update an alarm group.
*/
updateAlertgrou ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`alert-groups/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* delete alarm group.
*/
deleteAlertgrou ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`alert-groups/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* delete alert plugin instance operation
*/
deletAelertPluginInstance ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`alert-plugin-instances/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Master list
*/
getProcessMasterList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('process/master/list', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Worker list
*/
getProcessWorkerList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('process/worker/list', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get queue list with pagination
*/
getQueueListP ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('queues', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* create queue
*/
createQueueQ ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('queues', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* update queue
*/
updateQueueQ ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`queues/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* verify queue name
*/
verifyQueueQ ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('queues/verify', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get worker groups
*/
getWorkerGroups ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('worker-groups', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* get all worker groups
*/
getWorkerGroupsAll ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('worker-groups/all', payload, res => {
let list = res.data
if (list.length > 0) {
list = list.map(item => {
return {
id: item,
name: item
}
})
} else {
list.unshift({
id: 'default',
name: 'default'
})
}
state.workerGroupsListAll = list
resolve(list)
}).catch(e => {
reject(e)
})
})
},
/**
* get alarm groups all
*/
getAlarmGroupsAll ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('alert-groups/list', payload, res => {
state.alarmGroupsListAll = res.data
resolve(res)
}).catch(e => {
reject(e)
})
})
},
saveWorkerGroups ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('worker-groups', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
deleteWorkerGroups ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`worker-groups/${payload.id}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
getWorkerAddresses ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('worker-groups/worker-address-list', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get environment list with pagination
*/
getEnvironmentListPaging ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('environment/list-paging', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* create environment
*/
createEnvironment ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('environment/create', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* update environment
*/
updateEnvironment ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('environment/update', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* delete environment
*/
deleteEnvironment ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('environment/delete', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
verifyEnvironment ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('environment/verify-environment', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get all environment
*/
getEnvironmentAll ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('environment/query-environment-list', payload, res => {
let list = res.data
state.environmentListAll = list
resolve(list)
}).catch(e => {
reject(e)
})
})
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,876 | [Bug][SQL] Suggest to increase the max length of the field resource_ids in the table t_ds_task_definition. | **Describe the bug**
When I saved a shell task that used many resource files, the service threw a PSQLException.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Create a shell task and select many resource files.
2. Save the workflow and you will see the error. If you don't, try selecting more resource files.
**Expected behavior**
The max length of the field resource_ids in the table t_ds_task_definition should be increased.
**Screenshots**
![image](https://user-images.githubusercontent.com/4928204/126594849-07126f10-9031-43d2-a2ce-50ce9771d1f1.png)
![image](https://user-images.githubusercontent.com/4928204/126594860-5e38dc77-32d2-4d67-9cfe-e9424b8b07d3.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Exception stack trace:
```
[INFO] 2021-07-22 11:51:07.658 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:0702204b-58c0-4f0f-8b4b-dbfef2be63cc, LOGIN_USER:admin, URI:/dolphinscheduler/users/list-paging, METHOD:GET, HANDLER:org.apache.dolphinscheduler.api.controller.UsersController.queryUserList, ARGS:{searchVal=jiang.hua, pageNo=1, pageSize=10}
[INFO] 2021-07-22 11:51:07.682 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:55caab3c-e003-4925-88c9-172dd2318d7c, LOGIN_USER:admin, URI:/dolphinscheduler/projects/feature-works/process/update, METHOD:POST, HANDLER:org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition, ARGS:{connects=[], name=jianghua_test, description=test, locations={"tasks-24242":{"name":"restore_from_checkpoint_test123","targetarr":"","nodenumber":"0","x":-800,"y":-900,"color":"#1297DC"}}, id=8, releaseState=OFFLINE, projectName=feature-works, processDefinitionJson={"tasks":[{"id":"tasks-24242","code":491450867713,"version":27,"name":"restore_from_checkpoint_test123","desc":null,"type":"SHELL","runFlag":"NORMAL","loc":null,"maxRetryTimes":3,"retryInterval":1,"params":{"resourceList":[{"id":157},{"id":158},{"id":162},{"id":164},{"id":166},{"id":168},{"id":160},{"id":161},{"id":159},{"id":163},{"id":165},{"id":167},{"id":95},{"id":97},{"id":98},{"id":99},{"id":100},{"id":104},{"id":105},{"id":106},{"id":107},{"id":116},{"id":94},{"id":96},{"id":101},{"id":102},{"id":103},{"id":108},{"id":109},{"id":110},{"id":112},{"id":113},{"id":114},{"id":115},{"id":117},{"id":170},{"id":171},{"id":172},{"id":120},{"id":121},{"id":123},{"id":125},{"id":127},{"id":129},{"id":131},{"id":133},{"id":135},{"id":137},{"id":139},{"id":141},{"id":147},{"id":148},{"id":149},{"id":150},{"id":151},{"id":152},{"id":134},{"id":136},{"id":118},{"id":119},{"id":122},{"id":124},{"id":126},{"id":128},{"id":130},{"id":132},{"id":138},{"id":140},{"id":142},{"id":143},{"id":144},{"id":145},{"id":146},{"id":169},{"id":76},{"id":77},{"id":78}],"localParams":[{"prop":"task.name","direct":"IN","type":"VARCHAR","value":"restore_from_checkpoint_test.sql","deleteScript":"-n"},{"prop":"task.script.file","direct":"IN","type":"VARCHAR","value":"Projects/feature-works/tasks/stream/jianghua_test/restore_from_checkpoint_test.sql","deleteScript":"-f"},{"prop":"task.execution.parallel","direct":"IN","type":"VARCHAR","value":"3","deleteScript":"-p"},{"prop":"task.execution.slots","direct":"IN","type":"VARCHAR","value":"10","deleteScript":"-s"},{"prop":"task.execution.yjm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-j"},{"prop":"task.execution.ytm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-w"},{"prop":"task.instance.id","direct":"IN","type":"VARCHAR","value":"${system.task.instance.id}"},{"prop":"task.execute.path","direct":"IN","type":"VARCHAR","value":"${system.task.execute.path}"}],"rawScript":"chmod +x ${task.execute.path}/${datamax.home}/${datamax.version}/bin/*\n\n${task.execute.path}/${datamax.home}/${datamax.version}/bin/start.sh -m ${run.mode} -n ${task.name} -f ${task.script.file} -p ${task.execution.parallel} -s ${task.execution.slots} -j ${task.execution.yjm} -w ${task.execution.ytm} -r ${repository.home} -b ${task.execute.path} -c ${datamax.home}/${datamax.version}/lib/${datamax.core.jar} -l ${flink.home}/${flink.version} -e 
${profile.home}/${run.mode}"},"preTasks":[],"preTaskNodeList":[],"extras":null,"depList":[],"dependence":{},"conditionResult":{"successNode":[""],"failedNode":[""]},"taskInstancePriority":"MEDIUM","workerGroup":"default","timeout":{"enable":false,"strategy":null,"interval":0},"delayTime":0}],"globalParams":[{"prop":"repository.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"run.mode","direct":"IN","type":"VARCHAR","value":"stream"},{"prop":"flink.home","direct":"IN","type":"VARCHAR","value":"./flink/"},{"prop":"datamax.home","direct":"IN","type":"VARCHAR","value":"./datamax/"},{"prop":"datamax.core.jar","direct":"IN","type":"VARCHAR","value":"platform-on-flink_1.13.0-2.0-SNAPSHOT.jar"},{"prop":"flink.version","direct":"IN","type":"VARCHAR","value":"1.13.0"},{"prop":"datamax.version","direct":"IN","type":"VARCHAR","value":"2.0.0"},{"prop":"profile.home","direct":"IN","type":"VARCHAR","value":"./profile/prod"},{"prop":"root.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"current.date","direct":"IN","type":"VARCHAR","value":"${system.biz.curdate}"}],"timeout":0,"tenantId":4}}
[ERROR] 2021-07-22 11:51:07.697 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - 更新工作流定义错误
org.springframework.dao.DataIntegrityViolationException:
### Error updating database. Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
### The error may exist in org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.java (best guess)
### The error may involve org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper.updateById-Inline
### The error occurred while setting parameters
### SQL: UPDATE t_ds_task_definition SET fail_retry_times=?, flag=?, code=?, task_priority=?, update_time=?, task_params=?, user_id=?, version=?, timeout=?, task_type=?, timeout_flag=?, create_time=?, project_code=?, fail_retry_interval=?, name=?, delay_time=?, worker_group=?, resource_ids=? WHERE id=?
### Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
; ERROR: value too long for type character varying(255); nested exception is org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.springframework.jdbc.support.SQLStateSQLExceptionTranslator.doTranslate(SQLStateSQLExceptionTranslator.java:104)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:72)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:74)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440)
at com.sun.proxy.$Proxy101.update(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.update(SqlSessionTemplate.java:287)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:63)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy122.updateById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.updateTaskDefinition(ProcessService.java:2156)
at org.apache.dolphinscheduler.service.process.ProcessService.handleTaskDefinition(ProcessService.java:2282)
at org.apache.dolphinscheduler.service.process.ProcessService.saveProcessDefinition(ProcessService.java:2212)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$d02bc54d.saveProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:418)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$4564e02d.updateProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:242)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88)
at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:86)
at sun.reflect.GeneratedMethodAccessor159.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633)
at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175)
at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$EnhancerBySpringCGLIB$$22f5b91d.updateProcessDefinition(<generated>)
at sun.reflect.GeneratedMethodAccessor622.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190)
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138)
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797)
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)
at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763)
at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633)
at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:143)
at org.postgresql.jdbc.PgPreparedStatement.execute(PgPreparedStatement.java:132)
at com.alibaba.druid.pool.DruidPooledPreparedStatement.execute(DruidPooledPreparedStatement.java:497)
at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
at sun.reflect.GeneratedMethodAccessor431.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:63)
at com.sun.proxy.$Proxy177.update(Unknown Source)
at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.doUpdate(MybatisSimpleExecutor.java:54)
at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:197)
at sun.reflect.GeneratedMethodAccessor505.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:426)
... 111 common frames omitted
```
**Requirement or improvement**
- I suggest adding validation in the form to check field lengths; a sketch of the schema change follows below.
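A minimal sketch of the widening this asks for (PostgreSQL syntax, since the report comes from a PostgreSQL deployment; an assumption, not the merged fix):

```sql
-- hedged sketch: widen resource_ids so long resource id lists fit
ALTER TABLE t_ds_task_definition ALTER COLUMN resource_ids TYPE text;
ALTER TABLE t_ds_task_definition_log ALTER COLUMN resource_ids TYPE text;
```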
| https://github.com/apache/dolphinscheduler/issues/5876 | https://github.com/apache/dolphinscheduler/pull/6457 | 6495a204f3f5520f3ebce8b939f1254ee676facc | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | "2021-07-22T06:00:38Z" | java | "2021-10-14T07:31:06Z" | sql/dolphinscheduler_h2.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for QRTZ_JOB_DETAILS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
CREATE TABLE QRTZ_JOB_DETAILS
(
SCHED_NAME varchar(120) NOT NULL,
JOB_NAME varchar(200) NOT NULL,
JOB_GROUP varchar(200) NOT NULL,
DESCRIPTION varchar(250) DEFAULT NULL,
JOB_CLASS_NAME varchar(250) NOT NULL,
IS_DURABLE varchar(1) NOT NULL,
IS_NONCONCURRENT varchar(1) NOT NULL,
IS_UPDATE_DATA varchar(1) NOT NULL,
REQUESTS_RECOVERY varchar(1) NOT NULL,
JOB_DATA blob,
PRIMARY KEY (SCHED_NAME, JOB_NAME, JOB_GROUP)
);
-- ----------------------------
-- Table structure for QRTZ_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
CREATE TABLE QRTZ_TRIGGERS
(
SCHED_NAME varchar(120) NOT NULL,
TRIGGER_NAME varchar(200) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
JOB_NAME varchar(200) NOT NULL,
JOB_GROUP varchar(200) NOT NULL,
DESCRIPTION varchar(250) DEFAULT NULL,
NEXT_FIRE_TIME bigint(13) DEFAULT NULL,
PREV_FIRE_TIME bigint(13) DEFAULT NULL,
PRIORITY int(11) DEFAULT NULL,
TRIGGER_STATE varchar(16) NOT NULL,
TRIGGER_TYPE varchar(8) NOT NULL,
START_TIME bigint(13) NOT NULL,
END_TIME bigint(13) DEFAULT NULL,
CALENDAR_NAME varchar(200) DEFAULT NULL,
MISFIRE_INSTR smallint(2) DEFAULT NULL,
JOB_DATA blob,
PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
CONSTRAINT QRTZ_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP)
);
-- ----------------------------
-- Table structure for QRTZ_BLOB_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
CREATE TABLE QRTZ_BLOB_TRIGGERS
(
SCHED_NAME varchar(120) NOT NULL,
TRIGGER_NAME varchar(200) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
BLOB_DATA blob,
PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);
-- ----------------------------
-- Records of QRTZ_BLOB_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CALENDARS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_CALENDARS
(
SCHED_NAME varchar(120) NOT NULL,
CALENDAR_NAME varchar(200) NOT NULL,
CALENDAR blob NOT NULL,
PRIMARY KEY (SCHED_NAME, CALENDAR_NAME)
);
-- ----------------------------
-- Records of QRTZ_CALENDARS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CRON_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
CREATE TABLE QRTZ_CRON_TRIGGERS
(
SCHED_NAME varchar(120) NOT NULL,
TRIGGER_NAME varchar(200) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
CRON_EXPRESSION varchar(120) NOT NULL,
TIME_ZONE_ID varchar(80) DEFAULT NULL,
PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
CONSTRAINT QRTZ_CRON_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);
-- ----------------------------
-- Records of QRTZ_CRON_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_FIRED_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
CREATE TABLE QRTZ_FIRED_TRIGGERS
(
SCHED_NAME varchar(120) NOT NULL,
ENTRY_ID varchar(200) NOT NULL,
TRIGGER_NAME varchar(200) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
INSTANCE_NAME varchar(200) NOT NULL,
FIRED_TIME bigint(13) NOT NULL,
SCHED_TIME bigint(13) NOT NULL,
PRIORITY int(11) NOT NULL,
STATE varchar(16) NOT NULL,
JOB_NAME varchar(200) DEFAULT NULL,
JOB_GROUP varchar(200) DEFAULT NULL,
IS_NONCONCURRENT varchar(1) DEFAULT NULL,
REQUESTS_RECOVERY varchar(1) DEFAULT NULL,
PRIMARY KEY (SCHED_NAME, ENTRY_ID)
);
-- ----------------------------
-- Records of QRTZ_FIRED_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Records of QRTZ_JOB_DETAILS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_LOCKS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_LOCKS;
CREATE TABLE QRTZ_LOCKS
(
SCHED_NAME varchar(120) NOT NULL,
LOCK_NAME varchar(40) NOT NULL,
PRIMARY KEY (SCHED_NAME, LOCK_NAME)
);
-- ----------------------------
-- Records of QRTZ_LOCKS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS
(
SCHED_NAME varchar(120) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
PRIMARY KEY (SCHED_NAME, TRIGGER_GROUP)
);
-- ----------------------------
-- Records of QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SCHEDULER_STATE
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
CREATE TABLE QRTZ_SCHEDULER_STATE
(
SCHED_NAME varchar(120) NOT NULL,
INSTANCE_NAME varchar(200) NOT NULL,
LAST_CHECKIN_TIME bigint(13) NOT NULL,
CHECKIN_INTERVAL bigint(13) NOT NULL,
PRIMARY KEY (SCHED_NAME, INSTANCE_NAME)
);
-- ----------------------------
-- Records of QRTZ_SCHEDULER_STATE
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
CREATE TABLE QRTZ_SIMPLE_TRIGGERS
(
SCHED_NAME varchar(120) NOT NULL,
TRIGGER_NAME varchar(200) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
REPEAT_COUNT bigint(7) NOT NULL,
REPEAT_INTERVAL bigint(12) NOT NULL,
TIMES_TRIGGERED bigint(10) NOT NULL,
PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
CONSTRAINT QRTZ_SIMPLE_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);
-- ----------------------------
-- Records of QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME varchar(120) NOT NULL,
TRIGGER_NAME varchar(200) NOT NULL,
TRIGGER_GROUP varchar(200) NOT NULL,
STR_PROP_1 varchar(512) DEFAULT NULL,
STR_PROP_2 varchar(512) DEFAULT NULL,
STR_PROP_3 varchar(512) DEFAULT NULL,
INT_PROP_1 int(11) DEFAULT NULL,
INT_PROP_2 int(11) DEFAULT NULL,
LONG_PROP_1 bigint(20) DEFAULT NULL,
LONG_PROP_2 bigint(20) DEFAULT NULL,
DEC_PROP_1 decimal(13, 4) DEFAULT NULL,
DEC_PROP_2 decimal(13, 4) DEFAULT NULL,
BOOL_PROP_1 varchar(1) DEFAULT NULL,
BOOL_PROP_2 varchar(1) DEFAULT NULL,
PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
CONSTRAINT QRTZ_SIMPROP_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);
-- ----------------------------
-- Records of QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Records of QRTZ_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_access_token
-- ----------------------------
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token
(
id int(11) NOT NULL AUTO_INCREMENT,
user_id int(11) DEFAULT NULL,
token varchar(64) DEFAULT NULL,
expire_time datetime DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_access_token
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alert
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert
(
id int(11) NOT NULL AUTO_INCREMENT,
title varchar(64) DEFAULT NULL,
content text,
alert_status tinyint(4) DEFAULT '0',
log text,
alertgroup_id int(11) DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_alert
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup
(
id int(11) NOT NULL AUTO_INCREMENT,
alert_instance_ids varchar(255) DEFAULT NULL,
create_user_id int(11) DEFAULT NULL,
group_name varchar(255) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY t_ds_alertgroup_name_un (group_name)
);
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_command
-- ----------------------------
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command
(
id int(11) NOT NULL AUTO_INCREMENT,
command_type tinyint(4) DEFAULT NULL,
process_definition_code bigint(20) DEFAULT NULL,
command_param text,
task_depend_type tinyint(4) DEFAULT NULL,
failure_strategy tinyint(4) DEFAULT '0',
warning_type tinyint(4) DEFAULT '0',
warning_group_id int(11) DEFAULT NULL,
schedule_time datetime DEFAULT NULL,
start_time datetime DEFAULT NULL,
executor_id int(11) DEFAULT NULL,
update_time datetime DEFAULT NULL,
process_instance_priority int(11) DEFAULT NULL,
worker_group varchar(64),
environment_code bigint(20) DEFAULT '-1',
dry_run int NULL DEFAULT 0,
process_instance_id int(11) DEFAULT 0,
process_definition_version int(11) DEFAULT 0,
PRIMARY KEY (id),
KEY priority_id_index (process_instance_priority, id)
);
-- ----------------------------
-- Records of t_ds_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_datasource
-- ----------------------------
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource
(
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(64) NOT NULL,
note varchar(255) DEFAULT NULL,
type tinyint(4) NOT NULL,
user_id int(11) NOT NULL,
connection_params text NOT NULL,
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY t_ds_datasource_name_un (name, type)
);
-- ----------------------------
-- Records of t_ds_datasource
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_error_command
-- ----------------------------
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command
(
id int(11) NOT NULL,
command_type tinyint(4) DEFAULT NULL,
executor_id int(11) DEFAULT NULL,
process_definition_code bigint(20) DEFAULT NULL,
command_param text,
task_depend_type tinyint(4) DEFAULT NULL,
failure_strategy tinyint(4) DEFAULT '0',
warning_type tinyint(4) DEFAULT '0',
warning_group_id int(11) DEFAULT NULL,
schedule_time datetime DEFAULT NULL,
start_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
process_instance_priority int(11) DEFAULT NULL,
worker_group varchar(64),
environment_code bigint(20) DEFAULT '-1',
message text,
dry_run int NULL DEFAULT 0,
process_instance_id int(11) DEFAULT 0,
process_definition_version int(11) DEFAULT 0,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_error_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition
-- ----------------------------
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition
(
id int(11) NOT NULL AUTO_INCREMENT,
code bigint(20) NOT NULL,
name varchar(255) DEFAULT NULL,
version int(11) DEFAULT NULL,
description text,
project_code bigint(20) NOT NULL,
release_state tinyint(4) DEFAULT NULL,
user_id int(11) DEFAULT NULL,
global_params text,
flag tinyint(4) DEFAULT NULL,
locations text,
warning_group_id int(11) DEFAULT NULL,
timeout int(11) DEFAULT '0',
tenant_id int(11) NOT NULL DEFAULT '-1',
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY process_unique (name,project_code) USING BTREE,
UNIQUE KEY code_unique (code)
);
-- ----------------------------
-- Records of t_ds_process_definition
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition_log
-- ----------------------------
DROP TABLE IF EXISTS t_ds_process_definition_log;
CREATE TABLE t_ds_process_definition_log
(
id int(11) NOT NULL AUTO_INCREMENT,
code bigint(20) NOT NULL,
name varchar(200) DEFAULT NULL,
version int(11) DEFAULT NULL,
description text,
project_code bigint(20) NOT NULL,
release_state tinyint(4) DEFAULT NULL,
user_id int(11) DEFAULT NULL,
global_params text,
flag tinyint(4) DEFAULT NULL,
locations text,
warning_group_id int(11) DEFAULT NULL,
timeout int(11) DEFAULT '0',
tenant_id int(11) NOT NULL DEFAULT '-1',
operator int(11) DEFAULT NULL,
operate_time datetime DEFAULT NULL,
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS t_ds_task_definition;
CREATE TABLE t_ds_task_definition
(
id int(11) NOT NULL AUTO_INCREMENT,
code bigint(20) NOT NULL,
name varchar(200) DEFAULT NULL,
version int(11) DEFAULT NULL,
description text,
project_code bigint(20) NOT NULL,
user_id int(11) DEFAULT NULL,
task_type varchar(50) NOT NULL,
task_params longtext,
flag tinyint(2) DEFAULT NULL,
task_priority tinyint(4) DEFAULT NULL,
worker_group varchar(200) DEFAULT NULL,
environment_code bigint(20) DEFAULT '-1',
fail_retry_times int(11) DEFAULT NULL,
fail_retry_interval int(11) DEFAULT NULL,
timeout_flag tinyint(2) DEFAULT '0',
timeout_notify_strategy tinyint(4) DEFAULT NULL,
timeout int(11) DEFAULT '0',
delay_time int(11) DEFAULT '0',
resource_ids varchar(255) DEFAULT NULL,
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id, code)
);
-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS t_ds_task_definition_log;
CREATE TABLE t_ds_task_definition_log
(
id int(11) NOT NULL AUTO_INCREMENT,
code bigint(20) NOT NULL,
name varchar(200) DEFAULT NULL,
version int(11) DEFAULT NULL,
description text,
project_code bigint(20) NOT NULL,
user_id int(11) DEFAULT NULL,
task_type varchar(50) NOT NULL,
task_params text,
flag tinyint(2) DEFAULT NULL,
task_priority tinyint(4) DEFAULT NULL,
worker_group varchar(200) DEFAULT NULL,
environment_code bigint(20) DEFAULT '-1',
fail_retry_times int(11) DEFAULT NULL,
fail_retry_interval int(11) DEFAULT NULL,
timeout_flag tinyint(2) DEFAULT '0',
timeout_notify_strategy tinyint(4) DEFAULT NULL,
timeout int(11) DEFAULT '0',
delay_time int(11) DEFAULT '0',
resource_ids varchar(255) DEFAULT NULL,
operator int(11) DEFAULT NULL,
operate_time datetime DEFAULT NULL,
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Table structure for t_ds_process_task_relation
-- ----------------------------
DROP TABLE IF EXISTS t_ds_process_task_relation;
CREATE TABLE t_ds_process_task_relation
(
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(200) DEFAULT NULL,
process_definition_version int(11) DEFAULT NULL,
project_code bigint(20) NOT NULL,
process_definition_code bigint(20) NOT NULL,
pre_task_code bigint(20) NOT NULL,
pre_task_version int(11) NOT NULL,
post_task_code bigint(20) NOT NULL,
post_task_version int(11) NOT NULL,
condition_type tinyint(2) DEFAULT NULL,
condition_params text,
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Table structure for t_ds_process_task_relation_log
-- ----------------------------
DROP TABLE IF EXISTS t_ds_process_task_relation_log;
CREATE TABLE t_ds_process_task_relation_log
(
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(200) DEFAULT NULL,
process_definition_version int(11) DEFAULT NULL,
project_code bigint(20) NOT NULL,
process_definition_code bigint(20) NOT NULL,
pre_task_code bigint(20) NOT NULL,
pre_task_version int(11) NOT NULL,
post_task_code bigint(20) NOT NULL,
post_task_version int(11) NOT NULL,
condition_type tinyint(2) DEFAULT NULL,
condition_params text,
operator int(11) DEFAULT NULL,
operate_time datetime DEFAULT NULL,
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Table structure for t_ds_process_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance
(
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(255) DEFAULT NULL,
process_definition_version int(11) DEFAULT NULL,
process_definition_code bigint(20) not NULL,
state tinyint(4) DEFAULT NULL,
recovery tinyint(4) DEFAULT NULL,
start_time datetime DEFAULT NULL,
end_time datetime DEFAULT NULL,
run_times int(11) DEFAULT NULL,
host varchar(135) DEFAULT NULL,
command_type tinyint(4) DEFAULT NULL,
command_param text,
task_depend_type tinyint(4) DEFAULT NULL,
max_try_times tinyint(4) DEFAULT '0',
failure_strategy tinyint(4) DEFAULT '0',
warning_type tinyint(4) DEFAULT '0',
warning_group_id int(11) DEFAULT NULL,
schedule_time datetime DEFAULT NULL,
command_start_time datetime DEFAULT NULL,
global_params text,
flag tinyint(4) DEFAULT '1',
update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
is_sub_process int(11) DEFAULT '0',
executor_id int(11) NOT NULL,
history_cmd text,
process_instance_priority int(11) DEFAULT NULL,
worker_group varchar(64) DEFAULT NULL,
environment_code bigint(20) DEFAULT '-1',
timeout int(11) DEFAULT '0',
tenant_id int(11) NOT NULL DEFAULT '-1',
var_pool longtext,
dry_run int NULL DEFAULT 0,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_project
-- ----------------------------
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project
(
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(100) DEFAULT NULL,
code bigint(20) NOT NULL,
description varchar(200) DEFAULT NULL,
user_id int(11) DEFAULT NULL,
flag tinyint(4) DEFAULT '1',
create_time datetime NOT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_project
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_queue
-- ----------------------------
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue
(
id int(11) NOT NULL AUTO_INCREMENT,
queue_name varchar(64) DEFAULT NULL,
queue varchar(64) DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_queue
-- ----------------------------
INSERT INTO t_ds_queue
VALUES ('1', 'default', 'default', null, null);
-- ----------------------------
-- Table structure for t_ds_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user
(
id int(11) NOT NULL AUTO_INCREMENT,
user_id int(11) NOT NULL,
datasource_id int(11) DEFAULT NULL,
perm int(11) DEFAULT '1',
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance
(
id int(11) NOT NULL AUTO_INCREMENT,
parent_process_instance_id int(11) DEFAULT NULL,
parent_task_instance_id int(11) DEFAULT NULL,
process_instance_id int(11) DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user
(
id int(11) NOT NULL AUTO_INCREMENT,
user_id int(11) NOT NULL,
project_id int(11) DEFAULT NULL,
perm int(11) DEFAULT '1',
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user
(
id int(11) NOT NULL AUTO_INCREMENT,
user_id int(11) NOT NULL,
resources_id int(11) DEFAULT NULL,
perm int(11) DEFAULT '1',
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user
(
id int(11) NOT NULL AUTO_INCREMENT,
user_id int(11) NOT NULL,
udf_id int(11) DEFAULT NULL,
perm int(11) DEFAULT '1',
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources
(
id int(11) NOT NULL AUTO_INCREMENT,
alias varchar(64) DEFAULT NULL,
file_name varchar(64) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
user_id int(11) DEFAULT NULL,
type tinyint(4) DEFAULT NULL,
size bigint(20) DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
pid int(11) DEFAULT NULL,
full_name varchar(64) DEFAULT NULL,
is_directory tinyint(4) DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY t_ds_resources_un (full_name, type)
);
-- ----------------------------
-- Records of t_ds_resources
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_schedules
-- ----------------------------
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules
(
id int(11) NOT NULL AUTO_INCREMENT,
process_definition_code bigint(20) NOT NULL,
start_time datetime NOT NULL,
end_time datetime NOT NULL,
timezone_id varchar(40) DEFAULT NULL,
crontab varchar(255) NOT NULL,
failure_strategy tinyint(4) NOT NULL,
user_id int(11) NOT NULL,
release_state tinyint(4) NOT NULL,
warning_type tinyint(4) NOT NULL,
warning_group_id int(11) DEFAULT NULL,
process_instance_priority int(11) DEFAULT NULL,
worker_group varchar(64) DEFAULT '',
environment_code bigint(20) DEFAULT '-1',
create_time datetime NOT NULL,
update_time datetime NOT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_schedules
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_session
-- ----------------------------
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session
(
id varchar(64) NOT NULL,
user_id int(11) DEFAULT NULL,
ip varchar(45) DEFAULT NULL,
last_login_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_session
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_task_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance
(
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(255) DEFAULT NULL,
task_type varchar(50) NOT NULL,
task_code bigint(20) NOT NULL,
task_definition_version int(11) DEFAULT NULL,
process_instance_id int(11) DEFAULT NULL,
state tinyint(4) DEFAULT NULL,
submit_time datetime DEFAULT NULL,
start_time datetime DEFAULT NULL,
end_time datetime DEFAULT NULL,
host varchar(135) DEFAULT NULL,
execute_path varchar(200) DEFAULT NULL,
log_path varchar(200) DEFAULT NULL,
alert_flag tinyint(4) DEFAULT NULL,
retry_times int(4) DEFAULT '0',
pid int(4) DEFAULT NULL,
app_link text,
task_params text,
flag tinyint(4) DEFAULT '1',
retry_interval int(4) DEFAULT NULL,
max_retry_times int(2) DEFAULT NULL,
task_instance_priority int(11) DEFAULT NULL,
worker_group varchar(64) DEFAULT NULL,
environment_code bigint(20) DEFAULT '-1',
environment_config text DEFAULT '',
executor_id int(11) DEFAULT NULL,
first_submit_time datetime DEFAULT NULL,
delay_time int(4) DEFAULT '0',
var_pool longtext,
dry_run int NULL DEFAULT 0,
PRIMARY KEY (id),
FOREIGN KEY (process_instance_id) REFERENCES t_ds_process_instance (id) ON DELETE CASCADE
);
-- ----------------------------
-- Records of t_ds_task_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_tenant
-- ----------------------------
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant
(
id int(11) NOT NULL AUTO_INCREMENT,
tenant_code varchar(64) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
queue_id int(11) DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_tenant
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_udfs
-- ----------------------------
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs
(
id int(11) NOT NULL AUTO_INCREMENT,
user_id int(11) NOT NULL,
func_name varchar(100) NOT NULL,
class_name varchar(255) NOT NULL,
type tinyint(4) NOT NULL,
arg_types varchar(255) DEFAULT NULL,
database varchar(255) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
resource_id int(11) NOT NULL,
resource_name varchar(255) NOT NULL,
create_time datetime NOT NULL,
update_time datetime NOT NULL,
PRIMARY KEY (id)
);
-- ----------------------------
-- Records of t_ds_udfs
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user
(
id int(11) NOT NULL AUTO_INCREMENT,
user_name varchar(64) DEFAULT NULL,
user_password varchar(64) DEFAULT NULL,
user_type tinyint(4) DEFAULT NULL,
email varchar(64) DEFAULT NULL,
phone varchar(11) DEFAULT NULL,
tenant_id int(11) DEFAULT NULL,
create_time datetime DEFAULT NULL,
update_time datetime DEFAULT NULL,
queue varchar(64) DEFAULT NULL,
state int(1) DEFAULT 1,
PRIMARY KEY (id),
UNIQUE KEY user_name_unique (user_name)
);
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_worker_group
-- ----------------------------
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group
(
id bigint(11) NOT NULL AUTO_INCREMENT,
name varchar(255) NOT NULL,
addr_list text NULL DEFAULT NULL,
create_time datetime NULL DEFAULT NULL,
update_time datetime NULL DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY name_unique (name)
);
-- ----------------------------
-- Records of t_ds_worker_group
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version
(
id int(11) NOT NULL AUTO_INCREMENT,
version varchar(200) NOT NULL,
PRIMARY KEY (id),
UNIQUE KEY version_UNIQUE (version)
);
-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO t_ds_version
VALUES ('1', '1.4.0');
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39',
'2018-11-29 10:20:39');
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO t_ds_user
VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50',
'2018-10-24 17:40:22', null, 1);
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define
(
id int NOT NULL AUTO_INCREMENT,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text,
create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (id),
UNIQUE KEY t_ds_plugin_define_UN (plugin_name,plugin_type)
);
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance
(
id int NOT NULL AUTO_INCREMENT,
plugin_define_id int NOT NULL,
plugin_instance_params text,
create_time timestamp NULL DEFAULT CURRENT_TIMESTAMP,
update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
instance_name varchar(200) DEFAULT NULL,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_environment
--
DROP TABLE IF EXISTS t_ds_environment;
CREATE TABLE t_ds_environment
(
id int NOT NULL AUTO_INCREMENT,
code bigint(20) NOT NULL,
name varchar(100) DEFAULT NULL,
config text DEFAULT NULL,
description text,
operator int DEFAULT NULL,
create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (id),
UNIQUE KEY environment_name_unique (name),
UNIQUE KEY environment_code_unique (code)
);
--
-- Table structure for table t_ds_environment_worker_group_relation
--
DROP TABLE IF EXISTS t_ds_environment_worker_group_relation;
CREATE TABLE t_ds_environment_worker_group_relation
(
id int NOT NULL AUTO_INCREMENT,
environment_code bigint(20) NOT NULL,
worker_group varchar(255) NOT NULL,
operator int DEFAULT NULL,
create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (id),
UNIQUE KEY environment_worker_group_unique (environment_code,worker_group)
);
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,876 | [Bug][SQL] Suggest to increase the max length of the field resource_ids in the table t_ds_task_definition. | **Describe the bug**
When I saved a shell task that used many resource files, the service threw a PSQLException.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Create a shell task and select many resource files.
2. Save the workflow; you should see the error. If you don't, select more resource files so that the comma-separated resource id list exceeds 255 characters.
**Expected behavior**
The max length of the field `resource_ids` in the table `t_ds_task_definition` should be increased so that tasks referencing many resource files can be saved.
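A minimal migration sketch, assuming the column is simply widened to `text` (the exact type the fix chooses may differ; the log table carries the same column):
```sql
-- Hypothetical migration: widen resource_ids from varchar(255) to text.
-- MySQL syntax:
ALTER TABLE t_ds_task_definition
    MODIFY COLUMN resource_ids text COMMENT 'resource id, separated by comma';
ALTER TABLE t_ds_task_definition_log
    MODIFY COLUMN resource_ids text COMMENT 'resource id, separated by comma';

-- PostgreSQL equivalent (the error below comes from a PostgreSQL deployment):
-- ALTER TABLE t_ds_task_definition     ALTER COLUMN resource_ids TYPE text;
-- ALTER TABLE t_ds_task_definition_log ALTER COLUMN resource_ids TYPE text;
```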
**Screenshots**
![image](https://user-images.githubusercontent.com/4928204/126594849-07126f10-9031-43d2-a2ce-50ce9771d1f1.png)
![image](https://user-images.githubusercontent.com/4928204/126594860-5e38dc77-32d2-4d67-9cfe-e9424b8b07d3.png)
**Which version of Dolphin Scheduler:**
- [dev]
**Additional context**
Exception stack trace:
```
[INFO] 2021-07-22 11:51:07.658 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:0702204b-58c0-4f0f-8b4b-dbfef2be63cc, LOGIN_USER:admin, URI:/dolphinscheduler/users/list-paging, METHOD:GET, HANDLER:org.apache.dolphinscheduler.api.controller.UsersController.queryUserList, ARGS:{searchVal=jiang.hua, pageNo=1, pageSize=10}
[INFO] 2021-07-22 11:51:07.682 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:55caab3c-e003-4925-88c9-172dd2318d7c, LOGIN_USER:admin, URI:/dolphinscheduler/projects/feature-works/process/update, METHOD:POST, HANDLER:org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition, ARGS:{connects=[], name=jianghua_test, description=test, locations={"tasks-24242":{"name":"restore_from_checkpoint_test123","targetarr":"","nodenumber":"0","x":-800,"y":-900,"color":"#1297DC"}}, id=8, releaseState=OFFLINE, projectName=feature-works, processDefinitionJson={"tasks":[{"id":"tasks-24242","code":491450867713,"version":27,"name":"restore_from_checkpoint_test123","desc":null,"type":"SHELL","runFlag":"NORMAL","loc":null,"maxRetryTimes":3,"retryInterval":1,"params":{"resourceList":[{"id":157},{"id":158},{"id":162},{"id":164},{"id":166},{"id":168},{"id":160},{"id":161},{"id":159},{"id":163},{"id":165},{"id":167},{"id":95},{"id":97},{"id":98},{"id":99},{"id":100},{"id":104},{"id":105},{"id":106},{"id":107},{"id":116},{"id":94},{"id":96},{"id":101},{"id":102},{"id":103},{"id":108},{"id":109},{"id":110},{"id":112},{"id":113},{"id":114},{"id":115},{"id":117},{"id":170},{"id":171},{"id":172},{"id":120},{"id":121},{"id":123},{"id":125},{"id":127},{"id":129},{"id":131},{"id":133},{"id":135},{"id":137},{"id":139},{"id":141},{"id":147},{"id":148},{"id":149},{"id":150},{"id":151},{"id":152},{"id":134},{"id":136},{"id":118},{"id":119},{"id":122},{"id":124},{"id":126},{"id":128},{"id":130},{"id":132},{"id":138},{"id":140},{"id":142},{"id":143},{"id":144},{"id":145},{"id":146},{"id":169},{"id":76},{"id":77},{"id":78}],"localParams":[{"prop":"task.name","direct":"IN","type":"VARCHAR","value":"restore_from_checkpoint_test.sql","deleteScript":"-n"},{"prop":"task.script.file","direct":"IN","type":"VARCHAR","value":"Projects/feature-works/tasks/stream/jianghua_test/restore_from_checkpoint_test.sql","deleteScript":"-f"},{"prop":"task.execution.parallel","direct":"IN","type":"VARCHAR","value":"3","deleteScript":"-p"},{"prop":"task.execution.slots","direct":"IN","type":"VARCHAR","value":"10","deleteScript":"-s"},{"prop":"task.execution.yjm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-j"},{"prop":"task.execution.ytm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-w"},{"prop":"task.instance.id","direct":"IN","type":"VARCHAR","value":"${system.task.instance.id}"},{"prop":"task.execute.path","direct":"IN","type":"VARCHAR","value":"${system.task.execute.path}"}],"rawScript":"chmod +x ${task.execute.path}/${datamax.home}/${datamax.version}/bin/*\n\n${task.execute.path}/${datamax.home}/${datamax.version}/bin/start.sh -m ${run.mode} -n ${task.name} -f ${task.script.file} -p ${task.execution.parallel} -s ${task.execution.slots} -j ${task.execution.yjm} -w ${task.execution.ytm} -r ${repository.home} -b ${task.execute.path} -c ${datamax.home}/${datamax.version}/lib/${datamax.core.jar} -l ${flink.home}/${flink.version} -e 
${profile.home}/${run.mode}"},"preTasks":[],"preTaskNodeList":[],"extras":null,"depList":[],"dependence":{},"conditionResult":{"successNode":[""],"failedNode":[""]},"taskInstancePriority":"MEDIUM","workerGroup":"default","timeout":{"enable":false,"strategy":null,"interval":0},"delayTime":0}],"globalParams":[{"prop":"repository.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"run.mode","direct":"IN","type":"VARCHAR","value":"stream"},{"prop":"flink.home","direct":"IN","type":"VARCHAR","value":"./flink/"},{"prop":"datamax.home","direct":"IN","type":"VARCHAR","value":"./datamax/"},{"prop":"datamax.core.jar","direct":"IN","type":"VARCHAR","value":"platform-on-flink_1.13.0-2.0-SNAPSHOT.jar"},{"prop":"flink.version","direct":"IN","type":"VARCHAR","value":"1.13.0"},{"prop":"datamax.version","direct":"IN","type":"VARCHAR","value":"2.0.0"},{"prop":"profile.home","direct":"IN","type":"VARCHAR","value":"./profile/prod"},{"prop":"root.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"current.date","direct":"IN","type":"VARCHAR","value":"${system.biz.curdate}"}],"timeout":0,"tenantId":4}}
[ERROR] 2021-07-22 11:51:07.697 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - 更新工作流定义错误 ("Error updating the workflow definition")
org.springframework.dao.DataIntegrityViolationException:
### Error updating database. Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
### The error may exist in org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.java (best guess)
### The error may involve org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper.updateById-Inline
### The error occurred while setting parameters
### SQL: UPDATE t_ds_task_definition SET fail_retry_times=?, flag=?, code=?, task_priority=?, update_time=?, task_params=?, user_id=?, version=?, timeout=?, task_type=?, timeout_flag=?, create_time=?, project_code=?, fail_retry_interval=?, name=?, delay_time=?, worker_group=?, resource_ids=? WHERE id=?
### Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
; ERROR: value too long for type character varying(255); nested exception is org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.springframework.jdbc.support.SQLStateSQLExceptionTranslator.doTranslate(SQLStateSQLExceptionTranslator.java:104)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:72)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:74)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440)
at com.sun.proxy.$Proxy101.update(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.update(SqlSessionTemplate.java:287)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:63)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy122.updateById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.updateTaskDefinition(ProcessService.java:2156)
at org.apache.dolphinscheduler.service.process.ProcessService.handleTaskDefinition(ProcessService.java:2282)
at org.apache.dolphinscheduler.service.process.ProcessService.saveProcessDefinition(ProcessService.java:2212)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$d02bc54d.saveProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:418)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$4564e02d.updateProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:242)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88)
at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:86)
at sun.reflect.GeneratedMethodAccessor159.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633)
at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175)
at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$EnhancerBySpringCGLIB$$22f5b91d.updateProcessDefinition(<generated>)
at sun.reflect.GeneratedMethodAccessor622.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190)
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138)
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797)
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)
at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763)
at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633)
at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:143)
at org.postgresql.jdbc.PgPreparedStatement.execute(PgPreparedStatement.java:132)
at com.alibaba.druid.pool.DruidPooledPreparedStatement.execute(DruidPooledPreparedStatement.java:497)
at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
at sun.reflect.GeneratedMethodAccessor431.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:63)
at com.sun.proxy.$Proxy177.update(Unknown Source)
at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.doUpdate(MybatisSimpleExecutor.java:54)
at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:197)
at sun.reflect.GeneratedMethodAccessor505.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:426)
... 111 common frames omitted
```
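The overflow can be reproduced directly against the schema, without going through the UI; this is a hypothetical snippet (the row id and repeat count are invented) that triggers the same error:
```sql
-- Hypothetical: repeat('157,', 100) builds a 400-character id list,
-- well past varchar(255). On PostgreSQL this raises
-- "value too long for type character varying(255)", as in the log above.
UPDATE t_ds_task_definition
SET resource_ids = repeat('157,', 100)
WHERE id = 1;
```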
**Requirement or improvement**
- I suggest adding validation to check field lengths in the form (a stopgap detection query is sketched below).
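Until such validation or a schema change ships, an operator could at least detect rows approaching the limit; a hypothetical pre-flight query (`char_length` works on both MySQL and PostgreSQL):
```sql
-- Hypothetical check: list task definitions whose comma-separated
-- resource id list is close to the varchar(255) ceiling.
SELECT id, code, name, char_length(resource_ids) AS id_list_len
FROM t_ds_task_definition
WHERE char_length(resource_ids) > 200
ORDER BY id_list_len DESC;
```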
| https://github.com/apache/dolphinscheduler/issues/5876 | https://github.com/apache/dolphinscheduler/pull/6457 | 6495a204f3f5520f3ebce8b939f1254ee676facc | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | "2021-07-22T06:00:38Z" | java | "2021-10-14T07:31:06Z" | sql/dolphinscheduler_mysql.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for QRTZ_BLOB_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`;
CREATE TABLE `QRTZ_BLOB_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`BLOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_BLOB_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CALENDARS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CALENDARS`;
CREATE TABLE `QRTZ_CALENDARS` (
`SCHED_NAME` varchar(120) NOT NULL,
`CALENDAR_NAME` varchar(200) NOT NULL,
`CALENDAR` blob NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_CALENDARS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CRON_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`;
CREATE TABLE `QRTZ_CRON_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`CRON_EXPRESSION` varchar(120) NOT NULL,
`TIME_ZONE_ID` varchar(80) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_CRON_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_FIRED_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`;
CREATE TABLE `QRTZ_FIRED_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`ENTRY_ID` varchar(200) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`INSTANCE_NAME` varchar(200) NOT NULL,
`FIRED_TIME` bigint(13) NOT NULL,
`SCHED_TIME` bigint(13) NOT NULL,
`PRIORITY` int(11) NOT NULL,
`STATE` varchar(16) NOT NULL,
`JOB_NAME` varchar(200) DEFAULT NULL,
`JOB_GROUP` varchar(200) DEFAULT NULL,
`IS_NONCONCURRENT` varchar(1) DEFAULT NULL,
`REQUESTS_RECOVERY` varchar(1) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`),
KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`),
KEY `IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`),
KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_FIRED_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_JOB_DETAILS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`;
CREATE TABLE `QRTZ_JOB_DETAILS` (
`SCHED_NAME` varchar(120) NOT NULL,
`JOB_NAME` varchar(200) NOT NULL,
`JOB_GROUP` varchar(200) NOT NULL,
`DESCRIPTION` varchar(250) DEFAULT NULL,
`JOB_CLASS_NAME` varchar(250) NOT NULL,
`IS_DURABLE` varchar(1) NOT NULL,
`IS_NONCONCURRENT` varchar(1) NOT NULL,
`IS_UPDATE_DATA` varchar(1) NOT NULL,
`REQUESTS_RECOVERY` varchar(1) NOT NULL,
`JOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`),
KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_JOB_DETAILS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_LOCKS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_LOCKS`;
CREATE TABLE `QRTZ_LOCKS` (
`SCHED_NAME` varchar(120) NOT NULL,
`LOCK_NAME` varchar(40) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_LOCKS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`;
CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SCHEDULER_STATE
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`;
CREATE TABLE `QRTZ_SCHEDULER_STATE` (
`SCHED_NAME` varchar(120) NOT NULL,
`INSTANCE_NAME` varchar(200) NOT NULL,
`LAST_CHECKIN_TIME` bigint(13) NOT NULL,
`CHECKIN_INTERVAL` bigint(13) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_SCHEDULER_STATE
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`REPEAT_COUNT` bigint(7) NOT NULL,
`REPEAT_INTERVAL` bigint(12) NOT NULL,
`TIMES_TRIGGERED` bigint(10) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`STR_PROP_1` varchar(512) DEFAULT NULL,
`STR_PROP_2` varchar(512) DEFAULT NULL,
`STR_PROP_3` varchar(512) DEFAULT NULL,
`INT_PROP_1` int(11) DEFAULT NULL,
`INT_PROP_2` int(11) DEFAULT NULL,
`LONG_PROP_1` bigint(20) DEFAULT NULL,
`LONG_PROP_2` bigint(20) DEFAULT NULL,
`DEC_PROP_1` decimal(13,4) DEFAULT NULL,
`DEC_PROP_2` decimal(13,4) DEFAULT NULL,
`BOOL_PROP_1` varchar(1) DEFAULT NULL,
`BOOL_PROP_2` varchar(1) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_TRIGGERS`;
CREATE TABLE `QRTZ_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`JOB_NAME` varchar(200) NOT NULL,
`JOB_GROUP` varchar(200) NOT NULL,
`DESCRIPTION` varchar(250) DEFAULT NULL,
`NEXT_FIRE_TIME` bigint(13) DEFAULT NULL,
`PREV_FIRE_TIME` bigint(13) DEFAULT NULL,
`PRIORITY` int(11) DEFAULT NULL,
`TRIGGER_STATE` varchar(16) NOT NULL,
`TRIGGER_TYPE` varchar(8) NOT NULL,
`START_TIME` bigint(13) NOT NULL,
`END_TIME` bigint(13) DEFAULT NULL,
`CALENDAR_NAME` varchar(200) DEFAULT NULL,
`MISFIRE_INSTR` smallint(2) DEFAULT NULL,
`JOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`),
KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_access_token
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_access_token`;
CREATE TABLE `t_ds_access_token` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`token` varchar(64) DEFAULT NULL COMMENT 'token',
`expire_time` datetime DEFAULT NULL COMMENT 'end time of token ',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_access_token
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alert
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert`;
CREATE TABLE `t_ds_alert` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`title` varchar(64) DEFAULT NULL COMMENT 'title',
`content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)',
`alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed',
`log` text COMMENT 'log',
`alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_alert
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alertgroup`;
CREATE TABLE `t_ds_alertgroup`(
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids',
`create_user_id` int(11) DEFAULT NULL COMMENT 'create user id',
`group_name` varchar(255) DEFAULT NULL COMMENT 'group name',
`description` varchar(255) DEFAULT NULL,
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_command`;
CREATE TABLE `t_ds_command` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread',
`process_definition_code` bigint(20) DEFAULT NULL COMMENT 'process definition code',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group',
`schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
`start_time` datetime DEFAULT NULL COMMENT 'start time',
`executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) COMMENT 'worker group',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`dry_run` int NULL DEFAULT 0 COMMENT 'dry run flag:0 normal, 1 dry run',
`process_instance_id` int(11) DEFAULT 0 COMMENT 'process instance id',
`process_definition_version` int(11) DEFAULT 0 COMMENT 'process definition version',
PRIMARY KEY (`id`),
KEY `priority_id_index` (`process_instance_priority`,`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_datasource
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_datasource`;
CREATE TABLE `t_ds_datasource` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(64) NOT NULL COMMENT 'data source name',
`note` varchar(255) DEFAULT NULL COMMENT 'description',
`type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark',
`user_id` int(11) NOT NULL COMMENT 'the creator id',
`connection_params` text NOT NULL COMMENT 'json connection params',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_datasource
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_error_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_error_command`;
CREATE TABLE `t_ds_error_command` (
`id` int(11) NOT NULL COMMENT 'key',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
`process_definition_code` bigint(20) DEFAULT NULL COMMENT 'process definition code',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
`schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time',
`start_time` datetime DEFAULT NULL COMMENT 'start time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) COMMENT 'worker group',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`message` text COMMENT 'message',
`dry_run` int NULL DEFAULT NULL COMMENT 'dry run flag: 0 normal, 1 dry run',
`process_instance_id` int(11) DEFAULT 0 COMMENT 'process instance id: 0',
`process_definition_version` int(11) DEFAULT 0 COMMENT 'process definition version',
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;
-- ----------------------------
-- Records of t_ds_error_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition`;
CREATE TABLE `t_ds_process_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(255) DEFAULT NULL COMMENT 'process definition name',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
`user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
`global_params` text COMMENT 'global parameters',
`flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
`locations` text COMMENT 'Node location information',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`,`code`),
UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_definition
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition_log`;
CREATE TABLE `t_ds_process_definition_log` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'process definition name',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
`user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
`global_params` text COMMENT 'global parameters',
`flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
`locations` text COMMENT 'Node location information',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`operate_time` datetime DEFAULT NULL COMMENT 'operate time',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition`;
CREATE TABLE `t_ds_task_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
`version` int(11) DEFAULT NULL COMMENT 'task definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_params` longtext COMMENT 'job custom parameters',
`flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
`task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
`worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
`fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
`timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
`timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
`timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
`delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`,`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition_log`;
CREATE TABLE `t_ds_task_definition_log` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
`version` int(11) DEFAULT NULL COMMENT 'task definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_params` text COMMENT 'job custom parameters',
`flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
`task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
`worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
`fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
`timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
`timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
`timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
`delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`operate_time` datetime DEFAULT NULL COMMENT 'operate time',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_process_task_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation`;
CREATE TABLE `t_ds_process_task_relation` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`name` varchar(200) DEFAULT NULL COMMENT 'relation name',
`process_definition_version` int(11) DEFAULT NULL COMMENT 'process version',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
`pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
`pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
`post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
`post_task_version` int(11) NOT NULL COMMENT 'post task version',
`condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
`condition_params` text COMMENT 'condition params(json)',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_process_task_relation_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation_log`;
CREATE TABLE `t_ds_process_task_relation_log` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`name` varchar(200) DEFAULT NULL COMMENT 'relation name',
`process_definition_version` int(11) DEFAULT NULL COMMENT 'process version',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
`pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
`pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
`post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
`post_task_version` int(11) NOT NULL COMMENT 'post task version',
`condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
`condition_params` text COMMENT 'condition params(json)',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`operate_time` datetime DEFAULT NULL COMMENT 'operate time',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_instance`;
CREATE TABLE `t_ds_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'process instance name',
`process_definition_version` int(11) DEFAULT NULL COMMENT 'process definition version',
`process_definition_code` bigint(20) not NULL COMMENT 'process definition code',
`state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
`recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance',
`start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
`end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
`run_times` int(11) DEFAULT NULL COMMENT 'process instance run times',
`host` varchar(135) DEFAULT NULL COMMENT 'process instance host',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes',
`max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if process success or failed',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
`schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
`command_start_time` datetime DEFAULT NULL COMMENT 'command start time',
`global_params` text COMMENT 'global parameters',
`flag` tinyint(4) DEFAULT '1' COMMENT 'flag',
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process',
`executor_id` int(11) NOT NULL COMMENT 'executor id',
`history_cmd` text COMMENT 'history commands of process instance operation',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
`var_pool` longtext COMMENT 'var_pool',
`dry_run` int NULL DEFAULT 0 COMMENT 'dry run flag: 0 normal, 1 dry run ',
PRIMARY KEY (`id`),
KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE,
KEY `start_time_index` (`start_time`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_project
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_project`;
CREATE TABLE `t_ds_project` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(100) DEFAULT NULL COMMENT 'project name',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`description` varchar(200) DEFAULT NULL,
`user_id` int(11) DEFAULT NULL COMMENT 'creator id',
`flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_project
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_queue
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_queue`;
CREATE TABLE `t_ds_queue` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name',
`queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_queue
-- ----------------------------
INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null);
-- ----------------------------
-- Table structure for t_ds_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_datasource_user`;
CREATE TABLE `t_ds_relation_datasource_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`datasource_id` int(11) DEFAULT NULL COMMENT 'data source id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_process_instance`;
CREATE TABLE `t_ds_relation_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent task instance id',
`process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_project_user`;
CREATE TABLE `t_ds_relation_project_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`project_id` int(11) DEFAULT NULL COMMENT 'project id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_resources_user`;
CREATE TABLE `t_ds_relation_resources_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL COMMENT 'user id',
`resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_udfs_user`;
CREATE TABLE `t_ds_relation_udfs_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'userid',
`udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_resources`;
CREATE TABLE `t_ds_resources` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`alias` varchar(64) DEFAULT NULL COMMENT 'alias',
`file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
`description` varchar(255) DEFAULT NULL,
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF',
`size` bigint(20) DEFAULT NULL COMMENT 'resource size',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`pid` int(11) DEFAULT NULL,
`full_name` varchar(64) DEFAULT NULL,
`is_directory` tinyint(4) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_resources
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_schedules
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_schedules`;
CREATE TABLE `t_ds_schedules` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code',
`start_time` datetime NOT NULL COMMENT 'start time',
`end_time` datetime NOT NULL COMMENT 'end time',
`timezone_id` varchar(40) DEFAULT NULL COMMENT 'timezoneId',
`crontab` varchar(255) NOT NULL COMMENT 'crontab description',
`failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue',
`user_id` int(11) NOT NULL COMMENT 'user id',
`release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ',
`warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime NOT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_schedules
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_session
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_session`;
CREATE TABLE `t_ds_session` (
`id` varchar(64) NOT NULL COMMENT 'key',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`ip` varchar(45) DEFAULT NULL COMMENT 'ip',
`last_login_time` datetime DEFAULT NULL COMMENT 'last login time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_session
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_task_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_instance`;
CREATE TABLE `t_ds_task_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'task name',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_code` bigint(20) NOT NULL COMMENT 'task definition code',
`task_definition_version` int(11) DEFAULT NULL COMMENT 'task definition version',
`process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id',
`state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
`submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
`start_time` datetime DEFAULT NULL COMMENT 'task start time',
`end_time` datetime DEFAULT NULL COMMENT 'task end time',
`host` varchar(135) DEFAULT NULL COMMENT 'host of task running on',
`execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host',
`log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
`alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert',
`retry_times` int(4) DEFAULT '0' COMMENT 'task retry times',
`pid` int(4) DEFAULT NULL COMMENT 'pid of task',
`app_link` text COMMENT 'yarn app id',
`task_params` text COMMENT 'job custom parameters',
`flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
`retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ',
`max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times',
`task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`environment_config` text COMMENT 'this config contains many environment variables config',
`executor_id` int(11) DEFAULT NULL,
`first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time',
`delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time',
`var_pool` longtext COMMENT 'var_pool',
`dry_run` int NULL DEFAULT NULL COMMENT 'dry run flag: 0 normal, 1 dry run',
PRIMARY KEY (`id`),
KEY `process_instance_id` (`process_instance_id`) USING BTREE,
CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_task_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_tenant
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_tenant`;
CREATE TABLE `t_ds_tenant` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code',
`description` varchar(255) DEFAULT NULL,
`queue_id` int(11) DEFAULT NULL COMMENT 'queue id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_tenant
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_udfs
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_udfs`;
CREATE TABLE `t_ds_udfs` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`func_name` varchar(100) NOT NULL COMMENT 'UDF function name',
`class_name` varchar(255) NOT NULL COMMENT 'class of udf',
`type` tinyint(4) NOT NULL COMMENT 'Udf function type',
`arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types',
`database` varchar(255) DEFAULT NULL COMMENT 'data base',
`description` varchar(255) DEFAULT NULL,
`resource_id` int(11) NOT NULL COMMENT 'resource id',
`resource_name` varchar(255) NOT NULL COMMENT 'resource name',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime NOT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_udfs
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_user`;
CREATE TABLE `t_ds_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
`user_name` varchar(64) DEFAULT NULL COMMENT 'user name',
`user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
`user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user',
`email` varchar(64) DEFAULT NULL COMMENT 'email',
`phone` varchar(11) DEFAULT NULL COMMENT 'phone',
`tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`queue` varchar(64) DEFAULT NULL COMMENT 'queue',
`state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable',
PRIMARY KEY (`id`),
UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_worker_group
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(255) NOT NULL COMMENT 'worker group name',
`addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
`create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
`update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_worker_group
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_version`;
CREATE TABLE `t_ds_version` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`version` varchar(200) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `version_UNIQUE` (`version`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version';
-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO `t_ds_version` VALUES ('1', '1.4.0');
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO `t_ds_user`
VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1);
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
`plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
`plugin_params` text COMMENT 'plugin params',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_define_id` int NOT NULL,
`plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_environment
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment`;
CREATE TABLE `t_ds_environment` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`code` bigint(20) DEFAULT NULL COMMENT 'encoding',
`name` varchar(100) NOT NULL COMMENT 'environment name',
`config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config',
`description` text NULL DEFAULT NULL COMMENT 'the details',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `environment_name_unique` (`name`),
UNIQUE KEY `environment_code_unique` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_environment_worker_group_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`;
CREATE TABLE `t_ds_environment_worker_group_relation` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`environment_code` bigint(20) NOT NULL COMMENT 'environment code',
`worker_group` varchar(255) NOT NULL COMMENT 'worker group id',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,876 | [Bug][SQL] Suggest to increase the max length of the field resource_ids in the table t_ds_task_definition. | **Describe the bug**
When I saved a shell task that used many resource files, the service threw a PSQLException.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Create a shell task and select many resource files.
2. Save the workflow; you will see the error. If you don't, select more resource files and try again.
**Expected behavior**
The max length of the field resource_ids in the table t_ds_task_definition should be increased so that saving a task with many resource files succeeds.
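A minimal sketch of what that change could look like (assuming PostgreSQL, to match sql/dolphinscheduler_postgre.sql; widening to `text` is an assumption here, not necessarily the project's final choice):
```sql
-- Widen resource_ids so long resource id lists no longer overflow varchar(255).
-- The log table mirrors the definition table, so it needs the same change.
ALTER TABLE t_ds_task_definition ALTER COLUMN resource_ids TYPE text;
ALTER TABLE t_ds_task_definition_log ALTER COLUMN resource_ids TYPE text;
```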
**Screenshots**
![image](https://user-images.githubusercontent.com/4928204/126594849-07126f10-9031-43d2-a2ce-50ce9771d1f1.png)
![image](https://user-images.githubusercontent.com/4928204/126594860-5e38dc77-32d2-4d67-9cfe-e9424b8b07d3.png)
**Which version of Dolphin Scheduler:**
- [dev]
**Additional context**
Exception stack trace:
```
[INFO] 2021-07-22 11:51:07.658 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:0702204b-58c0-4f0f-8b4b-dbfef2be63cc, LOGIN_USER:admin, URI:/dolphinscheduler/users/list-paging, METHOD:GET, HANDLER:org.apache.dolphinscheduler.api.controller.UsersController.queryUserList, ARGS:{searchVal=jiang.hua, pageNo=1, pageSize=10}
[INFO] 2021-07-22 11:51:07.682 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:55caab3c-e003-4925-88c9-172dd2318d7c, LOGIN_USER:admin, URI:/dolphinscheduler/projects/feature-works/process/update, METHOD:POST, HANDLER:org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition, ARGS:{connects=[], name=jianghua_test, description=test, locations={"tasks-24242":{"name":"restore_from_checkpoint_test123","targetarr":"","nodenumber":"0","x":-800,"y":-900,"color":"#1297DC"}}, id=8, releaseState=OFFLINE, projectName=feature-works, processDefinitionJson={"tasks":[{"id":"tasks-24242","code":491450867713,"version":27,"name":"restore_from_checkpoint_test123","desc":null,"type":"SHELL","runFlag":"NORMAL","loc":null,"maxRetryTimes":3,"retryInterval":1,"params":{"resourceList":[{"id":157},{"id":158},{"id":162},{"id":164},{"id":166},{"id":168},{"id":160},{"id":161},{"id":159},{"id":163},{"id":165},{"id":167},{"id":95},{"id":97},{"id":98},{"id":99},{"id":100},{"id":104},{"id":105},{"id":106},{"id":107},{"id":116},{"id":94},{"id":96},{"id":101},{"id":102},{"id":103},{"id":108},{"id":109},{"id":110},{"id":112},{"id":113},{"id":114},{"id":115},{"id":117},{"id":170},{"id":171},{"id":172},{"id":120},{"id":121},{"id":123},{"id":125},{"id":127},{"id":129},{"id":131},{"id":133},{"id":135},{"id":137},{"id":139},{"id":141},{"id":147},{"id":148},{"id":149},{"id":150},{"id":151},{"id":152},{"id":134},{"id":136},{"id":118},{"id":119},{"id":122},{"id":124},{"id":126},{"id":128},{"id":130},{"id":132},{"id":138},{"id":140},{"id":142},{"id":143},{"id":144},{"id":145},{"id":146},{"id":169},{"id":76},{"id":77},{"id":78}],"localParams":[{"prop":"task.name","direct":"IN","type":"VARCHAR","value":"restore_from_checkpoint_test.sql","deleteScript":"-n"},{"prop":"task.script.file","direct":"IN","type":"VARCHAR","value":"Projects/feature-works/tasks/stream/jianghua_test/restore_from_checkpoint_test.sql","deleteScript":"-f"},{"prop":"task.execution.parallel","direct":"IN","type":"VARCHAR","value":"3","deleteScript":"-p"},{"prop":"task.execution.slots","direct":"IN","type":"VARCHAR","value":"10","deleteScript":"-s"},{"prop":"task.execution.yjm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-j"},{"prop":"task.execution.ytm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-w"},{"prop":"task.instance.id","direct":"IN","type":"VARCHAR","value":"${system.task.instance.id}"},{"prop":"task.execute.path","direct":"IN","type":"VARCHAR","value":"${system.task.execute.path}"}],"rawScript":"chmod +x ${task.execute.path}/${datamax.home}/${datamax.version}/bin/*\n\n${task.execute.path}/${datamax.home}/${datamax.version}/bin/start.sh -m ${run.mode} -n ${task.name} -f ${task.script.file} -p ${task.execution.parallel} -s ${task.execution.slots} -j ${task.execution.yjm} -w ${task.execution.ytm} -r ${repository.home} -b ${task.execute.path} -c ${datamax.home}/${datamax.version}/lib/${datamax.core.jar} -l ${flink.home}/${flink.version} -e 
${profile.home}/${run.mode}"},"preTasks":[],"preTaskNodeList":[],"extras":null,"depList":[],"dependence":{},"conditionResult":{"successNode":[""],"failedNode":[""]},"taskInstancePriority":"MEDIUM","workerGroup":"default","timeout":{"enable":false,"strategy":null,"interval":0},"delayTime":0}],"globalParams":[{"prop":"repository.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"run.mode","direct":"IN","type":"VARCHAR","value":"stream"},{"prop":"flink.home","direct":"IN","type":"VARCHAR","value":"./flink/"},{"prop":"datamax.home","direct":"IN","type":"VARCHAR","value":"./datamax/"},{"prop":"datamax.core.jar","direct":"IN","type":"VARCHAR","value":"platform-on-flink_1.13.0-2.0-SNAPSHOT.jar"},{"prop":"flink.version","direct":"IN","type":"VARCHAR","value":"1.13.0"},{"prop":"datamax.version","direct":"IN","type":"VARCHAR","value":"2.0.0"},{"prop":"profile.home","direct":"IN","type":"VARCHAR","value":"./profile/prod"},{"prop":"root.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"current.date","direct":"IN","type":"VARCHAR","value":"${system.biz.curdate}"}],"timeout":0,"tenantId":4}}
[ERROR] 2021-07-22 11:51:07.697 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - Error updating the process definition
org.springframework.dao.DataIntegrityViolationException:
### Error updating database. Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
### The error may exist in org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.java (best guess)
### The error may involve org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper.updateById-Inline
### The error occurred while setting parameters
### SQL: UPDATE t_ds_task_definition SET fail_retry_times=?, flag=?, code=?, task_priority=?, update_time=?, task_params=?, user_id=?, version=?, timeout=?, task_type=?, timeout_flag=?, create_time=?, project_code=?, fail_retry_interval=?, name=?, delay_time=?, worker_group=?, resource_ids=? WHERE id=?
### Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
; ERROR: value too long for type character varying(255); nested exception is org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.springframework.jdbc.support.SQLStateSQLExceptionTranslator.doTranslate(SQLStateSQLExceptionTranslator.java:104)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:72)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:74)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440)
at com.sun.proxy.$Proxy101.update(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.update(SqlSessionTemplate.java:287)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:63)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy122.updateById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.updateTaskDefinition(ProcessService.java:2156)
at org.apache.dolphinscheduler.service.process.ProcessService.handleTaskDefinition(ProcessService.java:2282)
at org.apache.dolphinscheduler.service.process.ProcessService.saveProcessDefinition(ProcessService.java:2212)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$d02bc54d.saveProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:418)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$4564e02d.updateProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:242)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88)
at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:86)
at sun.reflect.GeneratedMethodAccessor159.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633)
at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175)
at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$EnhancerBySpringCGLIB$$22f5b91d.updateProcessDefinition(<generated>)
at sun.reflect.GeneratedMethodAccessor622.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190)
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138)
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797)
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)
at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763)
at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633)
at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:143)
at org.postgresql.jdbc.PgPreparedStatement.execute(PgPreparedStatement.java:132)
at com.alibaba.druid.pool.DruidPooledPreparedStatement.execute(DruidPooledPreparedStatement.java:497)
at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
at sun.reflect.GeneratedMethodAccessor431.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:63)
at com.sun.proxy.$Proxy177.update(Unknown Source)
at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.doUpdate(MybatisSimpleExecutor.java:54)
at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:197)
at sun.reflect.GeneratedMethodAccessor505.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:426)
... 111 common frames omitted
```
**Requirement or improvement**
- I suggest adding validation that checks the length of these fields in the form.
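
Alongside form validation, a query like the following can show how close existing rows already are to the current limit (a sketch assuming PostgreSQL; the 200-character threshold is only illustrative):
```sql
-- List task definitions whose resource id list is near the varchar(255) cap.
SELECT id, name, char_length(resource_ids) AS resource_ids_len
FROM t_ds_task_definition
WHERE char_length(resource_ids) > 200
ORDER BY resource_ids_len DESC;
```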
| https://github.com/apache/dolphinscheduler/issues/5876 | https://github.com/apache/dolphinscheduler/pull/6457 | 6495a204f3f5520f3ebce8b939f1254ee676facc | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | "2021-07-22T06:00:38Z" | java | "2021-10-14T07:31:06Z" | sql/dolphinscheduler_postgre.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS (
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL
);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL
) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL
) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)
) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL
) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL
) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL
) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL
) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(200) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL
) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL
) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL
) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
content text ,
alert_status int DEFAULT '0' ,
log text ,
alertgroup_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup(
id int NOT NULL,
alert_instance_ids varchar (255) DEFAULT NULL,
create_user_id int4 DEFAULT NULL,
group_name varchar(255) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_code bigint NOT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
dry_run int DEFAULT '0' ,
process_instance_id int DEFAULT 0,
process_definition_version int DEFAULT 0,
PRIMARY KEY (id)
) ;
create index priority_id_index on t_ds_command (process_instance_priority,id);
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(255) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id),
CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_code bigint NOT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
dry_run int DEFAULT '0' ,
process_instance_id int DEFAULT 0,
process_definition_version int DEFAULT 0,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_master_server
--
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
release_state int DEFAULT NULL ,
user_id int DEFAULT NULL ,
global_params text ,
locations text ,
warning_group_id int DEFAULT NULL ,
flag int DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int DEFAULT '-1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT process_definition_unique UNIQUE (name, project_code)
) ;
create index process_definition_index on t_ds_process_definition (code,id);
DROP TABLE IF EXISTS t_ds_process_definition_log;
CREATE TABLE t_ds_process_definition_log (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
release_state int DEFAULT NULL ,
user_id int DEFAULT NULL ,
global_params text ,
locations text ,
warning_group_id int DEFAULT NULL ,
flag int DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int DEFAULT '-1' ,
operator int DEFAULT NULL ,
operate_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP TABLE IF EXISTS t_ds_task_definition;
CREATE TABLE t_ds_task_definition (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
user_id int DEFAULT NULL ,
task_type varchar(50) DEFAULT NULL ,
task_params text ,
flag int DEFAULT NULL ,
task_priority int DEFAULT NULL ,
worker_group varchar(255) DEFAULT NULL ,
environment_code bigint DEFAULT '-1',
fail_retry_times int DEFAULT NULL ,
fail_retry_interval int DEFAULT NULL ,
timeout_flag int DEFAULT NULL ,
timeout_notify_strategy int DEFAULT NULL ,
timeout int DEFAULT '0' ,
delay_time int DEFAULT '0' ,
resource_ids varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index task_definition_index on t_ds_task_definition (project_code,id);
DROP TABLE IF EXISTS t_ds_task_definition_log;
CREATE TABLE t_ds_task_definition_log (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
user_id int DEFAULT NULL ,
task_type varchar(50) DEFAULT NULL ,
task_params text ,
flag int DEFAULT NULL ,
task_priority int DEFAULT NULL ,
worker_group varchar(255) DEFAULT NULL ,
environment_code bigint DEFAULT '-1',
fail_retry_times int DEFAULT NULL ,
fail_retry_interval int DEFAULT NULL ,
timeout_flag int DEFAULT NULL ,
timeout_notify_strategy int DEFAULT NULL ,
timeout int DEFAULT '0' ,
delay_time int DEFAULT '0' ,
resource_ids varchar(255) DEFAULT NULL ,
operator int DEFAULT NULL ,
operate_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP TABLE IF EXISTS t_ds_process_task_relation;
CREATE TABLE t_ds_process_task_relation (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_version int DEFAULT NULL ,
project_code bigint DEFAULT NULL ,
process_definition_code bigint DEFAULT NULL ,
pre_task_code bigint DEFAULT NULL ,
pre_task_version int DEFAULT '0' ,
post_task_code bigint DEFAULT NULL ,
post_task_version int DEFAULT '0' ,
condition_type int DEFAULT NULL ,
condition_params text ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP TABLE IF EXISTS t_ds_process_task_relation_log;
CREATE TABLE t_ds_process_task_relation_log (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_version int DEFAULT NULL ,
project_code bigint DEFAULT NULL ,
process_definition_code bigint DEFAULT NULL ,
pre_task_code bigint DEFAULT NULL ,
pre_task_version int DEFAULT '0' ,
post_task_code bigint DEFAULT NULL ,
post_task_version int DEFAULT '0' ,
condition_type int DEFAULT NULL ,
condition_params text ,
operator int DEFAULT NULL ,
operate_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_version int DEFAULT NULL ,
process_definition_code bigint DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64) ,
environment_code bigint DEFAULT '-1',
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
var_pool text ,
dry_run int DEFAULT '0' ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_code,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
code bigint NOT NULL,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id),
CONSTRAINT t_ds_resources_un UNIQUE (full_name, type)
) ;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_code bigint NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
timezone_id varchar(40) default NULL ,
crontab varchar(255) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(50) DEFAULT NULL ,
task_code bigint NOT NULL,
task_definition_version int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link text ,
task_params text ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
environment_config text,
executor_id int DEFAULT NULL ,
first_submit_time timestamp DEFAULT NULL ,
delay_time int DEFAULT '0' ,
var_pool text ,
dry_run int DEFAULT '0' ,
PRIMARY KEY (id),
CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE
) ;
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
comment on column t_ds_user.state is 'state 0:disable 1:enable';
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(255) NOT NULL ,
addr_list text DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT name_unique UNIQUE (name)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
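-- Simulate auto-increment ids: give every table above its own sequence and
-- use NEXTVAL of that sequence as the default value of the id column.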
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence;
CREATE SEQUENCE t_ds_process_definition_log_id_sequence;
ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence;
CREATE SEQUENCE t_ds_task_definition_id_sequence;
ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence;
CREATE SEQUENCE t_ds_task_definition_log_id_sequence;
ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence;
CREATE SEQUENCE t_ds_process_task_relation_id_sequence;
ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence;
CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence;
ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user, user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time)
VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup, default admin warning group
INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time)
VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_version, version : 1.4.0
INSERT INTO t_ds_version(version) VALUES ('1.4.0');
--
-- Table structure for table t_ds_plugin_define
--
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
--
-- Table structure for table t_ds_alert_plugin_instance
--
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_environment
--
DROP TABLE IF EXISTS t_ds_environment;
CREATE TABLE t_ds_environment (
id serial NOT NULL,
code bigint NOT NULL,
name varchar(100) DEFAULT NULL,
config text DEFAULT NULL,
description text,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT environment_name_unique UNIQUE (name),
CONSTRAINT environment_code_unique UNIQUE (code)
);
--
-- Table structure for table t_ds_environment_worker_group_relation
--
DROP TABLE IF EXISTS t_ds_environment_worker_group_relation;
CREATE TABLE t_ds_environment_worker_group_relation (
id serial NOT NULL,
environment_code bigint NOT NULL,
worker_group varchar(255) NOT NULL,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id) ,
CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group)
);
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,876 | [Bug][SQL] Suggest to increase the max length of the field resource_ids in the table t_ds_task_definition. | **Describe the bug**
When I saved a shell task that used many resource files, the service threw a PSQLException.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Create a shell task and select many resource files.
2. Save the workflow and you should see the error. If you don't, try selecting more resource files.
**Expected behavior**
The max length of the field resource_ids in the table t_ds_task_definition should be increased.
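For reference, a minimal sketch of such a change, assuming the fix is simply widening the column to `text` (table and column names are taken from the error below; the real upgrade scripts may differ):
```sql
-- Sketch only: widen resource_ids so long comma-separated id lists fit.
-- PostgreSQL (the dialect in the stack trace below):
ALTER TABLE t_ds_task_definition ALTER COLUMN resource_ids TYPE text;
ALTER TABLE t_ds_task_definition_log ALTER COLUMN resource_ids TYPE text;
-- MySQL equivalent:
ALTER TABLE t_ds_task_definition MODIFY COLUMN resource_ids text;
ALTER TABLE t_ds_task_definition_log MODIFY COLUMN resource_ids text;
```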
**Screenshots**
![image](https://user-images.githubusercontent.com/4928204/126594849-07126f10-9031-43d2-a2ce-50ce9771d1f1.png)
![image](https://user-images.githubusercontent.com/4928204/126594860-5e38dc77-32d2-4d67-9cfe-e9424b8b07d3.png)
**Which version of Dolphin Scheduler:**
- [dev]
**Additional context**
Exception stack trace:
```
[INFO] 2021-07-22 11:51:07.658 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:0702204b-58c0-4f0f-8b4b-dbfef2be63cc, LOGIN_USER:admin, URI:/dolphinscheduler/users/list-paging, METHOD:GET, HANDLER:org.apache.dolphinscheduler.api.controller.UsersController.queryUserList, ARGS:{searchVal=jiang.hua, pageNo=1, pageSize=10}
[INFO] 2021-07-22 11:51:07.682 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:55caab3c-e003-4925-88c9-172dd2318d7c, LOGIN_USER:admin, URI:/dolphinscheduler/projects/feature-works/process/update, METHOD:POST, HANDLER:org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition, ARGS:{connects=[], name=jianghua_test, description=test, locations={"tasks-24242":{"name":"restore_from_checkpoint_test123","targetarr":"","nodenumber":"0","x":-800,"y":-900,"color":"#1297DC"}}, id=8, releaseState=OFFLINE, projectName=feature-works, processDefinitionJson={"tasks":[{"id":"tasks-24242","code":491450867713,"version":27,"name":"restore_from_checkpoint_test123","desc":null,"type":"SHELL","runFlag":"NORMAL","loc":null,"maxRetryTimes":3,"retryInterval":1,"params":{"resourceList":[{"id":157},{"id":158},{"id":162},{"id":164},{"id":166},{"id":168},{"id":160},{"id":161},{"id":159},{"id":163},{"id":165},{"id":167},{"id":95},{"id":97},{"id":98},{"id":99},{"id":100},{"id":104},{"id":105},{"id":106},{"id":107},{"id":116},{"id":94},{"id":96},{"id":101},{"id":102},{"id":103},{"id":108},{"id":109},{"id":110},{"id":112},{"id":113},{"id":114},{"id":115},{"id":117},{"id":170},{"id":171},{"id":172},{"id":120},{"id":121},{"id":123},{"id":125},{"id":127},{"id":129},{"id":131},{"id":133},{"id":135},{"id":137},{"id":139},{"id":141},{"id":147},{"id":148},{"id":149},{"id":150},{"id":151},{"id":152},{"id":134},{"id":136},{"id":118},{"id":119},{"id":122},{"id":124},{"id":126},{"id":128},{"id":130},{"id":132},{"id":138},{"id":140},{"id":142},{"id":143},{"id":144},{"id":145},{"id":146},{"id":169},{"id":76},{"id":77},{"id":78}],"localParams":[{"prop":"task.name","direct":"IN","type":"VARCHAR","value":"restore_from_checkpoint_test.sql","deleteScript":"-n"},{"prop":"task.script.file","direct":"IN","type":"VARCHAR","value":"Projects/feature-works/tasks/stream/jianghua_test/restore_from_checkpoint_test.sql","deleteScript":"-f"},{"prop":"task.execution.parallel","direct":"IN","type":"VARCHAR","value":"3","deleteScript":"-p"},{"prop":"task.execution.slots","direct":"IN","type":"VARCHAR","value":"10","deleteScript":"-s"},{"prop":"task.execution.yjm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-j"},{"prop":"task.execution.ytm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-w"},{"prop":"task.instance.id","direct":"IN","type":"VARCHAR","value":"${system.task.instance.id}"},{"prop":"task.execute.path","direct":"IN","type":"VARCHAR","value":"${system.task.execute.path}"}],"rawScript":"chmod +x ${task.execute.path}/${datamax.home}/${datamax.version}/bin/*\n\n${task.execute.path}/${datamax.home}/${datamax.version}/bin/start.sh -m ${run.mode} -n ${task.name} -f ${task.script.file} -p ${task.execution.parallel} -s ${task.execution.slots} -j ${task.execution.yjm} -w ${task.execution.ytm} -r ${repository.home} -b ${task.execute.path} -c ${datamax.home}/${datamax.version}/lib/${datamax.core.jar} -l ${flink.home}/${flink.version} -e 
${profile.home}/${run.mode}"},"preTasks":[],"preTaskNodeList":[],"extras":null,"depList":[],"dependence":{},"conditionResult":{"successNode":[""],"failedNode":[""]},"taskInstancePriority":"MEDIUM","workerGroup":"default","timeout":{"enable":false,"strategy":null,"interval":0},"delayTime":0}],"globalParams":[{"prop":"repository.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"run.mode","direct":"IN","type":"VARCHAR","value":"stream"},{"prop":"flink.home","direct":"IN","type":"VARCHAR","value":"./flink/"},{"prop":"datamax.home","direct":"IN","type":"VARCHAR","value":"./datamax/"},{"prop":"datamax.core.jar","direct":"IN","type":"VARCHAR","value":"platform-on-flink_1.13.0-2.0-SNAPSHOT.jar"},{"prop":"flink.version","direct":"IN","type":"VARCHAR","value":"1.13.0"},{"prop":"datamax.version","direct":"IN","type":"VARCHAR","value":"2.0.0"},{"prop":"profile.home","direct":"IN","type":"VARCHAR","value":"./profile/prod"},{"prop":"root.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"current.date","direct":"IN","type":"VARCHAR","value":"${system.biz.curdate}"}],"timeout":0,"tenantId":4}}
[ERROR] 2021-07-22 11:51:07.697 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - 更新工作流定义错误 (error while updating the process definition)
org.springframework.dao.DataIntegrityViolationException:
### Error updating database. Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
### The error may exist in org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.java (best guess)
### The error may involve org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper.updateById-Inline
### The error occurred while setting parameters
### SQL: UPDATE t_ds_task_definition SET fail_retry_times=?, flag=?, code=?, task_priority=?, update_time=?, task_params=?, user_id=?, version=?, timeout=?, task_type=?, timeout_flag=?, create_time=?, project_code=?, fail_retry_interval=?, name=?, delay_time=?, worker_group=?, resource_ids=? WHERE id=?
### Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
; ERROR: value too long for type character varying(255); nested exception is org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.springframework.jdbc.support.SQLStateSQLExceptionTranslator.doTranslate(SQLStateSQLExceptionTranslator.java:104)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:72)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:74)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440)
at com.sun.proxy.$Proxy101.update(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.update(SqlSessionTemplate.java:287)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:63)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy122.updateById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.updateTaskDefinition(ProcessService.java:2156)
at org.apache.dolphinscheduler.service.process.ProcessService.handleTaskDefinition(ProcessService.java:2282)
at org.apache.dolphinscheduler.service.process.ProcessService.saveProcessDefinition(ProcessService.java:2212)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$d02bc54d.saveProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:418)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$4564e02d.updateProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:242)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88)
at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:86)
at sun.reflect.GeneratedMethodAccessor159.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633)
at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175)
at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$EnhancerBySpringCGLIB$$22f5b91d.updateProcessDefinition(<generated>)
at sun.reflect.GeneratedMethodAccessor622.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190)
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138)
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797)
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)
at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763)
at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633)
at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:143)
at org.postgresql.jdbc.PgPreparedStatement.execute(PgPreparedStatement.java:132)
at com.alibaba.druid.pool.DruidPooledPreparedStatement.execute(DruidPooledPreparedStatement.java:497)
at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
at sun.reflect.GeneratedMethodAccessor431.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:63)
at com.sun.proxy.$Proxy177.update(Unknown Source)
at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.doUpdate(MybatisSimpleExecutor.java:54)
at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:197)
at sun.reflect.GeneratedMethodAccessor505.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:426)
... 111 common frames omitted
```
**Requirement or improvement**
- I suggest adding validation in the form to check the length of such fields.
| https://github.com/apache/dolphinscheduler/issues/5876 | https://github.com/apache/dolphinscheduler/pull/6457 | 6495a204f3f5520f3ebce8b939f1254ee676facc | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | "2021-07-22T06:00:38Z" | java | "2021-10-14T07:31:06Z" | sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
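-- Each change below is wrapped in a throwaway stored procedure that checks
-- information_schema first, so the upgrade script stays idempotent and can
-- be re-run safely against a partially upgraded database.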
-- uc_dolphin_T_t_ds_user_A_state
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_user'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='state')
THEN
ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_user_A_state;
DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state;
-- uc_dolphin_T_t_ds_tenant_A_tenant_name
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_tenant'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='tenant_name')
THEN
ALTER TABLE t_ds_tenant DROP `tenant_name`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_tenant_A_tenant_name;
DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name;
-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='first_submit_time')
THEN
ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time;
-- uc_dolphin_T_t_ds_task_instance_A_delay_time
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='delay_time')
THEN
ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time;
-- uc_dolphin_T_t_ds_task_instance_A_var_pool
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_task_instance ADD `var_pool` longtext NULL;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool;
-- uc_dolphin_T_t_ds_process_instance_A_var_pool
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool;
-- uc_dolphin_T_t_ds_process_definition_A_modify_by
drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version;
delimiter d//
CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version()
BEGIN
CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`process_definition_json` longtext COMMENT 'process definition json content',
`description` text,
`global_params` text COMMENT 'global parameters',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`receivers` text COMMENT 'receivers',
`receivers_cc` text COMMENT 'cc',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids',
PRIMARY KEY (`id`),
UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE,
KEY `process_definition_index` (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8;
END;
d//
delimiter ;
CALL ct_dolphin_T_t_ds_process_definition_version;
DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version;
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
`plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
`plugin_params` text COMMENT 'plugin params',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_define_id` int NOT NULL,
`plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id;
-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition_version'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id;
-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='alert_instance_ids')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='create_user_id')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND INDEX_NAME ='t_ds_alertgroup_name_un')
THEN
ALTER TABLE t_ds_alertgroup ADD UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`);
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName;
-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS
WHERE TABLE_NAME='t_ds_datasource'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND INDEX_NAME ='t_ds_datasource_name_un')
THEN
ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`);
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName;
-- uc_dolphin_T_t_ds_schedules_A_add_timezone
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_schedules'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='timezone_id')
THEN
ALTER TABLE t_ds_schedules ADD COLUMN `timezone_id` varchar(40) default NULL COMMENT 'schedule timezone id' AFTER `end_time`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_schedules_A_add_timezone();
DROP PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone;
-- uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.STATISTICS
WHERE TABLE_NAME='t_ds_task_definition'
AND TABLE_SCHEMA=(SELECT DATABASE())
          AND INDEX_NAME ='task_unique')
   THEN
         ALTER TABLE t_ds_task_definition DROP INDEX `task_unique`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName();
DROP PROCEDURE uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName;
-- ----------------------------
-- Table structure for t_ds_environment
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment`;
CREATE TABLE `t_ds_environment` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`code` bigint(20) DEFAULT NULL COMMENT 'encoding',
`name` varchar(100) NOT NULL COMMENT 'environment config name',
`config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config',
`description` text NULL DEFAULT NULL COMMENT 'the details',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `environment_name_unique` (`name`),
UNIQUE KEY `environment_code_unique` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition`;
CREATE TABLE `t_ds_task_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
`version` int(11) DEFAULT NULL COMMENT 'task definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_params` longtext COMMENT 'job custom parameters',
`flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
`task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
`worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
`fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
`timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
`timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
`timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
`delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`,`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition_log`;
CREATE TABLE `t_ds_task_definition_log` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
`version` int(11) DEFAULT NULL COMMENT 'task definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_params` text COMMENT 'job custom parameters',
`flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
`task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
`worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
`fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
`timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
`timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
`timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
`delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`operate_time` datetime DEFAULT NULL COMMENT 'operate time',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
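-- Add environment support columns to the existing runtime tables.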
ALTER TABLE t_ds_command ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_error_command ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_schedules ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_process_instance ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_task_instance ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_task_instance ADD COLUMN `environment_config` text COMMENT 'environment config' AFTER `environment_code`;
-- ----------------------------
-- Table structure for t_ds_environment_worker_group_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`;
CREATE TABLE `t_ds_environment_worker_group_relation` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`environment_code` bigint(20) NOT NULL COMMENT 'environment code',
`worker_group` varchar(255) NOT NULL COMMENT 'worker group id',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`;
-- ALTER TABLE t_ds_alertgroup DROP `group_type`;
-- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`;
-- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`;
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
-- ALTER TABLE t_ds_command DROP `dependence`;
-- ALTER TABLE t_ds_error_command DROP `dependence`;
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,876 | [Bug][SQL] Suggest to increase the max length of the field resource_ids in the table t_ds_task_definition. | **Describe the bug**
When I saved a shell task that used many resource files, the service threw a PSQLException.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Create a shell task and select many resource files.
2. Save the workflow and you should see the error. If you don't, try selecting more resource files.
**Expected behavior**
The max length of the field resource_ids in the table t_ds_task_definition should be increased.
**Screenshots**
![image](https://user-images.githubusercontent.com/4928204/126594849-07126f10-9031-43d2-a2ce-50ce9771d1f1.png)
![image](https://user-images.githubusercontent.com/4928204/126594860-5e38dc77-32d2-4d67-9cfe-e9424b8b07d3.png)
**Which version of Dolphin Scheduler:**
- [dev]
**Additional context**
Exception stack trace:
```
[INFO] 2021-07-22 11:51:07.658 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:0702204b-58c0-4f0f-8b4b-dbfef2be63cc, LOGIN_USER:admin, URI:/dolphinscheduler/users/list-paging, METHOD:GET, HANDLER:org.apache.dolphinscheduler.api.controller.UsersController.queryUserList, ARGS:{searchVal=jiang.hua, pageNo=1, pageSize=10}
[INFO] 2021-07-22 11:51:07.682 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[75] - REQUEST TRANCE_ID:55caab3c-e003-4925-88c9-172dd2318d7c, LOGIN_USER:admin, URI:/dolphinscheduler/projects/feature-works/process/update, METHOD:POST, HANDLER:org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition, ARGS:{connects=[], name=jianghua_test, description=test, locations={"tasks-24242":{"name":"restore_from_checkpoint_test123","targetarr":"","nodenumber":"0","x":-800,"y":-900,"color":"#1297DC"}}, id=8, releaseState=OFFLINE, projectName=feature-works, processDefinitionJson={"tasks":[{"id":"tasks-24242","code":491450867713,"version":27,"name":"restore_from_checkpoint_test123","desc":null,"type":"SHELL","runFlag":"NORMAL","loc":null,"maxRetryTimes":3,"retryInterval":1,"params":{"resourceList":[{"id":157},{"id":158},{"id":162},{"id":164},{"id":166},{"id":168},{"id":160},{"id":161},{"id":159},{"id":163},{"id":165},{"id":167},{"id":95},{"id":97},{"id":98},{"id":99},{"id":100},{"id":104},{"id":105},{"id":106},{"id":107},{"id":116},{"id":94},{"id":96},{"id":101},{"id":102},{"id":103},{"id":108},{"id":109},{"id":110},{"id":112},{"id":113},{"id":114},{"id":115},{"id":117},{"id":170},{"id":171},{"id":172},{"id":120},{"id":121},{"id":123},{"id":125},{"id":127},{"id":129},{"id":131},{"id":133},{"id":135},{"id":137},{"id":139},{"id":141},{"id":147},{"id":148},{"id":149},{"id":150},{"id":151},{"id":152},{"id":134},{"id":136},{"id":118},{"id":119},{"id":122},{"id":124},{"id":126},{"id":128},{"id":130},{"id":132},{"id":138},{"id":140},{"id":142},{"id":143},{"id":144},{"id":145},{"id":146},{"id":169},{"id":76},{"id":77},{"id":78}],"localParams":[{"prop":"task.name","direct":"IN","type":"VARCHAR","value":"restore_from_checkpoint_test.sql","deleteScript":"-n"},{"prop":"task.script.file","direct":"IN","type":"VARCHAR","value":"Projects/feature-works/tasks/stream/jianghua_test/restore_from_checkpoint_test.sql","deleteScript":"-f"},{"prop":"task.execution.parallel","direct":"IN","type":"VARCHAR","value":"3","deleteScript":"-p"},{"prop":"task.execution.slots","direct":"IN","type":"VARCHAR","value":"10","deleteScript":"-s"},{"prop":"task.execution.yjm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-j"},{"prop":"task.execution.ytm","direct":"IN","type":"VARCHAR","value":"4096m","deleteScript":"-w"},{"prop":"task.instance.id","direct":"IN","type":"VARCHAR","value":"${system.task.instance.id}"},{"prop":"task.execute.path","direct":"IN","type":"VARCHAR","value":"${system.task.execute.path}"}],"rawScript":"chmod +x ${task.execute.path}/${datamax.home}/${datamax.version}/bin/*\n\n${task.execute.path}/${datamax.home}/${datamax.version}/bin/start.sh -m ${run.mode} -n ${task.name} -f ${task.script.file} -p ${task.execution.parallel} -s ${task.execution.slots} -j ${task.execution.yjm} -w ${task.execution.ytm} -r ${repository.home} -b ${task.execute.path} -c ${datamax.home}/${datamax.version}/lib/${datamax.core.jar} -l ${flink.home}/${flink.version} -e 
${profile.home}/${run.mode}"},"preTasks":[],"preTaskNodeList":[],"extras":null,"depList":[],"dependence":{},"conditionResult":{"successNode":[""],"failedNode":[""]},"taskInstancePriority":"MEDIUM","workerGroup":"default","timeout":{"enable":false,"strategy":null,"interval":0},"delayTime":0}],"globalParams":[{"prop":"repository.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"run.mode","direct":"IN","type":"VARCHAR","value":"stream"},{"prop":"flink.home","direct":"IN","type":"VARCHAR","value":"./flink/"},{"prop":"datamax.home","direct":"IN","type":"VARCHAR","value":"./datamax/"},{"prop":"datamax.core.jar","direct":"IN","type":"VARCHAR","value":"platform-on-flink_1.13.0-2.0-SNAPSHOT.jar"},{"prop":"flink.version","direct":"IN","type":"VARCHAR","value":"1.13.0"},{"prop":"datamax.version","direct":"IN","type":"VARCHAR","value":"2.0.0"},{"prop":"profile.home","direct":"IN","type":"VARCHAR","value":"./profile/prod"},{"prop":"root.home","direct":"IN","type":"VARCHAR","value":"/data/online/datamax-repository"},{"prop":"current.date","direct":"IN","type":"VARCHAR","value":"${system.biz.curdate}"}],"timeout":0,"tenantId":4}}
[ERROR] 2021-07-22 11:51:07.697 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - 更新工作流定义错误 (error while updating the process definition)
org.springframework.dao.DataIntegrityViolationException:
### Error updating database. Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
### The error may exist in org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.java (best guess)
### The error may involve org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper.updateById-Inline
### The error occurred while setting parameters
### SQL: UPDATE t_ds_task_definition SET fail_retry_times=?, flag=?, code=?, task_priority=?, update_time=?, task_params=?, user_id=?, version=?, timeout=?, task_type=?, timeout_flag=?, create_time=?, project_code=?, fail_retry_interval=?, name=?, delay_time=?, worker_group=?, resource_ids=? WHERE id=?
### Cause: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
; ERROR: value too long for type character varying(255); nested exception is org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.springframework.jdbc.support.SQLStateSQLExceptionTranslator.doTranslate(SQLStateSQLExceptionTranslator.java:104)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:72)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.springframework.jdbc.support.AbstractFallbackSQLExceptionTranslator.translate(AbstractFallbackSQLExceptionTranslator.java:81)
at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:74)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440)
at com.sun.proxy.$Proxy101.update(Unknown Source)
at org.mybatis.spring.SqlSessionTemplate.update(SqlSessionTemplate.java:287)
at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:63)
at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61)
at com.sun.proxy.$Proxy122.updateById(Unknown Source)
at org.apache.dolphinscheduler.service.process.ProcessService.updateTaskDefinition(ProcessService.java:2156)
at org.apache.dolphinscheduler.service.process.ProcessService.handleTaskDefinition(ProcessService.java:2282)
at org.apache.dolphinscheduler.service.process.ProcessService.saveProcessDefinition(ProcessService.java:2212)
at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$d02bc54d.saveProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.updateProcessDefinition(ProcessDefinitionServiceImpl.java:418)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$4564e02d.updateProcessDefinition(<generated>)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.updateProcessDefinition(ProcessDefinitionController.java:242)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88)
at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:86)
at sun.reflect.GeneratedMethodAccessor159.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644)
at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633)
at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175)
at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691)
at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$EnhancerBySpringCGLIB$$22f5b91d.updateProcessDefinition(<generated>)
at sun.reflect.GeneratedMethodAccessor622.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190)
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138)
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892)
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797)
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)
at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763)
at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633)
at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:143)
at org.postgresql.jdbc.PgPreparedStatement.execute(PgPreparedStatement.java:132)
at com.alibaba.druid.pool.DruidPooledPreparedStatement.execute(DruidPooledPreparedStatement.java:497)
at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
at sun.reflect.GeneratedMethodAccessor431.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:63)
at com.sun.proxy.$Proxy177.update(Unknown Source)
at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.doUpdate(MybatisSimpleExecutor.java:54)
at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:197)
at sun.reflect.GeneratedMethodAccessor505.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:426)
... 111 common frames omitted
```
**Requirement or improvement**
- I suggest adding validation that checks the form fields (e.g. length limits) before saving, so oversized values are rejected with a clear message instead of a database error. A sketch of such a check follows.
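
A minimal sketch of such a check, assuming a hypothetical helper (`FieldLengthValidator` is illustrative, not an existing DolphinScheduler API); the 255 limit mirrors the `resource_ids varchar(255)` column that overflowed above:

```java
// Hypothetical pre-save guard; class and field names are illustrative only.
public final class FieldLengthValidator {

    private FieldLengthValidator() {
    }

    /**
     * Throws IllegalArgumentException when the value would overflow the column,
     * so the API can return a clear validation error instead of a PSQLException.
     */
    public static void checkLength(String fieldName, String value, int maxLen) {
        if (value != null && value.length() > maxLen) {
            throw new IllegalArgumentException(String.format(
                    "field '%s' is %d characters long, exceeding the limit of %d",
                    fieldName, value.length(), maxLen));
        }
    }
}

// usage before persisting, e.g.:
// FieldLengthValidator.checkLength("resource_ids", resourceIds, 255);
```

Called before `TaskDefinitionMapper.updateById`, this would reject the oversized `resource_ids` value up front; alternatively the column could be widened to `text`.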
| https://github.com/apache/dolphinscheduler/issues/5876 | https://github.com/apache/dolphinscheduler/pull/6457 | 6495a204f3f5520f3ebce8b939f1254ee676facc | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | "2021-07-22T06:00:38Z" | java | "2021-10-14T07:31:06Z" | sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
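-- Note: each change below is wrapped in a throwaway plpgsql function so it can
-- consult information_schema / pg_stat_all_indexes first, which keeps the upgrade
-- idempotent when re-run. The 'delimiter' lines are not psql syntax; they are
-- markers consumed by the upgrade script runner when it splits this file into statements.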
-- uc_dolphin_T_t_ds_user_A_state
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_user'
AND COLUMN_NAME ='state')
THEN
ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1;
comment on column t_ds_user.state is 'state 0:disable 1:enable';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_user_A_state();
DROP FUNCTION uc_dolphin_T_t_ds_user_A_state();
-- uc_dolphin_T_t_ds_tenant_A_tenant_name
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_tenant'
AND COLUMN_NAME ='tenant_name')
THEN
ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name";
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_tenant_A_tenant_name();
DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name();
-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='first_submit_time')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
-- uc_dolphin_T_t_ds_task_instance_A_delay_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='delay_time')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time();
-- uc_dolphin_T_t_ds_task_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool();
-- uc_dolphin_T_t_ds_process_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool();
-- uc_dolphin_T_t_ds_process_definition_A_modify_by
delimiter d//
CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$
BEGIN
CREATE TABLE IF NOT EXISTS t_ds_process_definition_version (
id int NOT NULL ,
process_definition_id int NOT NULL ,
version int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
locations text ,
connects text ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT ct_dolphin_T_t_ds_process_definition_version();
DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version();
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);
-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition_version'
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND COLUMN_NAME ='alert_instance_ids')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL;
COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND COLUMN_NAME ='create_user_id')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
WHERE relname='t_ds_alertgroup'
AND indexrelname ='t_ds_alertgroup_name_un')
THEN
ALTER TABLE t_ds_alertgroup ADD CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name);
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
WHERE relname='t_ds_datasource'
AND indexrelname ='t_ds_datasource_name_un')
THEN
ALTER TABLE t_ds_datasource ADD CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type);
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
-- uc_dolphin_T_t_ds_schedules_A_add_timezone
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_schedules_A_add_timezone() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_schedules'
AND COLUMN_NAME ='timezone_id')
THEN
ALTER TABLE t_ds_schedules ADD COLUMN timezone_id varchar(40) DEFAULT NULL;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_schedules_A_add_timezone();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone();
--
-- Table structure for table t_ds_environment
--
DROP TABLE IF EXISTS t_ds_environment;
CREATE TABLE t_ds_environment (
id serial NOT NULL ,
code bigint NOT NULL,
name varchar(100) DEFAULT NULL ,
config text DEFAULT NULL ,
description text ,
operator int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT environment_name_unique UNIQUE (name),
CONSTRAINT environment_code_unique UNIQUE (code)
);
ALTER TABLE t_ds_task_definition ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_task_definition.environment_code is 'environment code';
ALTER TABLE t_ds_task_definition_log ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_task_definition_log.environment_code is 'environment code';
ALTER TABLE t_ds_command ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_command.environment_code is 'environment code';
ALTER TABLE t_ds_error_command ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_error_command.environment_code is 'environment code';
ALTER TABLE t_ds_schedules ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_schedules.environment_code is 'environment code';
ALTER TABLE t_ds_process_instance ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_process_instance.environment_code is 'environment code';
ALTER TABLE t_ds_task_instance ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_task_instance.environment_code is 'environment code';
ALTER TABLE t_ds_task_instance ADD COLUMN environment_config text;
comment on column t_ds_task_instance.environment_config is 'environment config';
--
-- Table structure for table t_ds_environment_worker_group_relation
--
DROP TABLE IF EXISTS t_ds_environment_worker_group_relation;
CREATE TABLE t_ds_environment_worker_group_relation (
id serial NOT NULL,
environment_code bigint NOT NULL,
worker_group varchar(255) NOT NULL,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id) ,
CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group)
);
-- uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_stat_all_indexes
WHERE relname='t_ds_task_definition'
AND indexrelname ='task_definition_unique')
THEN
ALTER TABLE t_ds_task_definition DROP CONSTRAINT task_definition_unique;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName();
-- ----------------------------
-- These columns will not be used in the new version; if you determine that the historical data is useless, you can delete it using the SQL below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type";
-- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
-- ALTER TABLE t_ds_command DROP COLUMN "dependence";
-- ALTER TABLE t_ds_error_command DROP COLUMN "dependence"; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,543 | [Bug] [Master] process instance state is always running when failure task exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The process instance state stays running forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### What you expected to happen
The process instance state should be failed when a failed task exists.
### How to reproduce
The process instance state stays running forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6543 | https://github.com/apache/dolphinscheduler/pull/6547 | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | "2021-10-15T08:39:19Z" | java | "2021-10-15T10:04:04Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.entity;
import com.fasterxml.jackson.core.type.TypeReference;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.task.dependent.DependentParameters;
import org.apache.dolphinscheduler.common.task.switchtask.SwitchParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import java.io.Serializable;
import java.util.Date;
import java.util.Map;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.annotation.JsonFormat;
/**
* task instance
*/
@TableName("t_ds_task_instance")
public class TaskInstance implements Serializable {
/**
* id
*/
@TableId(value = "id", type = IdType.AUTO)
private int id;
/**
* task name
*/
private String name;
/**
* task type
*/
private String taskType;
/**
* process instance id
*/
private int processInstanceId;
/**
* task code
*/
private long taskCode;
/**
* task definition version
*/
private int taskDefinitionVersion;
/**
* process instance name
*/
@TableField(exist = false)
private String processInstanceName;
/**
* state
*/
private ExecutionStatus state;
/**
* task first submit time.
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date firstSubmitTime;
/**
* task submit time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date submitTime;
/**
* task start time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date startTime;
/**
* task end time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date endTime;
/**
* task host
*/
private String host;
/**
* task shell execute path and the resources downloaded from HDFS
* default path: $base_run_dir/processInstanceId/taskInstanceId/retryTimes
*/
private String executePath;
/**
* task log path
* default path: $base_run_dir/processInstanceId/taskInstanceId/retryTimes
*/
private String logPath;
/**
* retry times
*/
private int retryTimes;
/**
* alert flag
*/
private Flag alertFlag;
/**
* process instance
*/
@TableField(exist = false)
private ProcessInstance processInstance;
/**
* process definition
*/
@TableField(exist = false)
private ProcessDefinition processDefine;
/**
* task definition
*/
@TableField(exist = false)
private TaskDefinition taskDefine;
/**
* process id
*/
private int pid;
/**
* appLink
*/
private String appLink;
/**
* flag
*/
private Flag flag;
/**
* dependency
*/
@TableField(exist = false)
private DependentParameters dependency;
/**
* switch dependency
*/
@TableField(exist = false)
private SwitchParameters switchDependency;
/**
* duration
*/
@TableField(exist = false)
private String duration;
/**
* max retry times
*/
private int maxRetryTimes;
/**
* task retry interval, unit: minute
*/
private int retryInterval;
/**
* task instance priority
*/
private Priority taskInstancePriority;
/**
* process instance priority
*/
@TableField(exist = false)
private Priority processInstancePriority;
/**
* dependent state
*/
@TableField(exist = false)
private String dependentResult;
/**
* workerGroup
*/
private String workerGroup;
/**
* environment code
*/
private Long environmentCode;
/**
* environment config
*/
private String environmentConfig;
/**
* executor id
*/
private int executorId;
/**
* varPool string
*/
private String varPool;
/**
* executor name
*/
@TableField(exist = false)
private String executorName;
@TableField(exist = false)
private Map<String, String> resources;
/**
* delay execution time.
*/
private int delayTime;
/**
* task params
*/
private String taskParams;
/**
* dry run flag
*/
private int dryRun;
public void init(String host, Date startTime, String executePath) {
this.host = host;
this.startTime = startTime;
this.executePath = executePath;
}
public String getVarPool() {
return varPool;
}
public void setVarPool(String varPool) {
this.varPool = varPool;
}
public ProcessInstance getProcessInstance() {
return processInstance;
}
public void setProcessInstance(ProcessInstance processInstance) {
this.processInstance = processInstance;
}
public ProcessDefinition getProcessDefine() {
return processDefine;
}
public void setProcessDefine(ProcessDefinition processDefine) {
this.processDefine = processDefine;
}
public TaskDefinition getTaskDefine() {
return taskDefine;
}
public void setTaskDefine(TaskDefinition taskDefine) {
this.taskDefine = taskDefine;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getTaskType() {
return taskType;
}
public void setTaskType(String taskType) {
this.taskType = taskType;
}
public int getProcessInstanceId() {
return processInstanceId;
}
public void setProcessInstanceId(int processInstanceId) {
this.processInstanceId = processInstanceId;
}
public ExecutionStatus getState() {
return state;
}
public void setState(ExecutionStatus state) {
this.state = state;
}
public Date getFirstSubmitTime() {
return firstSubmitTime;
}
public void setFirstSubmitTime(Date firstSubmitTime) {
this.firstSubmitTime = firstSubmitTime;
}
public Date getSubmitTime() {
return submitTime;
}
public void setSubmitTime(Date submitTime) {
this.submitTime = submitTime;
}
public Date getStartTime() {
return startTime;
}
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
public Date getEndTime() {
return endTime;
}
public void setEndTime(Date endTime) {
this.endTime = endTime;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public String getExecutePath() {
return executePath;
}
public void setExecutePath(String executePath) {
this.executePath = executePath;
}
public String getLogPath() {
return logPath;
}
public void setLogPath(String logPath) {
this.logPath = logPath;
}
public Flag getAlertFlag() {
return alertFlag;
}
public void setAlertFlag(Flag alertFlag) {
this.alertFlag = alertFlag;
}
public int getRetryTimes() {
return retryTimes;
}
public void setRetryTimes(int retryTimes) {
this.retryTimes = retryTimes;
}
public Boolean isTaskSuccess() {
return this.state == ExecutionStatus.SUCCESS;
}
public int getPid() {
return pid;
}
public void setPid(int pid) {
this.pid = pid;
}
public String getAppLink() {
return appLink;
}
public void setAppLink(String appLink) {
this.appLink = appLink;
}
public Long getEnvironmentCode() {
return this.environmentCode;
}
public void setEnvironmentCode(Long environmentCode) {
this.environmentCode = environmentCode;
}
public String getEnvironmentConfig() {
return this.environmentConfig;
}
public void setEnvironmentConfig(String environmentConfig) {
this.environmentConfig = environmentConfig;
}
public DependentParameters getDependency() {
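// lazily parse the dependence section out of the task params JSON on first access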
if (this.dependency == null) {
Map<String, Object> taskParamsMap = JSONUtils.parseObject(this.getTaskParams(), new TypeReference<Map<String, Object>>() {});
this.dependency = JSONUtils.parseObject((String) taskParamsMap.get(Constants.DEPENDENCE), DependentParameters.class);
}
return this.dependency;
}
public void setDependency(DependentParameters dependency) {
this.dependency = dependency;
}
public SwitchParameters getSwitchDependency() {
if (this.switchDependency == null) {
Map<String, Object> taskParamsMap = JSONUtils.parseObject(this.getTaskParams(), new TypeReference<Map<String, Object>>() {});
this.switchDependency = JSONUtils.parseObject((String) taskParamsMap.get(Constants.SWITCH_RESULT), SwitchParameters.class);
}
return this.switchDependency;
}
public void setSwitchDependency(SwitchParameters switchDependency) {
Map<String, Object> taskParamsMap = JSONUtils.parseObject(this.getTaskParams(), new TypeReference<Map<String, Object>>() {});
taskParamsMap.put(Constants.SWITCH_RESULT,JSONUtils.toJsonString(switchDependency));
this.setTaskParams(JSONUtils.toJsonString(taskParamsMap));
}
public Flag getFlag() {
return flag;
}
public void setFlag(Flag flag) {
this.flag = flag;
}
public String getProcessInstanceName() {
return processInstanceName;
}
public void setProcessInstanceName(String processInstanceName) {
this.processInstanceName = processInstanceName;
}
public String getDuration() {
return duration;
}
public void setDuration(String duration) {
this.duration = duration;
}
public int getMaxRetryTimes() {
return maxRetryTimes;
}
public void setMaxRetryTimes(int maxRetryTimes) {
this.maxRetryTimes = maxRetryTimes;
}
public int getRetryInterval() {
return retryInterval;
}
public void setRetryInterval(int retryInterval) {
this.retryInterval = retryInterval;
}
public int getExecutorId() {
return executorId;
}
public void setExecutorId(int executorId) {
this.executorId = executorId;
}
public String getExecutorName() {
return executorName;
}
public void setExecutorName(String executorName) {
this.executorName = executorName;
}
public int getDryRun() {
return dryRun;
}
public void setDryRun(int dryRun) {
this.dryRun = dryRun;
}
public boolean isTaskComplete() {
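// a task counts as complete when paused, succeeded, cancelled, or failed with no retries left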
return this.getState().typeIsPause()
|| this.getState().typeIsSuccess()
|| this.getState().typeIsCancel()
|| (this.getState().typeIsFailure() && !taskCanRetry());
}
public Map<String, String> getResources() {
return resources;
}
public void setResources(Map<String, String> resources) {
this.resources = resources;
}
public boolean isSubProcess() {
return TaskType.SUB_PROCESS.getDesc().equalsIgnoreCase(this.taskType);
}
public boolean isDependTask() {
return TaskType.DEPENDENT.getDesc().equalsIgnoreCase(this.taskType);
}
public boolean isConditionsTask() {
return TaskType.CONDITIONS.getDesc().equalsIgnoreCase(this.taskType);
}
public boolean isSwitchTask() {
return TaskType.SWITCH.getDesc().equalsIgnoreCase(this.taskType);
}
/**
* determine whether this task can be retried
*
* @return true if the task can be retried
*/
public boolean taskCanRetry() {
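// sub-process task instances are never retried here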
if (this.isSubProcess()) {
return false;
}
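// NEED_FAULT_TOLERANCE tasks (e.g. after a worker failover) are always resubmitted,
// regardless of the retry count; otherwise retry only while failures remain under maxRetryTimes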
if (this.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE) {
return true;
} else {
return (this.getState().typeIsFailure()
&& this.getRetryTimes() < this.getMaxRetryTimes());
}
}
public Priority getTaskInstancePriority() {
return taskInstancePriority;
}
public void setTaskInstancePriority(Priority taskInstancePriority) {
this.taskInstancePriority = taskInstancePriority;
}
public Priority getProcessInstancePriority() {
return processInstancePriority;
}
public void setProcessInstancePriority(Priority processInstancePriority) {
this.processInstancePriority = processInstancePriority;
}
public String getWorkerGroup() {
return workerGroup;
}
public void setWorkerGroup(String workerGroup) {
this.workerGroup = workerGroup;
}
public String getDependentResult() {
return dependentResult;
}
public void setDependentResult(String dependentResult) {
this.dependentResult = dependentResult;
}
public int getDelayTime() {
return delayTime;
}
public void setDelayTime(int delayTime) {
this.delayTime = delayTime;
}
@Override
public String toString() {
return "TaskInstance{"
+ "id=" + id
+ ", name='" + name + '\''
+ ", taskType='" + taskType + '\''
+ ", processInstanceId=" + processInstanceId
+ ", processInstanceName='" + processInstanceName + '\''
+ ", state=" + state
+ ", firstSubmitTime=" + firstSubmitTime
+ ", submitTime=" + submitTime
+ ", startTime=" + startTime
+ ", endTime=" + endTime
+ ", host='" + host + '\''
+ ", executePath='" + executePath + '\''
+ ", logPath='" + logPath + '\''
+ ", retryTimes=" + retryTimes
+ ", alertFlag=" + alertFlag
+ ", processInstance=" + processInstance
+ ", processDefine=" + processDefine
+ ", pid=" + pid
+ ", appLink='" + appLink + '\''
+ ", flag=" + flag
+ ", dependency='" + dependency + '\''
+ ", duration=" + duration
+ ", maxRetryTimes=" + maxRetryTimes
+ ", retryInterval=" + retryInterval
+ ", taskInstancePriority=" + taskInstancePriority
+ ", processInstancePriority=" + processInstancePriority
+ ", dependentResult='" + dependentResult + '\''
+ ", workerGroup='" + workerGroup + '\''
+ ", environmentCode=" + environmentCode
+ ", environmentConfig='" + environmentConfig + '\''
+ ", executorId=" + executorId
+ ", executorName='" + executorName + '\''
+ ", delayTime=" + delayTime
+ ", dryRun=" + dryRun
+ '}';
}
public long getTaskCode() {
return taskCode;
}
public void setTaskCode(long taskCode) {
this.taskCode = taskCode;
}
public int getTaskDefinitionVersion() {
return taskDefinitionVersion;
}
public void setTaskDefinitionVersion(int taskDefinitionVersion) {
this.taskDefinitionVersion = taskDefinitionVersion;
}
public String getTaskParams() {
return taskParams;
}
public void setTaskParams(String taskParams) {
this.taskParams = taskParams;
}
public boolean isFirstRun() {
return endTime == null;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,543 | [Bug] [Master] process instance state is always running when failure task exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The process instance state stays running forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### What you expected to happen
The process instance state should be failed when a failed task exists.
### How to reproduce
The process instance state stays running forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6543 | https://github.com/apache/dolphinscheduler/pull/6547 | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | "2021-10-15T08:39:19Z" | java | "2021-10-15T10:04:04Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/EventExecuteService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.commons.lang.StringUtils;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
@Service
public class EventExecuteService extends Thread {
private static final Logger logger = LoggerFactory.getLogger(EventExecuteService.class);
/**
* dolphinscheduler database interface
*/
@Autowired
private ProcessService processService;
@Autowired
private MasterConfig masterConfig;
private ExecutorService eventExecService;
/**
* callback service used to send state-change events to remote masters
*/
private StateEventCallbackService stateEventCallbackService;
private ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps;
private ConcurrentHashMap<String, WorkflowExecuteThread> eventHandlerMap = new ConcurrentHashMap<>();
ListeningExecutorService listeningExecutorService;
public void init(ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps) {
eventExecService = ThreadUtils.newDaemonFixedThreadExecutor("MasterEventExecution", masterConfig.getMasterExecThreads());
this.processInstanceExecMaps = processInstanceExecMaps;
listeningExecutorService = MoreExecutors.listeningDecorator(eventExecService);
this.stateEventCallbackService = SpringApplicationContext.getBean(StateEventCallbackService.class);
}
@Override
public synchronized void start() {
super.setName("EventServiceStarted");
super.start();
}
public void close() {
eventExecService.shutdown();
logger.info("event service stopped...");
}
@Override
public void run() {
logger.info("Event service started");
while (Stopper.isRunning()) {
try {
eventHandler();
TimeUnit.MILLISECONDS.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (Exception e) {
logger.error("Event service thread error", e);
}
}
}
private void eventHandler() {
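// hand each workflow with pending events to the event executor, skipping workflows
// with no events, no key, or ones already being handled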
for (WorkflowExecuteThread workflowExecuteThread : this.processInstanceExecMaps.values()) {
if (workflowExecuteThread.eventSize() == 0
|| StringUtils.isEmpty(workflowExecuteThread.getKey())
|| eventHandlerMap.containsKey(workflowExecuteThread.getKey())) {
continue;
}
int processInstanceId = workflowExecuteThread.getProcessInstance().getId();
logger.info("handle process instance : {} events, count:{}",
processInstanceId,
workflowExecuteThread.eventSize());
logger.info("already exists handler process size:{}", this.eventHandlerMap.size());
eventHandlerMap.put(workflowExecuteThread.getKey(), workflowExecuteThread);
ListenableFuture<?> future = this.listeningExecutorService.submit(workflowExecuteThread);
FutureCallback<Object> futureCallback = new FutureCallback<Object>() {
@Override
public void onSuccess(Object o) {
if (workflowExecuteThread.workFlowFinish()) {
processInstanceExecMaps.remove(processInstanceId);
notifyProcessChanged();
logger.info("process instance {} finished.", processInstanceId);
}
if (workflowExecuteThread.getProcessInstance().getId() != processInstanceId) {
processInstanceExecMaps.remove(processInstanceId);
processInstanceExecMaps.put(workflowExecuteThread.getProcessInstance().getId(), workflowExecuteThread);
}
eventHandlerMap.remove(workflowExecuteThread.getKey());
}
private void notifyProcessChanged() {
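// notify father process instances waiting on this finished process:
// directly if this master hosts them, otherwise via a remote state-change command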
Map<ProcessInstance, TaskInstance> fatherMaps
= processService.notifyProcessList(processInstanceId, 0);
for (ProcessInstance processInstance : fatherMaps.keySet()) {
String address = NetUtils.getAddr(masterConfig.getListenPort());
if (processInstance.getHost().equalsIgnoreCase(address)) {
notifyMyself(processInstance, fatherMaps.get(processInstance));
} else {
notifyProcess(processInstance, fatherMaps.get(processInstance));
}
}
}
private void notifyMyself(ProcessInstance processInstance, TaskInstance taskInstance) {
logger.info("notify process {} task {} state change", processInstance.getId(), taskInstance.getId());
if (!processInstanceExecMaps.containsKey(processInstance.getId())) {
return;
}
WorkflowExecuteThread workflowExecuteThreadNotify = processInstanceExecMaps.get(processInstance.getId());
StateEvent stateEvent = new StateEvent();
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
stateEvent.setProcessInstanceId(processInstance.getId());
stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
workflowExecuteThreadNotify.addStateEvent(stateEvent);
}
private void notifyProcess(ProcessInstance processInstance, TaskInstance taskInstance) {
String host = processInstance.getHost();
if (StringUtils.isEmpty(host)) {
logger.info("process {} host is empty, cannot notify task {} now.",
processInstance.getId(), taskInstance.getId());
return;
}
String address = host.split(":")[0];
int port = Integer.parseInt(host.split(":")[1]);
logger.info("notify process {} task {} state change, host:{}",
processInstance.getId(), taskInstance.getId(), host);
StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand(
processInstanceId, 0, workflowExecuteThread.getProcessInstance().getState(), processInstance.getId(), taskInstance.getId()
);
stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command());
}
@Override
public void onFailure(Throwable throwable) {
}
};
Futures.addCallback(future, futureCallback, this.listeningExecutorService);
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,543 | [Bug] [Master] process instance state is always running when failure task exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The process instance state stays running forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### What you expected to happen
The process instance state should be failed when a failed task exists.
### How to reproduce
The process instance state stays running forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6543 | https://github.com/apache/dolphinscheduler/pull/6547 | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | "2021-10-15T08:39:19Z" | java | "2021-10-15T10:04:04Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/StateWheelExecuteThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.hadoop.util.ThreadUtil;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* 1. timeout check wheel
* 2. dependent task check wheel
*/
public class StateWheelExecuteThread extends Thread {
private static final Logger logger = LoggerFactory.getLogger(StateWheelExecuteThread.class);
ConcurrentHashMap<Integer, ProcessInstance> processInstanceCheckList;
ConcurrentHashMap<Integer, TaskInstance> taskInstanceCheckList;
private ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps;
private int stateCheckIntervalSecs;
public StateWheelExecuteThread(ConcurrentHashMap<Integer, ProcessInstance> processInstances,
ConcurrentHashMap<Integer, TaskInstance> taskInstances,
ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps,
int stateCheckIntervalSecs) {
this.processInstanceCheckList = processInstances;
this.taskInstanceCheckList = taskInstances;
this.processInstanceExecMaps = processInstanceExecMaps;
this.stateCheckIntervalSecs = stateCheckIntervalSecs;
}
@Override
public void run() {
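// periodically scan the registered process and task instances,
// firing timeout and dependency-check events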
logger.info("state wheel thread start");
while (Stopper.isRunning()) {
try {
checkProcess();
checkTask();
} catch (Exception e) {
logger.error("state wheel thread check error:", e);
}
ThreadUtil.sleepAtLeastIgnoreInterrupts(stateCheckIntervalSecs);
}
}
public boolean addProcess(ProcessInstance processInstance) {
this.processInstanceCheckList.put(processInstance.getId(), processInstance);
return true;
}
public boolean addTask(TaskInstance taskInstance) {
this.taskInstanceCheckList.put(taskInstance.getId(), taskInstance);
return true;
}
private void checkTask() {
if (taskInstanceCheckList.isEmpty()) {
return;
}
for (TaskInstance taskInstance : this.taskInstanceCheckList.values()) {
if (TimeoutFlag.OPEN == taskInstance.getTaskDefine().getTimeoutFlag()) {
long timeRemain = DateUtils.getRemainTime(taskInstance.getStartTime(), taskInstance.getTaskDefine().getTimeout() * Constants.SEC_2_MINUTES_TIME_UNIT);
if (0 <= timeRemain && processTimeout(taskInstance)) {
taskInstanceCheckList.remove(taskInstance.getId());
return;
}
}
if (taskInstance.isSubProcess() || taskInstance.isDependTask()) {
processDependCheck(taskInstance);
}
}
}
private void checkProcess() {
if (processInstanceCheckList.isEmpty()) {
return;
}
for (ProcessInstance processInstance : this.processInstanceCheckList.values()) {
long timeRemain = DateUtils.getRemainTime(processInstance.getStartTime(), processInstance.getTimeout() * Constants.SEC_2_MINUTES_TIME_UNIT);
if (0 <= timeRemain && processTimeout(processInstance)) {
processInstanceCheckList.remove(processInstance.getId());
}
}
}
private void putEvent(StateEvent stateEvent) {
if (!processInstanceExecMaps.containsKey(stateEvent.getProcessInstanceId())) {
return;
}
WorkflowExecuteThread workflowExecuteThread = this.processInstanceExecMaps.get(stateEvent.getProcessInstanceId());
workflowExecuteThread.addStateEvent(stateEvent);
}
private boolean processDependCheck(TaskInstance taskInstance) {
StateEvent stateEvent = new StateEvent();
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
stateEvent.setProcessInstanceId(taskInstance.getProcessInstanceId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
putEvent(stateEvent);
return true;
}
private boolean processTimeout(TaskInstance taskInstance) {
StateEvent stateEvent = new StateEvent();
stateEvent.setType(StateEventType.TASK_TIMEOUT);
stateEvent.setProcessInstanceId(taskInstance.getProcessInstanceId());
stateEvent.setTaskInstanceId(taskInstance.getId());
putEvent(stateEvent);
return true;
}
private boolean processTimeout(ProcessInstance processInstance) {
StateEvent stateEvent = new StateEvent();
stateEvent.setType(StateEventType.PROCESS_TIMEOUT);
stateEvent.setProcessInstanceId(processInstance.getId());
putEvent(stateEvent);
return true;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,543 | [Bug] [Master] process instance state is always running when failure task exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The process instance state stays running when a failed task exists.
The failed task settings: retry times: 1, retry interval: 1 minute.
### What you expected to happen
The process instance state should be failed when a failed task exists.
### How to reproduce
The process instance state stays running when a failed task exists.
The failed task settings: retry times: 1, retry interval: 1 minute.
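For reference, a minimal sketch of the check I would expect to pass once the retry is exhausted (a hypothetical assertion, not an existing test; `processInstanceId` and the `processService` wiring are assumed):

```java
// Hypothetical check, not an existing test: once the single retry of the
// failed task is used up, the workflow should leave RUNNING_EXECUTION.
ProcessInstance instance = processService.findProcessInstanceById(processInstanceId);
Assert.assertEquals("expected FAILURE once retries are exhausted",
        ExecutionStatus.FAILURE, instance.getState());
```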
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6543 | https://github.com/apache/dolphinscheduler/pull/6547 | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | "2021-10-15T08:39:19Z" | java | "2021-10-15T10:04:04Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Lists;
import com.google.common.collect.Table;
/**
* master exec thread, splits the dag
*/
public class WorkflowExecuteThread implements Runnable {
/**
* logger of WorkflowExecuteThread
*/
private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class);
/**
* running task processors, task instance id -> task processor
*/
private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* process instance
*/
private ProcessInstance processInstance;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task queue
*/
private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private ProcessAlertManager processAlertManager;
/**
* the object of DAG
*/
private DAG<String, TaskNode, TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
* netty executor manager
*/
private NettyExecutorManager nettyExecutorManager;
private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>();
private List<Date> complementListDate = Lists.newLinkedList();
private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create();
private ProcessDefinition processDefinition;
private String key;
private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList;
/**
* start flag, true: start nodes have been submitted completely
*/
private boolean isStart = false;
/**
* constructor of WorkflowExecuteThread
*
* @param processInstance processInstance
* @param processService processService
* @param nettyExecutorManager nettyExecutorManager
* @param taskTimeoutCheckList task timeout check list
*/
public WorkflowExecuteThread(ProcessInstance processInstance
, ProcessService processService
, NettyExecutorManager nettyExecutorManager
, ProcessAlertManager processAlertManager
, MasterConfig masterConfig
, ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) {
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = masterConfig;
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyExecutorManager = nettyExecutorManager;
this.processAlertManager = processAlertManager;
this.taskTimeoutCheckList = taskTimeoutCheckList;
}
@Override
public void run() {
try {
startProcess();
handleEvents();
} catch (Exception e) {
logger.error("handler error:", e);
}
}
/**
* whether the process start nodes have been submitted completely.
* @return true if the start nodes have been submitted
*/
public boolean isStart() {
return this.isStart;
}
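/**
* drain the state event queue and handle each event in order
*/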
private void handleEvents() {
while (this.stateEvents.size() > 0) {
try {
StateEvent stateEvent = this.stateEvents.peek();
if (stateEventHandler(stateEvent)) {
this.stateEvents.remove(stateEvent);
}
} catch (Exception e) {
logger.error("state handle error:", e);
}
}
}
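/**
* get the unique key of this workflow: processDefinitionCode_version_processInstanceId
*/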
public String getKey() {
if (StringUtils.isNotEmpty(key)
|| this.processDefinition == null) {
return key;
}
key = String.format("%d_%d_%d",
this.processDefinition.getCode(),
this.processDefinition.getVersion(),
this.processInstance.getId());
return key;
}
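/**
* add a state event if it belongs to this process instance
*/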
public boolean addStateEvent(StateEvent stateEvent) {
if (processInstance.getId() != stateEvent.getProcessInstanceId()) {
logger.info("state event would be abounded :{}", stateEvent.toString());
return false;
}
this.stateEvents.add(stateEvent);
return true;
}
public int eventSize() {
return this.stateEvents.size();
}
public ProcessInstance getProcessInstance() {
return this.processInstance;
}
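/**
* dispatch a state event to the matching handler by event type
*/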
private boolean stateEventHandler(StateEvent stateEvent) {
logger.info("process event: {}", stateEvent.toString());
if (!checkStateEvent(stateEvent)) {
return false;
}
boolean result = false;
switch (stateEvent.getType()) {
case PROCESS_STATE_CHANGE:
result = processStateChangeHandler(stateEvent);
break;
case TASK_STATE_CHANGE:
result = taskStateChangeHandler(stateEvent);
break;
case PROCESS_TIMEOUT:
result = processTimeout();
break;
case TASK_TIMEOUT:
result = taskTimeout(stateEvent);
break;
default:
break;
}
if (result) {
this.stateEvents.remove(stateEvent);
}
return result;
}
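/**
* handle a task timeout event according to the timeout notify strategy
*/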
private boolean taskTimeout(StateEvent stateEvent) {
if (!taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) {
return true;
}
TaskInstance taskInstance = taskInstanceHashMap
.row(stateEvent.getTaskInstanceId())
.values()
.iterator().next();
if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) {
return true;
}
TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy();
if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) {
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
taskProcessor.action(TaskAction.TIMEOUT);
return false;
} else {
processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine());
return true;
}
}
private boolean processTimeout() {
this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition);
return true;
}
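/**
* handle a task state change event
*/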
private boolean taskStateChangeHandler(StateEvent stateEvent) {
TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
if (stateEvent.getExecutionStatus().typeIsFinished()) {
taskFinished(task);
} else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) {
ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
iTaskProcessor.run();
if (iTaskProcessor.taskState().typeIsFinished()) {
task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
taskFinished(task);
}
} else {
logger.error("state handler error: {}", stateEvent.toString());
}
return true;
}
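/**
* handle a finished task: retry if possible, record the result and drive post nodes
*/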
private void taskFinished(TaskInstance task) {
logger.info("work flow {} task {} state:{} ",
processInstance.getId(),
task.getId(),
task.getState());
if (task.taskCanRetry()) {
addTaskToStandByList(task);
return;
}
ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId());
completeTaskList.put(Long.toString(task.getTaskCode()), task);
activeTaskProcessorMaps.remove(task.getId());
taskTimeoutCheckList.remove(task.getId());
if (task.getState().typeIsSuccess()) {
processInstance.setVarPool(task.getVarPool());
processService.saveProcessInstance(processInstance);
submitPostNode(Long.toString(task.getTaskCode()));
} else if (task.getState().typeIsFailure()) {
if (task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
submitPostNode(Long.toString(task.getTaskCode()));
} else {
errorTaskList.put(Long.toString(task.getTaskCode()), task);
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
killAllTasks();
}
}
}
this.updateProcessInstanceState();
}
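/**
* check whether the state event belongs to this process instance
*/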
private boolean checkStateEvent(StateEvent stateEvent) {
if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) {
logger.error("mismatch process instance id: {}, state event:{}",
this.processInstance.getId(),
stateEvent.toString());
return false;
}
return true;
}
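/**
* handle a process state change event
*/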
private boolean processStateChangeHandler(StateEvent stateEvent) {
try {
logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus());
processInstance = processService.findProcessInstanceById(this.processInstance.getId());
if (processComplementData()) {
return true;
}
if (stateEvent.getExecutionStatus().typeIsFinished()) {
endProcess();
}
if (processInstance.getState() == ExecutionStatus.READY_STOP) {
killAllTasks();
}
return true;
} catch (Exception e) {
logger.error("process state change error:", e);
}
return true;
}
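/**
* schedule the next complement date and restart the process for it
*/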
private boolean processComplementData() throws Exception {
if (!needComplementProcess()) {
return false;
}
Date scheduleDate = processInstance.getScheduleTime();
if (scheduleDate == null) {
scheduleDate = complementListDate.get(0);
} else if (processInstance.getState().typeIsFinished()) {
endProcess();
if (complementListDate.size() <= 0) {
logger.info("process complement end. process id:{}", processInstance.getId());
return true;
}
int index = complementListDate.indexOf(scheduleDate);
if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) {
logger.info("process complement end. process id:{}", processInstance.getId());
// complement data ends || no success
return true;
}
logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}",
processInstance.getId(),
processInstance.getScheduleTime(),
complementListDate.toString());
scheduleDate = complementListDate.get(index + 1);
//the next process complement
processInstance.setId(0);
}
processInstance.setScheduleTime(scheduleDate);
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
this.taskInstanceHashMap.clear();
startProcess();
return true;
}
private boolean needComplementProcess() {
return processInstance.isComplementData()
&& Flag.NO == processInstance.getIsSubProcess();
}
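/**
* build the dag, init the task queue and submit the start nodes
*/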
private void startProcess() throws Exception {
if (this.taskInstanceHashMap.size() == 0) {
isStart = false;
buildFlowDag();
initTaskQueue();
submitPostNode(null);
isStart = true;
}
}
/**
* handle process end: save the end time and send the alert
*/
private void endProcess() {
this.stateEvents.clear();
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if (processInstance.getState().typeIsWaitingThread()) {
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser);
}
/**
* generate process dag
*
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
if (this.dag != null) {
return;
}
processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
List<TaskNode> taskNodeList =
processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList());
forbiddenTaskList.clear();
taskNodeList.forEach(taskNode -> {
if (taskNode.isForbidden()) {
forbiddenTaskList.put(Long.toString(taskNode.getCode()), taskNode);
}
});
// generate process to get DAG info
List<String> recoveryNodeCodeList = getRecoveryNodeCodeList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(taskNodeList,
startNodeNameList, recoveryNodeCodeList, processInstance.getTaskDependType());
if (processDag == null) {
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue() {
taskFailedSubmit = false;
activeTaskProcessorMaps.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance task : taskInstanceList) {
if (task.isTaskComplete()) {
completeTaskList.put(Long.toString(task.getTaskCode()), task);
}
if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
continue;
}
if (task.getState().typeIsFailure() && !task.taskCanRetry()) {
errorTaskList.put(Long.toString(task.getTaskCode()), task);
}
}
if (processInstance.isComplementData() && complementListDate.size() == 0) {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
if (complementListDate.size() == 0 && needComplementProcess()) {
complementListDate = CronUtils.getSelfFireDateList(start, end, schedules);
logger.info(" process definition code:{} complement data: {}",
processInstance.getProcessDefinitionCode(), complementListDate.toString());
if (complementListDate.size() > 0 && Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementListDate.get(0));
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processService.updateProcessInstance(processInstance);
}
}
}
}
}
/**
* submit task to execute
*
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
try {
ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType());
if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION
&& taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) {
notifyProcessHostUpdate(taskInstance);
}
boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval());
if (submit) {
this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance);
activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor);
taskProcessor.run();
addTimeoutCheck(taskInstance);
TaskDefinition taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
taskInstance.setTaskDefine(taskDefinition);
if (taskProcessor.taskState().typeIsFinished()) {
StateEvent stateEvent = new StateEvent();
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(taskProcessor.taskState());
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
this.stateEvents.add(stateEvent);
}
return taskInstance;
} else {
logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!",
processInstance.getId(), processInstance.getName(),
taskInstance.getId(), taskInstance.getName());
return null;
}
} catch (Exception e) {
logger.error("submit standby task error", e);
return null;
}
}
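/**
* notify the worker that the master host of the task has changed
*/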
private void notifyProcessHostUpdate(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getHost())) {
return;
}
try {
HostUpdateCommand hostUpdateCommand = new HostUpdateCommand();
hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort()));
hostUpdateCommand.setTaskInstanceId(taskInstance.getId());
Host host = new Host(taskInstance.getHost());
nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command());
} catch (Exception e) {
logger.error("notify process host update", e);
}
}
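/**
* add the task to the timeout check list when timeout is enabled,
* or when the task is a dependent or sub-process task
*/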
private void addTimeoutCheck(TaskInstance taskInstance) {
TaskDefinition taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion()
);
taskInstance.setTaskDefine(taskDefinition);
if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) {
this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
return;
}
if (taskInstance.isDependTask() || taskInstance.isSubProcess()) {
this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
}
}
/**
* find task instance in db.
* in case more than one task with the same name is submitted at the same time.
*
* @param taskCode task code
* @param taskVersion task version
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) {
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) {
return taskInstance;
}
}
return null;
}
/**
* create a task instance from the task node
*
* @param processInstance process instance
* @param taskNode taskNode
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion());
if (taskInstance == null) {
taskInstance = new TaskInstance();
taskInstance.setTaskCode(taskNode.getCode());
taskInstance.setTaskDefinitionVersion(taskNode.getVersion());
// task name
taskInstance.setName(taskNode.getName());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance type
taskInstance.setTaskType(taskNode.getType().toUpperCase());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(null);
// task instance flag
taskInstance.setFlag(Flag.YES);
// task dry run flag
taskInstance.setDryRun(processInstance.getDryRun());
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
//set task param
taskInstance.setTaskParams(taskNode.getTaskParams());
// task instance priority
if (taskNode.getTaskInstancePriority() == null) {
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
} else {
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode();
Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? processEnvironmentCode : taskNode.getEnvironmentCode();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
taskInstance.setEnvironmentCode(processEnvironmentCode);
} else {
taskInstance.setWorkerGroup(taskWorkerGroup);
taskInstance.setEnvironmentCode(taskEnvironmentCode);
}
if (!taskInstance.getEnvironmentCode().equals(-1L)) {
Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode());
if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) {
taskInstance.setEnvironmentConfig(environment.getConfig());
}
}
// delay execution time
taskInstance.setDelayTime(taskNode.getDelayTime());
}
return taskInstance;
}
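/**
* merge the var pool of the predecessor tasks into this task instance
*/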
public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) {
Map<String, Property> allProperty = new HashMap<>();
Map<String, TaskInstance> allTaskInstance = new HashMap<>();
if (CollectionUtils.isNotEmpty(preTask)) {
for (String preTaskName : preTask) {
TaskInstance preTaskInstance = completeTaskList.get(preTaskName);
if (preTaskInstance == null) {
continue;
}
String preVarPool = preTaskInstance.getVarPool();
if (StringUtils.isNotEmpty(preVarPool)) {
List<Property> properties = JSONUtils.toList(preVarPool, Property.class);
for (Property info : properties) {
setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info);
}
}
}
if (allProperty.size() > 0) {
taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values()));
}
}
}
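/**
* merge one property into the var pool, resolving name conflicts by task end time
*/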
private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) {
//for this taskInstance, all params in this part are IN direction.
thisProperty.setDirect(Direct.IN);
//get the pre taskInstance Property's name
String proName = thisProperty.getProp();
//if a previous node already produced a property with the same name
if (allProperty.containsKey(proName)) {
//compare the values of the two properties
Property otherPro = allProperty.get(proName);
//if this property's value is empty, use the other one whether the other's value is empty or not
if (StringUtils.isEmpty(thisProperty.getValue())) {
allProperty.put(proName, otherPro);
//if both values are non-empty, keep the property from the task that finished earlier
} else if (StringUtils.isNotEmpty(otherPro.getValue())) {
TaskInstance otherTask = allTaskInstance.get(proName);
if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
} else {
allProperty.put(proName, otherPro);
}
} else {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
}
} else {
allProperty.put(proName, thisProperty);
allTaskInstance.put(proName, preTaskInstance);
}
}
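/**
* submit the post nodes of the given parent node
*
* @param parentNodeCode parent node code, null for start nodes
*/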
private void submitPostNode(String parentNodeCode) {
Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeList, dag, completeTaskList);
List<TaskInstance> taskInstances = new ArrayList<>();
for (String taskNode : submitTaskNodeList) {
TaskNode taskNodeObject = dag.getNode(taskNode);
if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) {
continue;
}
TaskInstance task = createTaskInstance(processInstance, taskNodeObject);
taskInstances.add(task);
}
// if the previous node succeeded, submit the post node
for (TaskInstance task : taskInstances) {
if (readyToSubmitTaskQueue.contains(task)) {
continue;
}
if (completeTaskList.containsKey(Long.toString(task.getTaskCode()))) {
logger.info("task {} has already run success", task.getName());
continue;
}
if (task.getState().typeIsPause() || task.getState().typeIsCancel()) {
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
} else {
addTaskToStandByList(task);
}
}
submitStandByTask();
updateProcessInstanceState();
}
/**
* determine whether the dependencies of the task node are complete
*
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskCode) {
Collection<String> startNodes = dag.getBeginNode();
// if the task is a start vertex, return success directly
if (startNodes.contains(taskCode)) {
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskCode);
List<String> depCodeList = taskNode.getDepList();
for (String depsNode : depCodeList) {
if (!dag.containsNode(depsNode)
|| forbiddenTaskList.containsKey(depsNode)
|| skipTaskNodeList.containsKey(depsNode)) {
continue;
}
// dependencies must be fully completed
if (!completeTaskList.containsKey(depsNode)) {
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) {
return DependResult.NON_EXEC;
}
// ignore task state if current task is condition
if (taskNode.isConditionsTask()) {
continue;
}
if (!dependTaskSuccess(depsNode, taskCode)) {
return DependResult.FAILED;
}
}
logger.info("taskCode: {} completeDependTaskList: {}", taskCode, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* the dependent node has completed, but we still need to check whether the condition task branch leads to the next node
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) {
if (dag.getNode(dependNodeName).isConditionsTask()) {
//condition task need check the branch to run
List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList);
if (!nextTaskList.contains(nextNodeName)) {
return false;
}
} else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if (depTaskState.typeIsFailure()) {
return false;
}
}
return true;
}
/**
* query task instance by complete state
*
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) {
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) {
if (entry.getValue().getState() == state) {
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
* whether there are ongoing tasks
*
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state) {
if (state == ExecutionStatus.READY_STOP
|| state == ExecutionStatus.READY_PAUSE
|| state == ExecutionStatus.WAITING_THREAD
|| state == ExecutionStatus.DELAY_EXECUTION) {
// if the running task is not completed, the state remains unchanged
return state;
} else {
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
* whether a failed task exists, including submit failure, dependency failure and execution failure (after retries)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask() {
if (this.taskFailedSubmit) {
return true;
}
if (this.errorTaskList.size() > 0) {
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed() {
if (hasFailedTask()) {
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0;
}
}
return false;
}
/**
* whether task for waiting thread
*
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask() {
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
* 1. a failed retry task exists in the standby queue: return FAILURE directly
* 2. a paused task exists, complement is not finished, or tasks are pending submission: return PAUSE
* 3. otherwise: return SUCCESS
*
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause() {
if (hasRetryTaskInStandBy()) {
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if (CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskQueue.size() > 0) {
return ExecutionStatus.PAUSE;
} else {
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
*
* @param instance
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(ProcessInstance instance) {
ExecutionStatus state = instance.getState();
if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) {
// active task and retry task exists
return runningState(state);
}
// process failure
if (processFailed()) {
return ExecutionStatus.FAILURE;
}
// waiting thread
if (hasWaitingThreadTask()) {
return ExecutionStatus.WAITING_THREAD;
}
// pause
if (state == ExecutionStatus.READY_PAUSE) {
return processReadyPause();
}
// stop
if (state == ExecutionStatus.READY_STOP) {
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if (CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()) {
return ExecutionStatus.STOP;
} else {
return ExecutionStatus.SUCCESS;
}
}
// success
if (state == ExecutionStatus.RUNNING_EXECUTION) {
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if (readyToSubmitTaskQueue.size() > 0) {
//tasks are pending submission without retries, indicating a dependency is still waiting to complete
return ExecutionStatus.RUNNING_EXECUTION;
} else if (CollectionUtils.isNotEmpty(killTasks)) {
// tasks maybe killed manually
return ExecutionStatus.FAILURE;
} else {
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether complement end
*
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if (!processInstance.isComplementData()) {
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ", e);
return false;
}
}
/**
* update process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = getProcessInstanceState(instance);
if (processInstance.getState() != state) {
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
instance.setState(state);
processService.updateProcessInstance(instance);
processInstance = instance;
StateEvent stateEvent = new StateEvent();
stateEvent.setExecutionStatus(processInstance.getState());
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE);
this.processStateChangeHandler(stateEvent);
}
}
/**
* get task dependency result
*
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance) {
return isTaskDepsComplete(Long.toString(taskInstance.getTaskCode()));
}
/**
* add task to standby list
*
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance) {
logger.info("add task to stand by list: {}", taskInstance.getName());
try {
readyToSubmitTaskQueue.put(taskInstance);
} catch (Exception e) {
logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e);
}
}
/**
* remove task from stand by list
*
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance) {
logger.info("remove task from stand by list, id: {} name:{}",
taskInstance.getId(),
taskInstance.getName());
try {
readyToSubmitTaskQueue.remove(taskInstance);
} catch (Exception e) {
logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}",
taskInstance.getId(),
taskInstance.getName(), e);
}
}
/**
* has retry task in standby
*
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy() {
for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) {
if (iter.next().getState().typeIsFailure()) {
return true;
}
}
return false;
}
/**
* close the ongoing tasks
*/
private void killAllTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskProcessorMaps.size());
for (int taskId : activeTaskProcessorMaps.keySet()) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskId);
if (taskInstance == null || taskInstance.getState().typeIsFinished()) {
continue;
}
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId);
taskProcessor.action(TaskAction.STOP);
if (taskProcessor.taskState().typeIsFinished()) {
StateEvent stateEvent = new StateEvent();
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
stateEvent.setProcessInstanceId(this.processInstance.getId());
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setExecutionStatus(taskProcessor.taskState());
this.addStateEvent(stateEvent);
}
}
}
public boolean workFlowFinish() {
return this.processInstance.getState().typeIsFinished();
}
/**
* whether the retry interval is timed out
*
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) {
if (taskInstance.getState() != ExecutionStatus.FAILURE) {
return true;
}
if (taskInstance.getId() == 0
|| taskInstance.getMaxRetryTimes() == 0
|| taskInstance.getRetryInterval() == 0) {
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
// return true only when the retry interval has elapsed since the task failed
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask() {
try {
int length = readyToSubmitTaskQueue.size();
for (int i = 0; i < length; i++) {
TaskInstance task = readyToSubmitTaskQueue.peek();
if (task == null) {
continue;
}
// stop retrying the task if it has been forced to success
if (task.taskCanRetry()) {
TaskInstance retryTask = processService.findTaskInstanceById(task.getId());
if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) {
task.setState(retryTask.getState());
logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName());
removeTaskFromStandbyList(task);
completeTaskList.put(Long.toString(task.getTaskCode()), task);
submitPostNode(Long.toString(task.getTaskCode()));
continue;
}
}
//init varPool only when this task runs for the first time
if (task.isFirstRun()) {
//get predecessor tasks and merge all their varPool values into this task
Set<String> preTask = dag.getPreviousNodes(Long.toString(task.getTaskCode()));
getPreVarPool(task, preTask);
}
DependResult dependResult = getDependResultForTask(task);
if (DependResult.SUCCESS == dependResult) {
if (retryTaskIntervalOverTime(task)) {
TaskInstance taskInstance = submitTaskExec(task);
if (taskInstance == null) {
this.taskFailedSubmit = true;
} else {
removeTaskFromStandbyList(task);
}
}
} else if (DependResult.FAILED == dependResult) {
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(Long.toString(task.getTaskCode()), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult);
} else if (DependResult.NON_EXEC == dependResult) {
// for some reasons(depend task pause/stop) this task would not be submit
removeTaskFromStandbyList(task);
logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult);
}
}
} catch (Exception e) {
logger.error("submit standby task error", e);
}
}
/**
* get recovery task instance
*
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId) {
if (StringUtils.isEmpty(taskId)) {
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if (task == null) {
logger.error("start node id cannot be found: {}", taskId);
} else {
return task;
}
} catch (Exception e) {
logger.error("get recovery task instance failed ", e);
}
return null;
}
/**
* get start task instance list
*
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam) {
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) {
String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for (String nodeId : idList) {
TaskInstance task = getRecoveryTaskInstance(nodeId);
if (task != null) {
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
*
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam) {
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap == null) {
return startNodeNameList;
}
if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) {
startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
* generate start node code list from parsing command param;
* if "StartNodeIdList" exists in command param, return StartNodeIdList
*
* @return recovery node code list
*/
private List<String> getRecoveryNodeCodeList() {
List<String> recoveryNodeCodeList = new ArrayList<>();
if (CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeCodeList.add(Long.toString(task.getTaskCode()));
}
}
return recoveryNodeCodeList;
}
/**
* generate flow dag
*
* @param totalTaskNodeList total task node list
* @param startNodeNameList start node name list
* @param recoveryNodeCodeList recovery node code list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList,
List<String> startNodeNameList,
List<String> recoveryNodeCodeList,
TaskDependType depNodeType) throws Exception {
return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,543 | [Bug] [Master] process instance state is always running when failure task exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The process instance state stays running when a failed task exists.
The failed task settings: retry times: 1, retry interval: 1 minute.
### What you expected to happen
The process instance state should be failed when a failed task exists.
### How to reproduce
The process instance state stays running when a failed task exists.
The failed task settings: retry times: 1, retry interval: 1 minute.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6543 | https://github.com/apache/dolphinscheduler/pull/6547 | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | "2021-10-15T08:39:19Z" | java | "2021-10-15T10:04:04Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.facebook.presto.jdbc.internal.guava.collect.Lists;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* process related dao that holds the mappers used by this service.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessDefinitionLogMapper processDefineLogMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
@Autowired
private TaskDefinitionMapper taskDefinitionMapper;
@Autowired
private TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
private ProcessTaskRelationLogMapper processTaskRelationLogMapper;
@Autowired
private EnvironmentMapper environmentMapper;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
*
* @param logger logger
* @param host host
* @param command found command
* @param processDefinitionCacheMaps process definition cache, code -> definition
* @return process instance
*/
public ProcessInstance handleCommand(Logger logger, String host, Command command, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
ProcessInstance processInstance = constructProcessInstance(command, host, processDefinitionCacheMaps);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
this.commandMapper.deleteById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
this.commandMapper.deleteById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* get command page
*
* @param pageSize
* @param pageNumber
* @return
*/
public List<Command> findCommandPage(int pageSize, int pageNumber) {
return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize);
}
/**
* check the input command exists in queue list
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
CommandType commandType = command.getCommandType();
if (cmdTypeMap.containsKey(commandType)) {
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
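            // the command param is expected to carry the recovered process instance id, e.g. a JSON
            // object such as {"ProcessInstanceId": 123} (the exact key comes from
            // CMD_PARAM_RECOVER_PROCESS_ID_STRING; the sample payload here is illustrative only)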
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.error("process define not exists");
return new ArrayList<>();
}
List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
return new ArrayList<>(taskDefinitionLogs);
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* find process define by code and version.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
if (processDefinition == null || processDefinition.getVersion() != version) {
processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version);
if (processDefinition != null) {
processDefinition.setId(0);
}
}
return processDefinition;
}
/**
* find process define by code.
*
* @param processDefinitionCode processDefinitionCode
* @return process definition
*/
public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) {
return processDefineMapper.queryByCode(processDefinitionCode);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
try (LogClientService logClient = new LogClientService()) {
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
// compatible old version
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip, port, taskLogPath);
}
}
}
/**
* calculate sub process number in the process define.
*
* @param processDefinitionCode processDefinitionCode
* @return process thread num count
*/
private Integer workProcessThreadNumCount(long processDefinitionCode) {
ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinition.getId(), ids);
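        // ids holds the discovered sub process definition ids; +1 accounts for the parent process itself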
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskDefinition taskNode : taskNodeList) {
String parameter = taskNode.getTaskParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
    /**
     * create a recovery waiting thread command when the thread pool is not big enough for the process instance.
     * a sub work process instance does not need a recovery command.
     * create the recovery waiting thread command and delete the origin command at the same time.
     * if the recovery command already exists, only update the update_time field.
     *
     * @param originCommand originCommand
     * @param processInstance processInstance
     */
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // a sub process does not need a waiting-thread command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinition().getCode(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getEnvironmentCode(),
processInstance.getProcessInstancePriority(),
processInstance.getDryRun(),
processInstance.getId(),
processInstance.getProcessDefinitionVersion()
);
saveCommand(command);
return;
}
        // if the current command is already a recover-from-waiting command, only refresh its update time
if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
if (scheduleTime == null
&& cmdParam != null
&& cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules);
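            // for complement runs, the first self-fire date inside [start, end] becomes the schedule time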
if (complementDateList.size() > 0) {
scheduleTime = complementDateList.get(0);
} else {
logger.error("set scheduler time error: complement date list is empty, command: {}",
command.toString());
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setProcessDefinitionCode(processDefinition.getCode());
processInstance.setProcessDefinitionVersion(processDefinition.getVersion());
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
processInstance.setDryRun(command.getDryRun());
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
// reset global params while there are start parameters
setGlobalParamIfCommanded(processDefinition, cmdParam);
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? -1 : command.getEnvironmentCode());
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
Map<String, String> fatherParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
fatherParamMap = JSONUtils.toMap(fatherParamJson);
}
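        // note: putAll gives father params precedence over start params on key conflicts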
startParamMap.putAll(fatherParamMap);
// set start param into global params
if (startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
    /**
     * get process tenant:
     * if the definition specifies a tenant id, use the tenant of the definition;
     * if the definition has no tenant id, or that tenant does not exist,
     * fall back to the definition creator's tenant.
     *
     * @param tenantId tenantId
     * @param userId userId
     * @return tenant
     */
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
    /**
     * find an environment by its code
     *
     * @param environmentCode environmentCode
     * @return Environment
     */
public Environment findEnvironmentByCode(Long environmentCode) {
Environment environment = null;
if (environmentCode >= 0) {
environment = environmentMapper.queryByEnvironmentCode(environmentCode);
}
return environment;
}
/**
* check command parameters is valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @param processDefinitionCacheMaps
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
ProcessInstance processInstance;
ProcessDefinition processDefinition;
CommandType commandType = command.getCommandType();
String key = String.format("%d-%d", command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
if (processDefinitionCacheMaps.containsKey(key)) {
processDefinition = processDefinitionCacheMaps.get(key);
} else {
processDefinition = this.findProcessDefinition(command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
if (processDefinition != null) {
processDefinitionCacheMaps.put(key, processDefinition);
}
}
if (processDefinition == null) {
logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
return null;
}
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
int processInstanceId = command.getProcessInstanceId();
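        // processInstanceId == 0 means the command starts a fresh instance;
        // otherwise the command operates on an existing instance (recover / rerun / complement)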
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return processInstance;
}
}
if (cmdParam != null) {
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
            // reset global params when cmdParam indicates a repeat run
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
processInstance.setProcessDefinition(processDefinition);
}
        // merge the instance's stored command parameters into cmdParam without overwriting existing keys
        if (cmdParam != null && processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
                // when complementing data for an existing instance (id != 0), invalidate all of its valid tasks
if (processInstance.getId() != 0) {
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
}
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* get process definition by command
* If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
* Otherwise, get the latest version of ProcessDefinition
*
* @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
if (cmdParam != null) {
int processInstanceId = 0;
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
}
if (processInstanceId != 0) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return null;
}
return processDefineLogMapper.queryByDefinitionCodeAndVersion(
processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
}
}
return processDefineMapper.queryByCode(processDefinitionCode);
}
    /**
     * return COMPLEMENT_DATA if the process started with complement data,
     * otherwise return the command's own type
     *
     * @param processInstance processInstance
     * @param command command
     * @return command type
     */
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode());
List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules);
if (complementDate.size() > 0
&& Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementDate.get(0));
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
    /**
     * set sub work process parameters:
     * handle the sub work process instance, update the relation table and command parameters,
     * set the sub process flag, and extend the parent work process command parameters
     *
     * @param subProcessInstance subProcessInstance
     */
public void setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user def params to sub process..
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
}
    /**
     * join parent global params into the sub process.
     * only keys missing from the sub process global params are joined.
     *
     * @param parentGlobalParams parentGlobalParams
     * @param subGlobalParams subGlobalParams
     * @return joined global params
     */
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
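        // e.g. parent [{"a":"1"},{"b":"2"}] joined with sub [{"b":"9"}] yields [{"b":"9"},{"a":"1"}]:
        // the sub process value wins on conflicts, parent-only keys are appended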
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
    /**
     * retry submitting a task to the db
     *
     * @param taskInstance task instance to persist
     * @param commitRetryTimes maximum number of commit attempts
     * @param commitInterval interval between attempts, in milliseconds
     * @return the persisted task instance, or null if every attempt failed
     */
public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
int retryTimes = 1;
boolean submitDB = false;
TaskInstance task = null;
while (retryTimes <= commitRetryTimes) {
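            // keep retrying until the instance is persisted (non-zero id) or the retry budget is used up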
try {
if (!submitDB) {
// submit task to db
task = submitTask(taskInstance);
if (task != null && task.getId() != 0) {
submitDB = true;
break;
}
}
if (!submitDB) {
logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes);
}
Thread.sleep(commitInterval);
} catch (Exception e) {
logger.error("task commit to mysql failed", e);
}
retryTimes += 1;
}
return task;
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
    /**
     * set work process instance map;
     * a repeat run does not generate a new sub process instance,
     * set map {parent instance id, task instance id, 0 (child instance id)}
     *
     * @param parentInstance parentInstance
     * @param parentTask parentTask
     * @return process instance map
     */
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
        // first check whether the sub workflow command has already been created
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // fault-tolerance recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
     * complement data needs to pass the parent's parameters down to the child.
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if (parentProcessInstance.isComplementData()) {
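            // propagate the complement data window so the child instance runs over the same date range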
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
if (fatherParams.size() != 0) {
cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
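    /**
     * convert a global params JSON list into a prop -> value map
     *
     * @param globalParams global params JSON string
     * @return map of param name to value
     */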
public Map<String, String> getGlobalParamMap(String globalParams) {
List<Property> propList;
Map<String, String> globalParamMap = new HashMap<>();
if (StringUtils.isNotEmpty(globalParams)) {
propList = JSONUtils.toList(globalParams, Property.class);
globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
}
return globalParamMap;
}
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
ProcessDefinition subProcessDefinition = processDefineMapper.queryByDefineId(childDefineId);
Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
Map<String, String> fatherParams = new HashMap<>();
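        // map each local param declared on the sub-process task to the parent's current global
        // value, so the child instance inherits runtime values from the parent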
if (CollectionUtils.isNotEmpty(allParam)) {
for (Property info : allParam) {
fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
}
}
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
int subProcessInstanceId = childInstance == null ? 0 : childInstance.getId();
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
subProcessDefinition.getCode(),
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
task.getEnvironmentCode(),
parentProcessInstance.getProcessInstancePriority(),
parentProcessInstance.getDryRun(),
subProcessInstanceId,
subProcessDefinition.getVersion()
);
}
/**
* initialize sub work flow state
* child instance state would be initialized when 'recovery from pause/stop/failure'
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
    /**
     * get sub work flow command type:
     * if the child instance exists, the child command equals the father's current command;
     * otherwise the child command equals the first command in the father's history
     */
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
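            // historyCmd is a comma-separated list; assuming commands are appended in order,
            // index 0 is the parent's original command type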
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
     * @param childDefinitionCode child definition code
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
parentProcessInstance.getProcessDefinitionVersion());
ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
     * submit task to the database
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
                    // create a new task instance
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
    /**
     * get the submit-time task state from the work process state.
     * the task state cannot be modified while it is running, delayed, or killed,
     * because such a task instance already exists in the task queue;
     * return pause if the work process state is ready pause,
     * return stop if the work process state is ready stop,
     * otherwise return submitted success
     *
     * @param taskInstance taskInstance
     * @param processInstanceState processInstanceState
     * @return task state to submit with
     */
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
        // return pause/stop if the process instance state is ready pause/stop,
        // otherwise return submitted success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE
&& task.getRetryTimes() >= task.getMaxRetryTimes()) {
return false;
}
}
return true;
}
/**
* insert or update work process instance to data base
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* find task instance by id
*
* @param taskId task id
     * @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
     * package task instance, associate processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return null;
}
setTaskInstanceDetail(taskInstance);
return taskInstance;
}
    /**
     * package task instance, associate processInstance and processDefine
     *
     * @param taskInstance taskInstance
     */
public void setTaskInstanceDetail(TaskInstance taskInstance) {
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
taskInstance.getTaskCode(),
taskInstance.getTaskDefinitionVersion());
updateTaskDefinitionResources(taskDefinition);
taskInstance.setTaskDefine(taskDefinition);
}
/**
* Update {@link ResourceInfo} information in {@link TaskDefinition}
*
* @param taskDefinition the given {@link TaskDefinition}
*/
private void updateTaskDefinitionResources(TaskDefinition taskDefinition) {
Map<String, Object> taskParameters = JSONUtils.parseObject(
taskDefinition.getTaskParams(),
new TypeReference<Map<String, Object>>() { });
if (taskParameters != null) {
// if contains mainJar field, query resource from database
// Flink, Spark, MR
if (taskParameters.containsKey("mainJar")) {
Object mainJarObj = taskParameters.get("mainJar");
ResourceInfo mainJar = JSONUtils.parseObject(
JSONUtils.toJsonString(mainJarObj),
ResourceInfo.class);
ResourceInfo resourceInfo = updateResourceInfo(mainJar);
if (resourceInfo != null) {
taskParameters.put("mainJar", resourceInfo);
}
}
// update resourceList information
if (taskParameters.containsKey("resourceList")) {
String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList"));
List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class);
List<ResourceInfo> updatedResourceInfos = resourceInfos
.stream()
.map(this::updateResourceInfo)
.filter(Objects::nonNull)
.collect(Collectors.toList());
taskParameters.put("resourceList", updatedResourceInfos);
}
// set task parameters
taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters));
}
}
/**
* update {@link ResourceInfo} by given original ResourceInfo
*
* @param res origin resource info
* @return {@link ResourceInfo}
*/
private ResourceInfo updateResourceInfo(ResourceInfo res) {
ResourceInfo resourceInfo = null;
        // only proceed when the resource info is present and carries a valid resource id
if (res != null) {
int resourceId = res.getId();
if (resourceId <= 0) {
logger.error("invalid resourceId, {}", resourceId);
return null;
}
resourceInfo = new ResourceInfo();
// get resource from database, only one resource should be returned
Resource resource = getResourceById(resourceId);
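            // note: this assumes the resource still exists in the database; a stale id
            // (deleted resource) would surface as an NPE on the accessors below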
resourceInfo.setId(resourceId);
resourceInfo.setRes(resource.getFileName());
resourceInfo.setResourceName(resource.getFullName());
if (logger.isInfoEnabled()) {
logger.info("updated resource info {}",
JSONUtils.toJsonString(resourceInfo));
}
}
return resourceInfo;
}
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
     * find valid task list by process instance id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
int count = 0;
if (processInstanceMap != null) {
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
    /**
     * change task state
     *
     * @param taskInstance taskInstance
     * @param state state
     * @param startTime startTime
     * @param host host
     * @param executePath executePath
     * @param logPath logPath
     * @param taskInstId taskInstId
     */
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
    /**
     * change task state
     *
     * @param taskInstance taskInstance
     * @param state state
     * @param endTime endTime
     * @param processId process id
     * @param appIds application ids
     * @param taskInstId taskInstId
     * @param varPool varPool
     */
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(taskInstance);
saveTaskInstance(taskInstance);
}
    /**
     * write OUT-direction param values back so they can be shown on the task instance page
     *
     * @param taskInstance taskInstance
     */
public void changeOutParam(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getVarPool())) {
return;
}
List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class);
if (CollectionUtils.isEmpty(properties)) {
return;
}
        // if the result has more than one line, just take the first one
Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() {});
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> outProperty = new HashMap<>();
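        // collect the OUT-direction values produced by the task's var pool, then write them
        // back into the matching OUT-direction local params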
for (Property info : properties) {
if (info.getDirect() == Direct.OUT) {
outProperty.put(info.getProp(), info.getValue());
}
}
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
info.setValue(outProperty.get(paramName));
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams));
}
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionCode
*
* @param processDefinitionCode processDefinitionCode
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
        // 1. clear the process instance host
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
        // 2. insert a recovery command
Command cmd = new Command();
cmd.setProcessDefinitionCode(processDefinition.getCode());
cmd.setProcessDefinitionVersion(processDefinition.getVersion());
cmd.setProcessInstanceId(processInstance.getId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
     * find udf function list by id array
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
        // normalize the resource name so the tenant code query also succeeds for data created by older versions
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
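        // e.g. "test.sh" is normalized to "/test.sh" before the full-name lookup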
List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
return StringUtils.EMPTY;
}
int userId = resourceList.get(0).getUserId();
User user = userMapper.selectById(userId);
if (Objects.isNull(user)) {
return StringUtils.EMPTY;
}
Tenant tenant = tenantMapper.selectById(user.getTenantId());
if (Objects.isNull(tenant)) {
return StringUtils.EMPTY;
}
return tenant.getTenantCode();
}
/**
* find schedule list by process define codes.
*
* @param codes codes
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineCode(long[] codes) {
return scheduleMapper.selectAllByProcessDefineArray(codes);
}
/**
* find last scheduler process instance in the date interval
*
* @param definitionCode definitionCode
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionCode process definition code
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionCode,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionCode process definition code
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionCode,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* list unauthorized udf function
*
* @param userId user id
* @param needChecks data source id array
* @return unauthorized udf function list
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks);
addAuthorizedResources(ownUdfResources, userId);
Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks);
addAuthorizedResources(ownResources, userId);
Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
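    // Illustrative sketch of the set-difference check above (hypothetical ids):
    // given needChecks = {1, 2, 3} and a user authorized for {1, 3},
    //   Set<Integer> origin = new HashSet<>(Arrays.asList(1, 2, 3));
    //   origin.removeAll(new HashSet<>(Arrays.asList(1, 3)));
    // leaves origin = {2}, i.e. exactly the unauthorized ids returned here.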
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
return "";
}
ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
if (definition == null) {
return "";
}
return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId());
}
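    // Example (hypothetical ids): definition id 12, process instance 345 and
    // task instance 678 produce the task app id "12_345_678".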
/**
* switch process definition version to process definition log version
*/
public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) {
if (null == processDefinition || null == processDefinitionLog) {
return Constants.DEFINITION_FAILURE;
}
processDefinitionLog.setId(processDefinition.getId());
processDefinitionLog.setReleaseState(ReleaseState.OFFLINE);
processDefinitionLog.setFlag(Flag.YES);
int result = processDefineMapper.updateById(processDefinitionLog);
if (result > 0) {
result = switchProcessTaskRelationVersion(processDefinitionLog);
if (result <= 0) {
return Constants.DEFINITION_FAILURE;
}
}
return result;
}
public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
if (!processTaskRelationList.isEmpty()) {
processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode());
}
List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
return processTaskRelationMapper.batchInsert(processTaskRelationLogList);
}
/**
* get resource ids
*
* @param taskDefinition taskDefinition
* @return resource ids
*/
public String getResourceIds(TaskDefinition taskDefinition) {
Set<Integer> resourceIds = null;
AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams());
if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
resourceIds = params.getResourceFilesList().
stream()
.filter(t -> t.getId() != 0)
.map(ResourceInfo::getId)
.collect(Collectors.toSet());
}
if (CollectionUtils.isEmpty(resourceIds)) {
return StringUtils.EMPTY;
}
return StringUtils.join(resourceIds, ",");
}
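    // Example (hypothetical): a task whose params reference resource files with
    // ids 3 and 7 yields "3,7" (set order not guaranteed); a task without file
    // resources yields the empty string.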
public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) {
Date now = new Date();
List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>();
List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>();
for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
taskDefinitionLog.setProjectCode(projectCode);
taskDefinitionLog.setUpdateTime(now);
taskDefinitionLog.setOperateTime(now);
taskDefinitionLog.setOperator(operator.getId());
taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog));
if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) {
TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper
.queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion());
if (definitionCodeAndVersion != null) {
if (!taskDefinitionLog.equals(definitionCodeAndVersion)) {
taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId());
Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode());
taskDefinitionLog.setVersion(version + 1);
taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime());
updateTaskDefinitionLogs.add(taskDefinitionLog);
}
continue;
}
}
taskDefinitionLog.setUserId(operator.getId());
taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
taskDefinitionLog.setCreateTime(now);
if (taskDefinitionLog.getCode() == 0) {
try {
taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
logger.error("Task code get error, ", e);
return Constants.DEFINITION_FAILURE;
}
}
newTaskDefinitionLogs.add(taskDefinitionLog);
}
int insertResult = 0;
int updateResult = 0;
for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) {
TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode());
if (task == null) {
newTaskDefinitionLogs.add(taskDefinitionToUpdate);
} else {
insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate);
taskDefinitionToUpdate.setId(task.getId());
updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate);
}
}
if (!newTaskDefinitionLogs.isEmpty()) {
updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs);
insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs);
}
return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS;
}
/**
* save processDefinition (including create or update processDefinition)
*/
public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) {
ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition);
Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode());
int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1;
processDefinitionLog.setVersion(insertVersion);
processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE);
processDefinitionLog.setOperator(operator.getId());
processDefinitionLog.setOperateTime(processDefinition.getUpdateTime());
int insertLog = processDefineLogMapper.insert(processDefinitionLog);
int result;
if (0 == processDefinition.getId()) {
result = processDefineMapper.insert(processDefinitionLog);
} else {
processDefinitionLog.setId(processDefinition.getId());
result = processDefineMapper.updateById(processDefinitionLog);
}
return (insertLog & result) > 0 ? insertVersion : 0;
}
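    // Return-value sketch (illustrative): a brand-new definition is written as
    // version 1 and the method returns 1; updating a definition whose max log
    // version is 3 writes log version 4 and returns 4; 0 means the log insert
    // or the definition write failed.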
/**
* save task relations
*/
public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion,
List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null;
if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) {
taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog));
}
Date now = new Date();
for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
processTaskRelationLog.setProjectCode(projectCode);
processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode);
processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion);
if (taskDefinitionLogMap != null) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode());
if (taskDefinitionLog != null) {
processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion());
}
processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion());
}
processTaskRelationLog.setCreateTime(now);
processTaskRelationLog.setUpdateTime(now);
processTaskRelationLog.setOperator(operator.getId());
processTaskRelationLog.setOperateTime(now);
}
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
if (!processTaskRelationList.isEmpty()) {
Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet());
Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet());
if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) {
return Constants.EXIT_CODE_SUCCESS;
}
processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode);
}
int result = processTaskRelationMapper.batchInsert(taskRelationList);
int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList);
return (result & resultLog) > 0 ? Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
}
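    // Short-circuit sketch (illustrative): the existing relations and the incoming
    // ones are compared as sets of hashCode values; when the two sets are equal
    // collections nothing changed, so the method returns success without rewriting
    // the relation table.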
public boolean isTaskOnline(long taskCode) {
List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
if (!processTaskRelationList.isEmpty()) {
Set<Long> processDefinitionCodes = processTaskRelationList
.stream()
.map(ProcessTaskRelation::getProcessDefinitionCode)
.collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes);
            // check whether any of these process definitions is already online
for (ProcessDefinition processDefinition : processDefinitionList) {
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
return true;
}
}
}
return false;
}
/**
* Generate the DAG Graph based on the process definition id
*
* @param processDefinition process definition
* @return dag graph
*/
public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations));
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
/**
* generate DagData
*/
public DagData genDagData(ProcessDefinition processDefinition) {
List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations);
List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream()
.map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class))
.collect(Collectors.toList());
return new DagData(processDefinition, processTaskRelations, taskDefinitions);
}
public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) {
Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
if (processTaskRelation.getPreTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
}
if (processTaskRelation.getPostTaskCode() > 0) {
taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
}
}
return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
}
/**
* find task definition by code and version
*/
public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) {
return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion);
}
/**
* find process task relation list by projectCode and processDefinitionCode
*/
public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) {
return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
}
/**
* add authorized resources
*
* @param ownResources own resources
* @param userId userId
*/
private void addAuthorizedResources(List<Resource> ownResources, int userId) {
List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
ownResources.addAll(relationResources);
}
/**
     * Used temporarily until taskNode is refactored
*/
public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<Long, List<Long>> taskCodeMap = new HashMap<>();
for (ProcessTaskRelation processTaskRelation : taskRelationList) {
taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> {
if (v == null) {
v = new ArrayList<>();
}
if (processTaskRelation.getPreTaskCode() != 0L) {
v.add(processTaskRelation.getPreTaskCode());
}
return v;
});
}
if (CollectionUtils.isEmpty(taskDefinitionLogs)) {
taskDefinitionLogs = genTaskDefineList(taskRelationList);
}
Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream()
.collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
List<TaskNode> taskNodeList = new ArrayList<>();
for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) {
TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey());
if (taskDefinitionLog != null) {
TaskNode taskNode = new TaskNode();
taskNode.setCode(taskDefinitionLog.getCode());
taskNode.setVersion(taskDefinitionLog.getVersion());
taskNode.setName(taskDefinitionLog.getName());
taskNode.setDesc(taskDefinitionLog.getDescription());
taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase());
taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes());
taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval());
Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams());
taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT)));
taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE)));
taskParamsMap.remove(Constants.CONDITION_RESULT);
taskParamsMap.remove(Constants.DEPENDENCE);
taskNode.setParams(JSONUtils.toJsonString(taskParamsMap));
taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority());
taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup());
taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode());
taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN,
taskDefinitionLog.getTimeoutNotifyStrategy(),
taskDefinitionLog.getTimeout())));
taskNode.setDelayTime(taskDefinitionLog.getDelayTime());
taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList())));
taskNodeList.add(taskNode);
}
}
return taskNodeList;
}
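    // Illustrative example of the taskCodeMap built above (hypothetical codes):
    // relations (pre -> post) {0 -> A, A -> B, A -> C} produce
    //   {A: [], B: [A], C: [A]}
    // so each resulting TaskNode's preTasks JSON lists the codes of its upstream tasks.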
public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) {
HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>();
        // find the parent process instance and task of this sub-process
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId);
if (processInstanceMap == null) {
return processTaskMap;
}
ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
if (fatherProcess != null) {
processTaskMap.put(fatherProcess, fatherTask);
}
return processTaskMap;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,543 | [Bug] [Master] process instance state is always running when failure task exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The process instance state stays RUNNING forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### What you expected to happen
The process instance state should be FAILED when a failed task exists.
### How to reproduce
The process instance state stays RUNNING forever when a failed task exists.
The failed task's settings: retry times: 1, retry interval: 1 minute.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6543 | https://github.com/apache/dolphinscheduler/pull/6547 | ba4cb9d22ad02ba4927fd23ea811c82639261e69 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | "2021-10-15T08:39:19Z" | java | "2021-10-15T10:04:04Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/queue/PeerTaskInstancePriorityQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.queue;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.service.exceptions.TaskPriorityQueueException;
import java.util.Comparator;
import java.util.Iterator;
import java.util.PriorityQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
/**
* Task instances priority queue implementation
* All the task instances are in the same process instance.
*/
public class PeerTaskInstancePriorityQueue implements TaskPriorityQueue<TaskInstance> {
/**
* queue size
*/
private static final Integer QUEUE_MAX_SIZE = 3000;
/**
* queue
*/
private PriorityQueue<TaskInstance> queue = new PriorityQueue<>(QUEUE_MAX_SIZE, new TaskInfoComparator());
/**
* Lock used for all public operations
*/
private final ReentrantLock lock = new ReentrantLock(true);
/**
* put task instance to priority queue
*
* @param taskInstance taskInstance
* @throws TaskPriorityQueueException
*/
@Override
public void put(TaskInstance taskInstance) throws TaskPriorityQueueException {
queue.add(taskInstance);
}
/**
* take task info
*
* @return task instance
* @throws TaskPriorityQueueException
*/
@Override
public TaskInstance take() throws TaskPriorityQueueException {
return queue.poll();
}
/**
* poll task info with timeout
* <p>
     * WARN: use PriorityBlockingQueue instead if you need poll(timeout, unit);
     * this override exists only to satisfy the interface and does not honor the timeout
*
* @param timeout
* @param unit
* @return
* @throws TaskPriorityQueueException
* @throws InterruptedException
*/
@Override
public TaskInstance poll(long timeout, TimeUnit unit) throws TaskPriorityQueueException {
        throw new TaskPriorityQueueException("This operation is not currently supported; use PriorityBlockingQueue if you need a timed poll!");
}
/**
* peek taskInfo
*
* @return task instance
*/
public TaskInstance peek() {
return queue.peek();
}
/**
* queue size
*
* @return size
*/
@Override
public int size() {
return queue.size();
}
/**
* whether contains the task instance
*
* @param taskInstance task instance
     * @return true if the queue contains the task instance
*/
public boolean contains(TaskInstance taskInstance) {
return queue.contains(taskInstance);
}
public boolean contains(int taskId) {
        for (TaskInstance taskInstance : this.queue) {
            if (taskId == taskInstance.getId()) {
                return true;
            }
        }
        return false;
}
/**
* remove task
*
* @param taskInstance task instance
* @return true if remove success
*/
public boolean remove(TaskInstance taskInstance) {
return queue.remove(taskInstance);
}
/**
* get iterator
*
* @return Iterator
*/
public Iterator<TaskInstance> iterator() {
return queue.iterator();
}
/**
* TaskInfoComparator
*/
private class TaskInfoComparator implements Comparator<TaskInstance> {
/**
* compare o1 o2
*
* @param o1 o1
* @param o2 o2
* @return compare result
*/
@Override
public int compare(TaskInstance o1, TaskInstance o2) {
return o1.getTaskInstancePriority().compareTo(o2.getTaskInstancePriority());
}
}
}
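// Usage sketch (illustrative only; assumes two TaskInstance objects with different
// TaskInstancePriority values):
//   PeerTaskInstancePriorityQueue queue = new PeerTaskInstancePriorityQueue();
//   queue.put(highPriorityTask);
//   queue.put(lowPriorityTask);
//   TaskInstance next = queue.take(); // the instance whose priority compares lowest is polled first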
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,537 | [Bug] [Task] task log using error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
![image](https://user-images.githubusercontent.com/16631152/137439540-4594a679-f489-4b76-bab6-da562e8035bd.png)
### What you expected to happen
Business logs should not be handed over to task-logger
### How to reproduce
no
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6537 | https://github.com/apache/dolphinscheduler/pull/6538 | 52a550b6aefa5652f17b7bbc9e056a83efa23c8a | de61d65af2fff9f129a3f3c4889328e104f7d2ad | "2021-10-15T06:01:08Z" | java | "2021-10-15T12:27:24Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.runner;
import static java.util.Calendar.DAY_OF_MONTH;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.Event;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.RetryerUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.cache.ResponceCache;
import org.apache.dolphinscheduler.server.worker.plugin.TaskPluginManager;
import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.spi.exception.PluginNotFoundException;
import org.apache.dolphinscheduler.spi.task.AbstractTask;
import org.apache.dolphinscheduler.spi.task.TaskAlertInfo;
import org.apache.dolphinscheduler.spi.task.TaskChannel;
import org.apache.dolphinscheduler.spi.task.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import java.io.File;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Delayed;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.RetryException;
/**
 * task execute thread
*/
public class TaskExecuteThread implements Runnable, Delayed {
/**
* logger
*/
private final Logger logger = LoggerFactory.getLogger(TaskExecuteThread.class);
/**
* task instance
*/
private TaskExecutionContext taskExecutionContext;
/**
* abstract task
*/
private AbstractTask task;
/**
* task callback service
*/
private TaskCallbackService taskCallbackService;
/**
* taskExecutionContextCacheManager
*/
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
/**
* task logger
*/
private Logger taskLogger;
/**
* alert client server
*/
private AlertClientService alertClientService;
private TaskPluginManager taskPluginManager;
/**
* constructor
*
* @param taskExecutionContext taskExecutionContext
     * @param taskCallbackService taskCallbackService
     * @param alertClientService alertClientService
     */
public TaskExecuteThread(TaskExecutionContext taskExecutionContext,
TaskCallbackService taskCallbackService,
AlertClientService alertClientService) {
this.taskExecutionContext = taskExecutionContext;
this.taskCallbackService = taskCallbackService;
this.alertClientService = alertClientService;
}
public TaskExecuteThread(TaskExecutionContext taskExecutionContext,
TaskCallbackService taskCallbackService,
AlertClientService alertClientService,
TaskPluginManager taskPluginManager) {
this.taskExecutionContext = taskExecutionContext;
this.taskCallbackService = taskCallbackService;
this.alertClientService = alertClientService;
this.taskPluginManager = taskPluginManager;
}
@Override
public void run() {
TaskExecuteResponseCommand responseCommand = new TaskExecuteResponseCommand(taskExecutionContext.getTaskInstanceId(), taskExecutionContext.getProcessInstanceId());
try {
logger.info("script path : {}", taskExecutionContext.getExecutePath());
// check if the OS user exists
if (!OSUtils.getUserList().contains(taskExecutionContext.getTenantCode())) {
String errorLog = String.format("tenantCode: %s does not exist", taskExecutionContext.getTenantCode());
taskLogger.error(errorLog);
responseCommand.setStatus(ExecutionStatus.FAILURE.getCode());
responseCommand.setEndTime(new Date());
return;
}
if (taskExecutionContext.getStartTime() == null) {
taskExecutionContext.setStartTime(new Date());
}
if (taskExecutionContext.getCurrentExecutionStatus() != ExecutionStatus.RUNNING_EXECUTION) {
changeTaskExecutionStatusToRunning();
}
logger.info("the task begins to execute. task instance id: {}", taskExecutionContext.getTaskInstanceId());
int dryRun = taskExecutionContext.getDryRun();
// copy hdfs/minio file to local
if (dryRun == Constants.DRY_RUN_FLAG_NO) {
downloadResource(taskExecutionContext.getExecutePath(),
taskExecutionContext.getResources(),
logger);
}
taskExecutionContext.setEnvFile(CommonUtils.getSystemEnvPath());
taskExecutionContext.setDefinedParams(getGlobalParamsMap());
taskExecutionContext.setTaskAppId(String.format("%s_%s",
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
preBuildBusinessParams();
TaskChannel taskChannel = taskPluginManager.getTaskChannelMap().get(taskExecutionContext.getTaskType());
if (null == taskChannel) {
throw new PluginNotFoundException(String.format("%s Task Plugin Not Found,Please Check Config File.", taskExecutionContext.getTaskType()));
}
TaskRequest taskRequest = JSONUtils.parseObject(JSONUtils.toJsonString(taskExecutionContext), TaskRequest.class);
String taskLogName = LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
taskExecutionContext.getProcessDefineCode(),
taskExecutionContext.getProcessDefineVersion(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId());
taskRequest.setTaskLogName(taskLogName);
task = taskChannel.createTask(taskRequest);
// task init
this.task.init();
//init varPool
this.task.getParameters().setVarPool(taskExecutionContext.getVarPool());
if (dryRun == Constants.DRY_RUN_FLAG_NO) {
// task handle
this.task.handle();
// task result process
if (this.task.getNeedAlert()) {
sendAlert(this.task.getTaskAlertInfo());
}
responseCommand.setStatus(this.task.getExitStatus().getCode());
} else {
responseCommand.setStatus(ExecutionStatus.SUCCESS.getCode());
task.setExitStatusCode(Constants.EXIT_CODE_SUCCESS);
}
responseCommand.setEndTime(new Date());
responseCommand.setProcessId(this.task.getProcessId());
responseCommand.setAppIds(this.task.getAppIds());
responseCommand.setVarPool(JSONUtils.toJsonString(this.task.getParameters().getVarPool()));
logger.info("task instance id : {},task final status : {}", taskExecutionContext.getTaskInstanceId(), this.task.getExitStatus());
} catch (Throwable e) {
logger.error("task scheduler failure", e);
kill();
responseCommand.setStatus(ExecutionStatus.FAILURE.getCode());
responseCommand.setEndTime(new Date());
responseCommand.setProcessId(task.getProcessId());
responseCommand.setAppIds(task.getAppIds());
} finally {
TaskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command(), Event.RESULT);
taskCallbackService.sendResult(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command());
clearTaskExecPath();
}
}
private void sendAlert(TaskAlertInfo taskAlertInfo) {
alertClientService.sendAlert(taskAlertInfo.getAlertGroupId(), taskAlertInfo.getTitle(), taskAlertInfo.getContent());
}
/**
* when task finish, clear execute path.
*/
private void clearTaskExecPath() {
logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
if (!CommonUtils.isDevelopMode()) {
// get exec dir
String execLocalPath = taskExecutionContext.getExecutePath();
if (StringUtils.isEmpty(execLocalPath)) {
logger.warn("task: {} exec local path is empty.", taskExecutionContext.getTaskName());
return;
}
if ("/".equals(execLocalPath)) {
logger.warn("task: {} exec local path is '/', direct deletion is not allowed", taskExecutionContext.getTaskName());
return;
}
try {
org.apache.commons.io.FileUtils.deleteDirectory(new File(execLocalPath));
logger.info("exec local path: {} cleared.", execLocalPath);
} catch (IOException e) {
logger.error("delete exec dir failed : {}", e.getMessage(), e);
}
}
}
/**
* get global paras map
*
* @return map
*/
private Map<String, String> getGlobalParamsMap() {
Map<String, String> globalParamsMap = new HashMap<>(16);
// global params string
String globalParamsStr = taskExecutionContext.getGlobalParams();
if (globalParamsStr != null) {
List<Property> globalParamsList = JSONUtils.toList(globalParamsStr, Property.class);
globalParamsMap.putAll(globalParamsList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)));
}
return globalParamsMap;
}
/**
* kill task
*/
public void kill() {
if (task != null) {
try {
task.cancelApplication(true);
ProcessUtils.killYarnJob(taskExecutionContext);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
}
/**
* download resource file
*
* @param execLocalPath execLocalPath
* @param projectRes projectRes
* @param logger logger
*/
private void downloadResource(String execLocalPath, Map<String, String> projectRes, Logger logger) {
if (MapUtils.isEmpty(projectRes)) {
return;
}
Set<Map.Entry<String, String>> resEntries = projectRes.entrySet();
for (Map.Entry<String, String> resource : resEntries) {
String fullName = resource.getKey();
String tenantCode = resource.getValue();
File resFile = new File(execLocalPath, fullName);
if (!resFile.exists()) {
try {
// query the tenant code of the resource according to the name of the resource
String resHdfsPath = HadoopUtils.getHdfsResourceFileName(tenantCode, fullName);
logger.info("get resource file from hdfs :{}", resHdfsPath);
HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + fullName, false, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new RuntimeException(e.getMessage());
}
} else {
logger.info("file : {} exists ", resFile.getName());
}
}
}
/**
* send an ack to change the status of the task.
*/
private void changeTaskExecutionStatusToRunning() {
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
Command ackCommand = buildAckCommand().convert2Command();
try {
RetryerUtils.retryCall(() -> {
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand);
return Boolean.TRUE;
});
} catch (ExecutionException | RetryException e) {
logger.error(e.getMessage(), e);
}
}
/**
* build ack command.
*
* @return TaskExecuteAckCommand
*/
private TaskExecuteAckCommand buildAckCommand() {
TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode());
ackCommand.setStartTime(taskExecutionContext.getStartTime());
ackCommand.setLogPath(taskExecutionContext.getLogPath());
ackCommand.setHost(taskExecutionContext.getHost());
if (TaskType.SQL.getDesc().equalsIgnoreCase(taskExecutionContext.getTaskType()) || TaskType.PROCEDURE.getDesc().equalsIgnoreCase(taskExecutionContext.getTaskType())) {
ackCommand.setExecutePath(null);
} else {
ackCommand.setExecutePath(taskExecutionContext.getExecutePath());
}
return ackCommand;
}
/**
* get current TaskExecutionContext
*
* @return TaskExecutionContext
*/
public TaskExecutionContext getTaskExecutionContext() {
return this.taskExecutionContext;
}
@Override
public long getDelay(TimeUnit unit) {
return unit.convert(DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(),
taskExecutionContext.getDelayTime() * 60L), TimeUnit.SECONDS);
}
@Override
public int compareTo(Delayed o) {
if (o == null) {
return 1;
}
return Long.compare(this.getDelay(TimeUnit.MILLISECONDS), o.getDelay(TimeUnit.MILLISECONDS));
}
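    // Delay semantics sketch (illustrative): because this class implements Delayed,
    // worker code can park it in a java.util.concurrent.DelayQueue<TaskExecuteThread>;
    // a thread taking from that queue only receives the task once
    // firstSubmitTime + delayTime minutes has elapsed.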
private void preBuildBusinessParams() {
Map<String, Property> paramsMap = new HashMap<>();
        // set the built-in datetime parameter (used to resolve $[yyyyMMdd...] expressions) from the schedule time for history runs and batch complement jobs
if (taskExecutionContext.getScheduleTime() != null) {
Date date = taskExecutionContext.getScheduleTime();
if (CommandType.COMPLEMENT_DATA.getCode() == taskExecutionContext.getCmdTypeIfComplement()) {
date = DateUtils.add(taskExecutionContext.getScheduleTime(), DAY_OF_MONTH, 1);
}
String dateTime = DateUtils.format(date, Constants.PARAMETER_FORMAT_TIME);
Property p = new Property();
p.setValue(dateTime);
p.setProp(Constants.PARAMETER_DATETIME);
paramsMap.put(Constants.PARAMETER_DATETIME, p);
}
taskExecutionContext.setParamsMap(paramsMap);
}
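    // Example (hypothetical schedule time 2021-10-15 00:00:00, and assuming
    // PARAMETER_FORMAT_TIME is the yyyyMMddHHmmss pattern): a complement run shifts
    // the date forward one day, so PARAMETER_DATETIME resolves to "20211016000000",
    // while a normal scheduled run keeps "20211015000000".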
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,529 | [Bug] [Task] Failed to get environment variables when I used the python task. | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
I didn't get the environment variables that are set in the file 'dolphinscheduler_env.sh' when I was running a Python task.
The Python script is as follows:
![image](https://user-images.githubusercontent.com/4928204/137266244-5b054db2-6658-4f09-beaf-40121ab281c4.png)
But the result was not what I wanted. The details are as follows:
```
2021-10-14 14:43:15.919 INFO 86302 --- [-Execute-Thread] o.a.d.p.t.python.PythonCommandExecutor : tenantCode :calvin, task dir:/tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50
2021-10-14 14:43:15.919 INFO 86302 --- [-Execute-Thread] o.a.d.p.t.python.PythonCommandExecutor : generate command file:/tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50/py_36_50.command
2021-10-14 14:43:15.919 INFO 86302 --- [-Execute-Thread] o.a.d.p.t.python.PythonCommandExecutor : #-*- encoding=utf8 -*-
import os
import sys
print("Python version:%s\n" %(sys.version))
print("HADOOP_HOME=%s\n" %(os.getenv('HADOOP_HOME')))
print("HADOOP_CONF_DIR=%s\n" %(os.getenv('HADOOP_CONF_DIR')))
print("SPARK_HOME1=%s\n" %(os.getenv('SPARK_HOME1')))
print("SPARK_HOME2=%s\n" %(os.getenv('SPARK_HOME2')))
print("PYTHON_HOME=%s\n" %(os.getenv('PYTHON_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("HIVE_HOME=%s\n" %(os.getenv('HIVE_HOME')))
print("FLINK_HOME=%s\n" %(os.getenv('FLINK_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("DATAX_HOME=%s\n" %(os.getenv('DATAX_HOME')))
print("PATH=%s\n" %(os.getenv('PATH')))
2021-10-14 14:43:15.926 INFO 86302 --- [-Execute-Thread] [taskAppId=TASK-806053553881088_2-36-50] : task run command: sudo -u calvin python /tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50/py_36_50.command
2021-10-14 14:43:15.930 INFO 86302 --- [-Execute-Thread] [taskAppId=TASK-806053553881088_2-36-50] : process start, process id is: 86802
2021-10-14 14:43:16.297 INFO 86302 --- [-Execute-Thread] [taskAppId=TASK-806053553881088_2-36-50] : process has exited, execute path:/tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50, processId:86802 ,exitStatusCode:0 ,processWaitForStatus:true ,processExitValue:0
2021-10-14 14:43:16.298 INFO 86302 --- [-Execute-Thread] o.a.d.s.worker.runner.TaskExecuteThread : task instance id : 50,task final status : SUCCESS
2021-10-14 14:43:16.298 INFO 86302 --- [-Execute-Thread] o.a.d.s.worker.runner.TaskExecuteThread : develop mode is: false
2021-10-14 14:43:16.301 INFO 86302 --- [-Execute-Thread] o.a.d.s.worker.runner.TaskExecuteThread : exec local path: /tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50 cleared.
2021-10-14 14:43:16.931 INFO 86302 --- [skLogInfo-36_50] [taskAppId=TASK-806053553881088_2-36-50] : -> welcome to use bigdata scheduling system...
Python version:2.7.16 (default, Jun 18 2021, 03:23:53)
[GCC Apple LLVM 12.0.5 (clang-1205.0.19.59.6) [+internal-os, ptrauth-isa=deploy
HADOOP_HOME=None
HADOOP_CONF_DIR=None
SPARK_HOME1=None
SPARK_HOME2=None
PYTHON_HOME=None
JAVA_HOME=None
HIVE_HOME=None
FLINK_HOME=None
JAVA_HOME=None
DATAX_HOME=None
PATH=/usr/local/Cellar/pyenv/1.2.16/libexec:/usr/local/Cellar/pyenv/1.2.16/libexec:/usr/local/opt/nvm/versions/node/v12.4.0/bin:/usr/local/Cellar/pyenv-virtualenv/1.1.3/shims:/Users/calvin/.pyenv/shims:/usr/local/bin:/usr/local/cuda/bin:/Users/calvin/opt/hadoop-2.6.5/bin:/Users/calvin/redis-4.0.8/src:/Users/calvin/btrace/bin:/Library/java/JavaVirtualMachines/jdk1.8.0_191.jdk/Contents/Home/bin:/usr/local/opt/maven3/bin:/Users/calvin/phantomjs-2.1.1/bin:/Users/calvin/sysdir/apache-flume-1.7.0/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/Apple/usr/bin:/Library/Frameworks/Mono.framework/Versions/Current/Commands:/Applications/Wireshark.app/Contents/MacOS:/opt/apache-tinkerpop-gremlin-console-3.4.3/bin:/usr/local/Cellar/postgresql@10/10.11/bin:/usr/local/sbin:/opt/gradle/gradle-7.2/bin:/opt/javacc-javacc-7.0.10/scripts
2021-10-14 14:43:16.931 INFO 86302 --- [skLogInfo-36_50] [taskAppId=TASK-806053553881088_2-36-50] : FINALIZE_SESSION
```
### What you expected to happen
I expect to be able to read the environment variables in the Python script.
### How to reproduce
1. First, create a Python task and set the script content as follows:
```
import os
import sys
print("Python version:%s\n" %(sys.version))
print("HADOOP_HOME=%s\n" %(os.getenv('HADOOP_HOME')))
print("HADOOP_CONF_DIR=%s\n" %(os.getenv('HADOOP_CONF_DIR')))
print("SPARK_HOME1=%s\n" %(os.getenv('SPARK_HOME1')))
print("SPARK_HOME2=%s\n" %(os.getenv('SPARK_HOME2')))
print("PYTHON_HOME=%s\n" %(os.getenv('PYTHON_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("HIVE_HOME=%s\n" %(os.getenv('HIVE_HOME')))
print("FLINK_HOME=%s\n" %(os.getenv('FLINK_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("DATAX_HOME=%s\n" %(os.getenv('DATAX_HOME')))
print("PATH=%s\n" %(os.getenv('PATH')))
```
2. And then You save it and run this task manually.
3. Finally you will see the result without environment variables in the 'dolphinscheduler_env.sh';
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6529 | https://github.com/apache/dolphinscheduler/pull/6531 | 4d0869fcc085f7b4d8af2182bb8b16b70ff6482c | 8200bc14526364fa1908b1df82befc984b0419aa | "2021-10-14T07:01:47Z" | java | "2021-10-18T09:33:16Z" | dolphinscheduler-task-plugin/dolphinscheduler-task-python/src/main/java/org/apache/dolphinscheduler/plugin/task/python/PythonCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.python;
import java.util.Arrays;
import org.apache.dolphinscheduler.plugin.task.api.AbstractCommandExecutor;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import org.apache.commons.io.FileUtils;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* python command executor
*/
public class PythonCommandExecutor extends AbstractCommandExecutor {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(PythonCommandExecutor.class);
/**
* python
*/
public static final String PYTHON = "python";
private static final Pattern PYTHON_PATH_PATTERN = Pattern.compile("/bin/python[\\d.]*$");
/**
* constructor
*
* @param logHandler log handler
* @param taskRequest TaskRequest
* @param logger logger
*/
public PythonCommandExecutor(Consumer<LinkedBlockingQueue<String>> logHandler,
TaskRequest taskRequest,
Logger logger) {
super(logHandler, taskRequest, logger);
}
/**
* build command file path
*
* @return command file path
*/
@Override
protected String buildCommandFilePath() {
return String.format("%s/py_%s.command", taskRequest.getExecutePath(), taskRequest.getTaskAppId());
}
/**
* create command file if not exists
*
* @param execCommand exec command
* @param commandFile command file
* @throws IOException io exception
*/
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("tenantCode :{}, task dir:{}", taskRequest.getTenantCode(), taskRequest.getExecutePath());
if (!Files.exists(Paths.get(commandFile))) {
logger.info("generate command file:{}", commandFile);
StringBuilder sb = new StringBuilder();
sb.append("#-*- encoding=utf8 -*-\n");
sb.append("\n\n");
sb.append(execCommand);
logger.info(sb.toString());
// write data to file
FileUtils.writeStringToFile(new File(commandFile),
sb.toString(),
StandardCharsets.UTF_8);
}
}
/**
     * get the absolute path of the Python command
     * <p>
     * note:
     * PYTHON_HOME configured under common.properties must be the absolute path of the
     * Python executable, not the installation directory itself.
     * <p>
     * for example:
     * if your Python installation is /opt/python3.7/,
     * you must set PYTHON_HOME to /opt/python3.7/python in the file referenced by
     * dolphinscheduler.env.path under common.properties.
*
* @param envPath env path
* @return python home
*/
private static String getPythonHome(String envPath) {
        StringBuilder sb = new StringBuilder();
        try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(envPath)))) {
String line;
while ((line = br.readLine()) != null) {
if (line.contains(PythonConstants.PYTHON_HOME)) {
sb.append(line);
break;
}
}
String result = sb.toString();
if (StringUtils.isEmpty(result)) {
return null;
}
String[] arrs = result.split(PythonConstants.EQUAL_SIGN);
if (arrs.length == 2) {
return arrs[1];
}
} catch (IOException e) {
logger.error("read file failure", e);
}
return null;
}
/**
     * Gets the path of the Python command that will be used to execute the task
* @return python command path
*/
@Override
protected String commandInterpreter() {
String pythonHome = getPythonHome(taskRequest.getEnvFile());
if (StringUtils.isNotBlank(taskRequest.getEnvironmentConfig())) {
pythonHome = getPythonHomeFromEnvironmentConfig(taskRequest.getEnvironmentConfig());
}
return getPythonCommand(pythonHome);
}
/**
* get python command
*
* @param pythonHome python home
* @return python command
*/
public static String getPythonCommand(String pythonHome) {
if (StringUtils.isEmpty(pythonHome)) {
return PYTHON;
}
File file = new File(pythonHome);
if (file.exists() && file.isFile()) {
return pythonHome;
}
if (PYTHON_PATH_PATTERN.matcher(pythonHome).find()) {
return pythonHome;
}
return Paths.get(pythonHome, "/bin/python").toString();
}
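    // Examples (illustrative):
    //   getPythonCommand(null)                           -> "python" (resolved via PATH)
    //   getPythonCommand("/opt/python3.7/bin/python3.7") -> returned unchanged (matches the pattern)
    //   getPythonCommand("/opt/python3.7")               -> "/opt/python3.7/bin/python"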
/**
* get python home from the environment config
*
* @param environmentConfig env config
* @return python home
*/
public static String getPythonHomeFromEnvironmentConfig(String environmentConfig) {
String[] lines = environmentConfig.split("\n");
String pythonHomeConfig = Arrays.stream(lines).filter(line -> line.contains(PythonConstants.PYTHON_HOME)).findFirst().get();
if (StringUtils.isEmpty(pythonHomeConfig)) {
return null;
}
String[] arrs = pythonHomeConfig.split(PythonConstants.EQUAL_SIGN);
if (arrs.length == 2) {
return arrs[1];
}
return null;
}
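    // Example (illustrative): an environment config containing the line
    // "export PYTHON_HOME=/opt/python3.7/bin/python" is split on "=" and yields
    // "/opt/python3.7/bin/python"; a config without any PYTHON_HOME line yields null
    // (the orElse(null) guard above avoids a NoSuchElementException in that case).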
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,529 | [Bug] [Task] Failed to get environment variables when I used the python task. | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
I didn't get the environment variables that are set in the file 'dolphinscheduler_env.sh' when I was running a Python task.
The Python script is as follows:
![image](https://user-images.githubusercontent.com/4928204/137266244-5b054db2-6658-4f09-beaf-40121ab281c4.png)
But the result was not what I wanted. The details are as follows:
```
2021-10-14 14:43:15.919 INFO 86302 --- [-Execute-Thread] o.a.d.p.t.python.PythonCommandExecutor : tenantCode :calvin, task dir:/tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50
2021-10-14 14:43:15.919 INFO 86302 --- [-Execute-Thread] o.a.d.p.t.python.PythonCommandExecutor : generate command file:/tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50/py_36_50.command
2021-10-14 14:43:15.919 INFO 86302 --- [-Execute-Thread] o.a.d.p.t.python.PythonCommandExecutor : #-*- encoding=utf8 -*-
import os
import sys
print("Python version:%s\n" %(sys.version))
print("HADOOP_HOME=%s\n" %(os.getenv('HADOOP_HOME')))
print("HADOOP_CONF_DIR=%s\n" %(os.getenv('HADOOP_CONF_DIR')))
print("SPARK_HOME1=%s\n" %(os.getenv('SPARK_HOME1')))
print("SPARK_HOME2=%s\n" %(os.getenv('SPARK_HOME2')))
print("PYTHON_HOME=%s\n" %(os.getenv('PYTHON_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("HIVE_HOME=%s\n" %(os.getenv('HIVE_HOME')))
print("FLINK_HOME=%s\n" %(os.getenv('FLINK_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("DATAX_HOME=%s\n" %(os.getenv('DATAX_HOME')))
print("PATH=%s\n" %(os.getenv('PATH')))
2021-10-14 14:43:15.926 INFO 86302 --- [-Execute-Thread] [taskAppId=TASK-806053553881088_2-36-50] : task run command: sudo -u calvin python /tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50/py_36_50.command
2021-10-14 14:43:15.930 INFO 86302 --- [-Execute-Thread] [taskAppId=TASK-806053553881088_2-36-50] : process start, process id is: 86802
2021-10-14 14:43:16.297 INFO 86302 --- [-Execute-Thread] [taskAppId=TASK-806053553881088_2-36-50] : process has exited, execute path:/tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50, processId:86802 ,exitStatusCode:0 ,processWaitForStatus:true ,processExitValue:0
2021-10-14 14:43:16.298 INFO 86302 --- [-Execute-Thread] o.a.d.s.worker.runner.TaskExecuteThread : task instance id : 50,task final status : SUCCESS
2021-10-14 14:43:16.298 INFO 86302 --- [-Execute-Thread] o.a.d.s.worker.runner.TaskExecuteThread : develop mode is: false
2021-10-14 14:43:16.301 INFO 86302 --- [-Execute-Thread] o.a.d.s.worker.runner.TaskExecuteThread : exec local path: /tmp/dolphinscheduler/exec/process/723800319311872/806053553881088_2/36/50 cleared.
2021-10-14 14:43:16.931 INFO 86302 --- [skLogInfo-36_50] [taskAppId=TASK-806053553881088_2-36-50] : -> welcome to use bigdata scheduling system...
Python version:2.7.16 (default, Jun 18 2021, 03:23:53)
[GCC Apple LLVM 12.0.5 (clang-1205.0.19.59.6) [+internal-os, ptrauth-isa=deploy
HADOOP_HOME=None
HADOOP_CONF_DIR=None
SPARK_HOME1=None
SPARK_HOME2=None
PYTHON_HOME=None
JAVA_HOME=None
HIVE_HOME=None
FLINK_HOME=None
JAVA_HOME=None
DATAX_HOME=None
PATH=/usr/local/Cellar/pyenv/1.2.16/libexec:/usr/local/Cellar/pyenv/1.2.16/libexec:/usr/local/opt/nvm/versions/node/v12.4.0/bin:/usr/local/Cellar/pyenv-virtualenv/1.1.3/shims:/Users/calvin/.pyenv/shims:/usr/local/bin:/usr/local/cuda/bin:/Users/calvin/opt/hadoop-2.6.5/bin:/Users/calvin/redis-4.0.8/src:/Users/calvin/btrace/bin:/Library/java/JavaVirtualMachines/jdk1.8.0_191.jdk/Contents/Home/bin:/usr/local/opt/maven3/bin:/Users/calvin/phantomjs-2.1.1/bin:/Users/calvin/sysdir/apache-flume-1.7.0/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/Apple/usr/bin:/Library/Frameworks/Mono.framework/Versions/Current/Commands:/Applications/Wireshark.app/Contents/MacOS:/opt/apache-tinkerpop-gremlin-console-3.4.3/bin:/usr/local/Cellar/postgresql@10/10.11/bin:/usr/local/sbin:/opt/gradle/gradle-7.2/bin:/opt/javacc-javacc-7.0.10/scripts
2021-10-14 14:43:16.931 INFO 86302 --- [skLogInfo-36_50] [taskAppId=TASK-806053553881088_2-36-50] : FINALIZE_SESSION
```
### What you expected to happen
I expect to be able to read the environment variables in the Python script.
### How to reproduce
1. First, create a Python task and set the script content as follows:
```
import os
import sys
print("Python version:%s\n" %(sys.version))
print("HADOOP_HOME=%s\n" %(os.getenv('HADOOP_HOME')))
print("HADOOP_CONF_DIR=%s\n" %(os.getenv('HADOOP_CONF_DIR')))
print("SPARK_HOME1=%s\n" %(os.getenv('SPARK_HOME1')))
print("SPARK_HOME2=%s\n" %(os.getenv('SPARK_HOME2')))
print("PYTHON_HOME=%s\n" %(os.getenv('PYTHON_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("HIVE_HOME=%s\n" %(os.getenv('HIVE_HOME')))
print("FLINK_HOME=%s\n" %(os.getenv('FLINK_HOME')))
print("JAVA_HOME=%s\n" %(os.getenv('JAVA_HOME')))
print("DATAX_HOME=%s\n" %(os.getenv('DATAX_HOME')))
print("PATH=%s\n" %(os.getenv('PATH')))
```
2. And then You save it and run this task manually.
3. Finally you will see the result without environment variables in the 'dolphinscheduler_env.sh';
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6529 | https://github.com/apache/dolphinscheduler/pull/6531 | 4d0869fcc085f7b4d8af2182bb8b16b70ff6482c | 8200bc14526364fa1908b1df82befc984b0419aa | "2021-10-14T07:01:47Z" | java | "2021-10-18T09:33:16Z" | dolphinscheduler-task-plugin/dolphinscheduler-task-python/src/main/java/org/apache/dolphinscheduler/plugin/task/python/PythonTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.python;
import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor;
import org.apache.dolphinscheduler.plugin.task.api.TaskException;
import org.apache.dolphinscheduler.plugin.task.api.TaskResponse;
import org.apache.dolphinscheduler.plugin.task.util.MapUtils;
import org.apache.dolphinscheduler.spi.task.AbstractParameters;
import org.apache.dolphinscheduler.spi.task.Property;
import org.apache.dolphinscheduler.spi.task.TaskConstants;
import org.apache.dolphinscheduler.spi.task.paramparser.ParamUtils;
import org.apache.dolphinscheduler.spi.task.paramparser.ParameterUtils;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
import java.util.HashMap;
import java.util.Map;
/**
* python task
*/
public class PythonTask extends AbstractTaskExecutor {
/**
* python parameters
*/
private PythonParameters pythonParameters;
/**
* task dir
*/
private String taskDir;
/**
* python command executor
*/
private PythonCommandExecutor pythonCommandExecutor;
private TaskRequest taskRequest;
/**
* constructor
*
* @param taskRequest taskRequest
*/
public PythonTask(TaskRequest taskRequest) {
super(taskRequest);
this.taskRequest = taskRequest;
this.pythonCommandExecutor = new PythonCommandExecutor(this::logHandle,
taskRequest,
logger);
}
@Override
public void init() {
logger.info("python task params {}", taskRequest.getTaskParams());
pythonParameters = JSONUtils.parseObject(taskRequest.getTaskParams(), PythonParameters.class);
if (!pythonParameters.checkParameters()) {
throw new TaskException("python task params is not valid");
}
}
@Override
public String getPreScript() {
String rawPythonScript = pythonParameters.getRawScript().replaceAll("\\r\\n", "\n");
try {
rawPythonScript = convertPythonScriptPlaceholders(rawPythonScript);
} catch (StringIndexOutOfBoundsException e) {
logger.error("setShareVar field format error, raw python script : {}", rawPythonScript);
}
return rawPythonScript;
}
@Override
public void handle() throws Exception {
try {
// construct process
String command = buildCommand();
TaskResponse taskResponse = pythonCommandExecutor.run(command);
setExitStatusCode(taskResponse.getExitStatusCode());
setAppIds(taskResponse.getAppIds());
setProcessId(taskResponse.getProcessId());
setVarPool(pythonCommandExecutor.getVarPool());
} catch (Exception e) {
logger.error("python task failure", e);
setExitStatusCode(TaskConstants.EXIT_CODE_FAILURE);
throw new TaskException("run python task error", e);
}
}
@Override
public void cancelApplication(boolean cancelApplication) throws Exception {
// cancel process
pythonCommandExecutor.cancelApplication();
}
@Override
public AbstractParameters getParameters() {
return pythonParameters;
}
/**
* convertPythonScriptPlaceholders
*
* @param rawScript rawScript
* @return String
* @throws StringIndexOutOfBoundsException StringIndexOutOfBoundsException
*/
private static String convertPythonScriptPlaceholders(String rawScript) throws StringIndexOutOfBoundsException {
int len = "${setShareVar(${".length();
int scriptStart = 0;
while ((scriptStart = rawScript.indexOf("${setShareVar(${", scriptStart)) != -1) {
int start = -1;
int end = rawScript.indexOf('}', scriptStart + len);
String prop = rawScript.substring(scriptStart + len, end);
start = rawScript.indexOf(',', end);
end = rawScript.indexOf(')', start);
String value = rawScript.substring(start + 1, end);
start = rawScript.indexOf('}', start) + 1;
end = rawScript.length();
String replaceScript = String.format("print(\"${{setValue({},{})}}\".format(\"%s\",%s))", prop, value);
rawScript = rawScript.substring(0, scriptStart) + replaceScript + rawScript.substring(start, end);
scriptStart += replaceScript.length();
}
return rawScript;
}
/**
* build command
*
* @return raw python script
* @throws Exception exception
*/
private String buildCommand() throws Exception {
String rawPythonScript = pythonParameters.getRawScript().replaceAll("\\r\\n", "\n");
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(taskRequest, pythonParameters);
if (MapUtils.isEmpty(paramsMap)) {
paramsMap = new HashMap<>();
}
if (MapUtils.isNotEmpty(taskRequest.getParamsMap())) {
paramsMap.putAll(taskRequest.getParamsMap());
}
rawPythonScript = ParameterUtils.convertParameterPlaceholders(rawPythonScript, ParamUtils.convert(paramsMap));
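// NOTE (issue #6529 above): only placeholder substitution happens here; nothing
// exports the variables from dolphinscheduler_env.sh into the Python process,
// which matches the all-None output reported in the issue.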
logger.info("raw python script : {}", pythonParameters.getRawScript());
logger.info("task dir : {}", taskDir);
return rawPythonScript;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,563 | [Bug] [API] Switch node cannot get branch flow | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
1. The switch node cannot get the branch flows.
2. After saving the switch node, its conditions are not saved.
![image](https://user-images.githubusercontent.com/29528966/138046176-bf27bcc4-0bc5-4080-9341-e802dfc6e9ff.png)
### What you expected to happen
The switch node should be able to get the branch flows.
### How to reproduce
1. Create a switch node.
2. Add conditions and a branch flow.
3. Save the switch node.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6563 | https://github.com/apache/dolphinscheduler/pull/6565 | 09657d8576cd04d7008f78c295c872f7b4a9c5c3 | 5dfb0163c2095905aa0969a9c856e61812db729b | "2021-10-20T07:21:54Z" | java | "2021-10-20T08:55:38Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="form-model-wrapper" v-clickoutside="_handleClose">
<div class="title-box">
<span class="name">{{ $t("Current node settings") }}</span>
<span class="go-subtask">
<!-- Component can't pop up box to do component processing -->
<m-log
v-if="type === 'instance' && taskInstance"
:item="backfillItem"
:task-instance-id="taskInstance.id"
>
<template slot="history"
><a href="javascript:" @click="_seeHistory"
><em class="ansicon el-icon-alarm-clock"></em
><em>{{ $t("View history") }}</em></a
></template
>
<template slot="log"
><a href="javascript:"
><em class="ansicon el-icon-document"></em
><em>{{ $t("View log") }}</em></a
></template
>
</m-log>
<a href="javascript:" @click="_goSubProcess" v-if="_isGoSubProcess"
><em class="ansicon ri-node-tree"></em
><em>{{ $t("Enter this child node") }}</em></a
>
</span>
</div>
<div class="content-box" v-if="isContentBox">
<div class="form-model">
<!-- Reference from task -->
<!-- <reference-from-task :taskType="nodeData.taskType" /> -->
<!-- Node name -->
<m-list-box>
<div slot="text">{{ $t("Node name") }}</div>
<div slot="content">
<el-input
type="text"
v-model="name"
size="small"
:disabled="isDetails"
:placeholder="$t('Please enter name (required)')"
maxlength="100"
@blur="_verifName()"
>
</el-input>
</div>
</m-list-box>
<!-- Running sign -->
<m-list-box>
<div slot="text">{{ $t("Run flag") }}</div>
<div slot="content">
<el-radio-group v-model="runFlag" size="small">
<el-radio :label="'YES'" :disabled="isDetails">{{
$t("Normal")
}}</el-radio>
<el-radio :label="'NO'" :disabled="isDetails">{{
$t("Prohibition execution")
}}</el-radio>
</el-radio-group>
</div>
</m-list-box>
<!-- description -->
<m-list-box>
<div slot="text">{{ $t("Description") }}</div>
<div slot="content">
<el-input
:rows="2"
type="textarea"
:disabled="isDetails"
v-model="desc"
:placeholder="$t('Please enter description')"
>
</el-input>
</div>
</m-list-box>
<!-- Task priority -->
<m-list-box>
<div slot="text">{{ $t("Task priority") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<m-priority v-model="taskInstancePriority"></m-priority>
</span>
</div>
</m-list-box>
<!-- Worker group and environment -->
<m-list-box>
<div slot="text">{{ $t("Worker group") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<m-worker-groups v-model="workerGroup"></m-worker-groups>
</span>
<span class="text-b">{{ $t("Environment Name") }}</span>
<m-related-environment
v-model="environmentCode"
:workerGroup="workerGroup"
:isNewCreate="isNewCreate"
v-on:environmentCodeEvent="_onUpdateEnvironmentCode"
></m-related-environment>
</div>
</m-list-box>
<!-- Number of failed retries -->
<m-list-box v-if="nodeData.taskType !== 'SUB_PROCESS'">
<div slot="text">{{ $t("Number of failed retries") }}</div>
<div slot="content">
<m-select-input
v-model="maxRetryTimes"
:list="[0, 1, 2, 3, 4]"
></m-select-input>
<span>({{ $t("Times") }})</span>
<span class="text-b">{{ $t("Failed retry interval") }}</span>
<m-select-input
v-model="retryInterval"
:list="[1, 10, 30, 60, 120]"
></m-select-input>
<span>({{ $t("Minute") }})</span>
</div>
</m-list-box>
<!-- Delay execution time -->
<m-list-box
v-if="
nodeData.taskType !== 'SUB_PROCESS' &&
nodeData.taskType !== 'CONDITIONS' &&
nodeData.taskType !== 'DEPENDENT' &&
nodeData.taskType !== 'SWITCH'
"
>
<div slot="text">{{ $t("Delay execution time") }}</div>
<div slot="content">
<m-select-input
v-model="delayTime"
:list="[0, 1, 5, 10]"
></m-select-input>
<span>({{ $t("Minute") }})</span>
</div>
</m-list-box>
<!-- Branch flow -->
<m-list-box v-if="nodeData.taskType === 'CONDITIONS'">
<div slot="text">{{ $t("State") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<el-select
style="width: 157px"
size="small"
v-model="successNode"
:disabled="true"
>
<el-option
v-for="item in stateList"
:key="item.value"
:value="item.value"
:label="item.label"
></el-option>
</el-select>
</span>
<span class="text-b" style="padding-left: 38px">{{
$t("Branch flow")
}}</span>
<el-select
style="width: 157px"
size="small"
v-model="successBranch"
clearable
:disabled="isDetails"
>
<el-option
v-for="item in postTasks"
:key="item.code"
:value="item.name"
:label="item.name"
></el-option>
</el-select>
</div>
</m-list-box>
<m-list-box v-if="nodeData.taskType === 'CONDITIONS'">
<div slot="text">{{ $t("State") }}</div>
<div slot="content">
<span class="label-box" style="width: 193px; display: inline-block">
<el-select
style="width: 157px"
size="small"
v-model="failedNode"
:disabled="true"
>
<el-option
v-for="item in stateList"
:key="item.value"
:value="item.value"
:label="item.label"
></el-option>
</el-select>
</span>
<span class="text-b" style="padding-left: 38px">{{
$t("Branch flow")
}}</span>
<el-select
style="width: 157px"
size="small"
v-model="failedBranch"
clearable
:disabled="isDetails"
>
<el-option
v-for="item in postTasks"
:key="item.code"
:value="item.name"
:label="item.name"
></el-option>
</el-select>
</div>
</m-list-box>
<div v-if="backfillRefresh">
<!-- Task timeout alarm -->
<m-timeout-alarm
v-if="nodeData.taskType !== 'DEPENDENT'"
ref="timeout"
:backfill-item="backfillItem"
@on-timeout="_onTimeout"
>
</m-timeout-alarm>
<!-- Dependent timeout alarm -->
<m-dependent-timeout
v-if="nodeData.taskType === 'DEPENDENT'"
ref="dependentTimeout"
:backfill-item="backfillItem"
@on-timeout="_onDependentTimeout"
>
</m-dependent-timeout>
<!-- shell node -->
<m-shell
v-if="nodeData.taskType === 'SHELL'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SHELL"
:backfill-item="backfillItem"
>
</m-shell>
<!-- sub_process node -->
<m-sub-process
v-if="nodeData.taskType === 'SUB_PROCESS'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
@on-set-process-name="_onSetProcessName"
ref="SUB_PROCESS"
:backfill-item="backfillItem"
>
</m-sub-process>
<!-- procedure node -->
<m-procedure
v-if="nodeData.taskType === 'PROCEDURE'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="PROCEDURE"
:backfill-item="backfillItem"
>
</m-procedure>
<!-- sql node -->
<m-sql
v-if="nodeData.taskType === 'SQL'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SQL"
:create-node-id="nodeData.id"
:backfill-item="backfillItem"
>
</m-sql>
<!-- spark node -->
<m-spark
v-if="nodeData.taskType === 'SPARK'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SPARK"
:backfill-item="backfillItem"
>
</m-spark>
<m-flink
v-if="nodeData.taskType === 'FLINK'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="FLINK"
:backfill-item="backfillItem"
>
</m-flink>
<!-- mr node -->
<m-mr
v-if="nodeData.taskType === 'MR'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="MR"
:backfill-item="backfillItem"
>
</m-mr>
<!-- python node -->
<m-python
v-if="nodeData.taskType === 'PYTHON'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="PYTHON"
:backfill-item="backfillItem"
>
</m-python>
<!-- dependent node -->
<m-dependent
v-if="nodeData.taskType === 'DEPENDENT'"
@on-dependent="_onDependent"
@on-cache-dependent="_onCacheDependent"
ref="DEPENDENT"
:backfill-item="backfillItem"
>
</m-dependent>
<m-http
v-if="nodeData.taskType === 'HTTP'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="HTTP"
:backfill-item="backfillItem"
>
</m-http>
<m-datax
v-if="nodeData.taskType === 'DATAX'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="DATAX"
:backfill-item="backfillItem"
>
</m-datax>
<m-pigeon
v-if="nodeData.taskType === 'PIGEON'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
:backfill-item="backfillItem"
ref="PIGEON">
</m-pigeon>
<m-sqoop
v-if="nodeData.taskType === 'SQOOP'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="SQOOP"
:backfill-item="backfillItem"
>
</m-sqoop>
<m-conditions
v-if="nodeData.taskType === 'CONDITIONS'"
ref="CONDITIONS"
@on-dependent="_onDependent"
@on-cache-dependent="_onCacheDependent"
:backfill-item="backfillItem"
:prev-tasks="prevTasks"
>
</m-conditions>
<m-switch
v-if="nodeData.taskType === 'SWITCH'"
ref="SWITCH"
@on-switch-result="_onSwitchResult"
:backfill-item="backfillItem"
:nodeData="nodeData"
></m-switch>
<!-- waterdrop node -->
<m-waterdrop
v-if="nodeData.taskType === 'WATERDROP'"
@on-params="_onParams"
@on-cache-params="_onCacheParams"
ref="WATERDROP"
:backfill-item="backfillItem"
>
</m-waterdrop>
</div>
<!-- Pre-tasks in workflow -->
<m-pre-tasks
ref="preTasks"
v-if="['SHELL', 'SUB_PROCESS'].indexOf(nodeData.taskType) > -1"
:code="code"
/>
</div>
</div>
<div class="bottom-box">
<div class="submit" style="background: #fff">
<el-button type="text" size="small" id="cancelBtn">
{{ $t("Cancel") }}
</el-button>
<el-button
type="primary"
size="small"
round
:loading="spinnerLoading"
@click="ok()"
:disabled="isDetails"
>{{ spinnerLoading ? $t("Loading...") : $t("Confirm add") }}
</el-button>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import { mapActions, mapState } from 'vuex'
import mLog from './log'
import mMr from './tasks/mr'
import mSql from './tasks/sql'
import i18n from '@/module/i18n'
import mListBox from './tasks/_source/listBox'
import mShell from './tasks/shell'
import mWaterdrop from './tasks/waterdrop'
import mSpark from './tasks/spark'
import mFlink from './tasks/flink'
import mPython from './tasks/python'
import mProcedure from './tasks/procedure'
import mDependent from './tasks/dependent'
import mHttp from './tasks/http'
import mDatax from './tasks/datax'
import mPigeon from './tasks/pigeon'
import mConditions from './tasks/conditions'
import mSwitch from './tasks/switch.vue'
import mSqoop from './tasks/sqoop'
import mSubProcess from './tasks/sub_process'
import mSelectInput from './_source/selectInput'
import mTimeoutAlarm from './_source/timeoutAlarm'
import mDependentTimeout from './_source/dependentTimeout'
import mWorkerGroups from './_source/workerGroups'
import mRelatedEnvironment from './_source/relatedEnvironment'
import mPreTasks from './tasks/pre_tasks'
import clickoutside from '@/module/util/clickoutside'
import disabledState from '@/module/mixin/disabledState'
import mPriority from '@/module/components/priority/priority'
import { findComponentDownward } from '@/module/util/'
// import ReferenceFromTask from './_source/referenceFromTask.vue'
export default {
name: 'form-model',
data () {
return {
// loading
spinnerLoading: false,
// node name
name: '',
// description
desc: '',
// Node echo data
backfillItem: {},
cacheBackfillItem: {},
// Resource(list)
resourcesList: [],
successNode: 'success',
failedNode: 'failed',
successBranch: '',
failedBranch: '',
conditionResult: {
successNode: [],
failedNode: []
},
switchResult: {},
// dependence
dependence: {},
// cache dependence
cacheDependence: {},
// task code
code: 0,
// Current node params data
params: {},
// Running sign
runFlag: 'YES',
// Controls when the content box renders; it is shown only after backfill completes, to avoid the double-echo problem caused by node hooks running before the data is ready
isContentBox: false,
// Number of failed retries
maxRetryTimes: '0',
// Failure retry interval
retryInterval: '1',
// Delay execution time
delayTime: '0',
// Task timeout alarm
timeout: {},
// (For Dependent nodes) Wait start timeout alarm
waitStartTimeout: {},
// Task priority
taskInstancePriority: 'MEDIUM',
// worker group id
workerGroup: 'default',
// selected environment
environmentCode: '',
selectedWorkerGroup: '',
stateList: [
{
value: 'success',
label: `${i18n.$t('Success')}`
},
{
value: 'failed',
label: `${i18n.$t('Failed')}`
}
],
// for CONDITIONS
postTasks: [],
prevTasks: [],
// refresh part of the formModel, after set backfillItem outside
backfillRefresh: true,
// whether this is a new Task
isNewCreate: true
}
},
provide () {
return {
formModel: this
}
},
/**
* Click on events that are not generated internally by the component
*/
directives: { clickoutside },
mixins: [disabledState],
props: {
nodeData: Object,
type: {
type: String,
default: ''
}
},
inject: ['dagChart'],
methods: {
...mapActions('dag', ['getTaskInstanceList']),
taskToBackfillItem (task) {
return {
code: task.code,
conditionResult: task.taskParams.conditionResult,
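// NOTE (issue #6563 above): taskParams.switchResult is not mapped here, so the
// switch form has nothing to backfill its branch flows from.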
delayTime: task.delayTime,
dependence: task.taskParams.dependence,
desc: task.description,
id: task.id,
maxRetryTimes: task.failRetryTimes,
name: task.name,
params: _.omit(task.taskParams, [
'conditionResult',
'dependence',
'waitStartTimeout'
]),
retryInterval: task.failRetryInterval,
runFlag: task.flag,
taskInstancePriority: task.taskPriority,
timeout: {
interval: task.timeout,
strategy: task.timeoutNotifyStrategy,
enable: task.timeoutFlag === 'OPEN'
},
type: task.taskType,
waitStartTimeout: task.taskParams.waitStartTimeout,
workerGroup: task.workerGroup,
environmentCode: task.environmentCode
}
},
/**
* depend
*/
_onDependent (o) {
this.dependence = Object.assign(this.dependence, {}, o)
},
_onSwitchResult (o) {
this.switchResult = o
},
/**
* cache dependent
*/
_onCacheDependent (o) {
this.cacheDependence = Object.assign(this.cacheDependence, {}, o)
},
/**
* Task timeout alarm
*/
_onTimeout (o) {
this.timeout = Object.assign(this.timeout, {}, o)
},
/**
* Dependent timeout alarm
*/
_onDependentTimeout (o) {
this.timeout = Object.assign(this.timeout, {}, o.waitCompleteTimeout)
this.waitStartTimeout = Object.assign(
this.waitStartTimeout,
{},
o.waitStartTimeout
)
},
/**
* Click external to close the current component
*/
_handleClose () {
// this.close()
},
/**
* Jump to task instance
*/
_seeHistory () {
this.$emit('seeHistory', this.backfillItem.name)
},
/**
* Enter the child node to judge the process instance or the process definition
* @param type = instance
*/
_goSubProcess () {
if (_.isEmpty(this.backfillItem)) {
this.$message.warning(
`${i18n.$t(
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process'
)}`
)
return
}
if (this.router.history.current.name === 'projects-instance-details') {
if (!this.taskInstance) {
this.$message.warning(
`${i18n.$t(
'The task has not been executed and cannot enter the sub-Process'
)}`
)
return
}
this.store
.dispatch('dag/getSubProcessId', { taskId: this.taskInstance.id })
.then((res) => {
this.$emit('onSubProcess', {
subInstanceId: res.data.subProcessInstanceId,
fromThis: this
})
})
.catch((e) => {
this.$message.error(e.msg || '')
})
} else {
const processDefinitionId =
this.backfillItem.params.processDefinitionId
const process = this.processListS.find(
(process) => process.processDefinition.id === processDefinitionId
)
this.$emit('onSubProcess', {
subProcessCode: process.processDefinition.code,
fromThis: this
})
}
},
_onUpdateWorkerGroup (o) {
this.selectedWorkerGroup = o
},
/**
* return params
*/
_onParams (o) {
this.params = Object.assign({}, o)
},
_onUpdateEnvironmentCode (o) {
this.environmentCode = o
},
/**
* _onCacheParams is reserved
*/
_onCacheParams (o) {
this.params = Object.assign(this.params, {}, o)
},
/**
* verification name
*/
_verifName () {
if (!_.trim(this.name)) {
this.$message.warning(`${i18n.$t('Please enter name (required)')}`)
return false
}
if (
this.successBranch !== '' &&
this.successBranch !== null &&
this.successBranch === this.failedBranch
) {
this.$message.warning(
`${i18n.$t(
'Cannot select the same node for successful branch flow and failed branch flow'
)}`
)
return false
}
if (this.name === this.backfillItem.name) {
return true
}
return true
},
_verifWorkGroup () {
let item = this.store.state.security.workerGroupsListAll.find((item) => {
return item.id === this.workerGroup
})
if (item === undefined) {
this.$message.warning(
`${i18n.$t(
'The Worker group no longer exists, please select the correct Worker group!'
)}`
)
return false
}
return true
},
/**
* Global verification procedure
*/
_verification () {
// Verify name
if (!this._verifName()) {
return
}
// verify worker group
if (!this._verifWorkGroup()) {
return
}
// Verify task alarm parameters
if (this.nodeData.taskType === 'DEPENDENT') {
if (!this.$refs.dependentTimeout._verification()) {
return
}
} else {
if (!this.$refs.timeout._verification()) {
return
}
}
// Verify node parameters
if (!this.$refs[this.nodeData.taskType]._verification()) {
return
}
// set preTask
if (this.$refs.preTasks) {
this.$refs.preTasks.setPreNodes()
}
this.conditionResult.successNode[0] = this.successBranch
this.conditionResult.failedNode[0] = this.failedBranch
this.$emit('addTaskInfo', {
item: {
code: this.nodeData.id,
name: this.name,
description: this.desc,
taskType: this.nodeData.taskType,
taskParams: {
...this.params,
dependence: this.cacheDependence,
conditionResult: this.conditionResult,
waitStartTimeout: this.waitStartTimeout
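// NOTE (issue #6563 above): this.switchResult (collected via _onSwitchResult)
// is never added to taskParams, which would explain why switch conditions are
// not persisted when the node is saved.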
},
flag: this.runFlag,
taskPriority: this.taskInstancePriority,
workerGroup: this.workerGroup,
failRetryTimes: this.maxRetryTimes,
failRetryInterval: this.retryInterval,
timeoutFlag: this.timeout.enable ? 'OPEN' : 'CLOSE',
timeoutNotifyStrategy: this.timeout.strategy,
timeout: this.timeout.interval || 0,
delayTime: this.delayTime,
environmentCode: this.environmentCode || -1,
status: this.status,
branch: this.branch
},
fromThis: this
})
// set run flag
this._setRunFlag()
// set edge label
this._setEdgeLabel()
},
/**
* Sub-workflow selected node echo name
*/
_onSetProcessName (name) {
this.name = name
},
/**
* set run flag
* TODO
*/
_setRunFlag () {},
_setEdgeLabel () {
if (this.successBranch || this.failedBranch) {
const canvas = findComponentDownward(this.dagChart, 'dag-canvas')
const edges = canvas.getEdges()
const successTask = this.postTasks.find(
(t) => t.name === this.successBranch
)
const failedTask = this.postTasks.find(
(t) => t.name === this.failedBranch
)
const sEdge = edges.find(
(edge) =>
successTask &&
edge.sourceId === this.code &&
edge.targetId === successTask.code
)
const fEdge = edges.find(
(edge) =>
failedTask &&
edge.sourceId === this.code &&
edge.targetId === failedTask.code
)
sEdge && canvas.setEdgeLabel(sEdge.id, this.$t('Success'))
fEdge && canvas.setEdgeLabel(fEdge.id, this.$t('Failed'))
}
},
/**
* Submit verification
*/
ok () {
this._verification()
},
/**
* Close and destroy component and component internal events
*/
close () {
let flag = false
// Delete node without storage
if (!this.backfillItem.name) {
flag = true
}
this.isContentBox = false
// flag Whether to delete a node this.$destroy()
this.$emit('close', {
item: this.cacheBackfillItem,
flag: flag,
fromThis: this
})
},
backfill (backfillItem) {
const o = backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.code = o.code
this.name = o.name
this.taskInstancePriority = o.taskInstancePriority
this.runFlag = o.runFlag || 'YES'
this.desc = o.desc
this.maxRetryTimes = o.maxRetryTimes
this.retryInterval = o.retryInterval
this.delayTime = o.delayTime
if (o.conditionResult) {
this.successBranch = o.conditionResult.successNode[0]
this.failedBranch = o.conditionResult.failedNode[0]
}
// If the workergroup has been deleted, set the default workergroup
for (
let i = 0;
i < this.store.state.security.workerGroupsListAll.length;
i++
) {
let workerGroup = this.store.state.security.workerGroupsListAll[i].id
if (o.workerGroup === workerGroup) {
break
}
}
if (o.workerGroup === undefined) {
this.store
.dispatch('dag/getTaskInstanceList', {
pageSize: 10,
pageNo: 1,
processInstanceId: this.nodeData.instanceId,
name: o.name
})
.then((res) => {
this.workerGroup = res.totalList[0].workerGroup
})
} else {
this.workerGroup = o.workerGroup
}
this.environmentCode = o.environmentCode === -1 ? '' : o.environmentCode
this.params = o.params || {}
this.dependence = o.dependence || {}
this.cacheDependence = o.dependence || {}
} else {
this.workerGroup = this.store.state.security.workerGroupsListAll[0].id
}
this.cacheBackfillItem = JSON.parse(JSON.stringify(o))
this.isContentBox = true
}
},
created () {
// Backfill data
let taskList = this.store.state.dag.tasks
let o = {}
if (taskList.length) {
taskList.forEach((task) => {
if (task.code === this.nodeData.id) {
const backfillItem = this.taskToBackfillItem(task)
o = backfillItem
this.backfillItem = backfillItem
this.isNewCreate = false
}
})
}
this.code = this.nodeData.id
this.backfill(o)
if (this.dagChart) {
const canvas = findComponentDownward(this.dagChart, 'dag-canvas')
const postNodes = canvas.getPostNodes(this.code)
const prevNodes = canvas.getPrevNodes(this.code)
const buildTask = (node) => ({
code: node.id,
name: node.data.taskName,
type: node.data.taskType
})
this.postTasks = postNodes.map(buildTask)
this.prevTasks = prevNodes.map(buildTask)
}
},
mounted () {
let self = this
$('#cancelBtn').mousedown(function (event) {
event.preventDefault()
self.close()
})
},
updated () {},
beforeDestroy () {},
destroyed () {},
computed: {
...mapState('dag', ['processListS', 'taskInstances']),
/**
* Child workflow entry show/hide
*/
_isGoSubProcess () {
return this.nodeData.taskType === 'SUB_PROCESS' && this.name
},
taskInstance () {
if (this.taskInstances.length > 0) {
return this.taskInstances.find(
(instance) => instance.taskCode === this.nodeData.id
)
}
return null
}
},
components: {
mListBox,
mMr,
mShell,
mWaterdrop,
mSubProcess,
mProcedure,
mSql,
mLog,
mSpark,
mFlink,
mPython,
mDependent,
mHttp,
mDatax,
mPigeon,
mSqoop,
mConditions,
mSwitch,
mSelectInput,
mTimeoutAlarm,
mDependentTimeout,
mPriority,
mWorkerGroups,
mRelatedEnvironment,
mPreTasks
// ReferenceFromTask
}
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./formModel";
.ans-radio-disabled {
.ans-radio-inner:after {
background-color: #6f8391;
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,563 | [Bug] [API] Switch node cannot get branch flow | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
1. The switch node cannot get the branch flows.
2. After saving the switch node, its conditions are not saved.
![image](https://user-images.githubusercontent.com/29528966/138046176-bf27bcc4-0bc5-4080-9341-e802dfc6e9ff.png)
### What you expected to happen
The switch node should be able to get the branch flows.
### How to reproduce
1. Create a switch node.
2. Add conditions and a branch flow.
3. Save the switch node.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6563 | https://github.com/apache/dolphinscheduler/pull/6565 | 09657d8576cd04d7008f78c295c872f7b4a9c5c3 | 5dfb0163c2095905aa0969a9c856e61812db729b | "2021-10-20T07:21:54Z" | java | "2021-10-20T08:55:38Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/switch.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="conditions-model">
<m-list-box>
<div slot="text">{{$t('condition')}}</div>
<div slot="content">
<div class="dep-opt">
<a href="javascript:"
@click="!isDetails && _addDep()"
class="add-dep">
<em v-if="!isLoading" class="el-icon-circle-plus-outline" :class="_isDetails" data-toggle="tooltip" :title="$t('Add')"></em>
<em v-if="isLoading" class="el-icon-loading as as-spin" data-toggle="tooltip" :title="$t('Add')"></em>
</a>
</div>
<div class="dep-list-model">
<div v-for="(el,index) in dependItemList" :key='index' class="switch-list">
<label style="display:block">
<textarea :id="`code-switch-mirror${index}`" :name="`code-switch-mirror${index}`" style="opacity: 0;">
</textarea>
</label>
<span class="text-b" style="padding-left: 0">{{$t('Branch flow')}}</span>
<el-select style="width: 157px;" size="small" v-model="el.nextNode" clearable>
<el-option v-for="item in nodeData.rearList" :key="item.value" :value="item.value" :label="item.label"></el-option>
</el-select>
<span class="operation">
<a href="javascript:" class="delete" @click="!isDetails && _removeDep(index)" v-if="index === (dependItemList.length - 1)">
<em class="iconfont el-icon-delete" :class="_isDetails" data-toggle="tooltip" data-container="body" :title="$t('Delete')" ></em>
</a>
<a href="javascript:" class="add" @click="!isDetails && _addDep()" v-if="index === (dependItemList.length - 1)">
<em class="iconfont el-icon-circle-plus-outline" :class="_isDetails" data-toggle="tooltip" data-container="body" :title="$t('Add')"></em>
</a>
</span>
</div>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Branch flow')}}</div>
<div slot="content">
<el-select style="width: 157px;" size="small" v-model="nextNode" clearable :disabled="isDetails">
<el-option v-for="item in nodeData.rearList" :key="item.value" :value="item.value" :label="item.label"></el-option>
</el-select>
</div>
</m-list-box>
</div>
</template>
<script>
import _ from 'lodash'
import mListBox from './_source/listBox'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editArray = []
export default {
name: 'dependence',
data () {
return {
nextNode: '',
relation: 'AND',
dependItemList: [],
editArray: [],
isLoading: false,
oldList: []
}
},
mixins: [disabledState],
props: {
nodeData: Object,
backfillItem: Object,
rearList: Array
},
methods: {
editList (index) {
// editor
const self = this
const editor = codemirror(`code-switch-mirror${index}`, {
mode: 'shell',
readOnly: this.isInstance
}, this)
editor.on('change', function () {
const outputList = _.cloneDeep(self.dependItemList)
outputList[index].condition = editor.getValue()
self.dependItemList = outputList
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
editor.on('keypress', this.keypress)
editor.setValue(this.dependItemList[index].condition || '')
editor.setSize('580px', '60px')
editArray.push(editor)
this.oldList = _.cloneDeep(this.dependItemList)
return editArray
},
_addDep () {
if (!this.isLoading) {
this.isLoading = true
this.dependItemList.push({
condition: '',
nextNode: ''
})
let dependItemListLen = this.dependItemList.length
if (dependItemListLen > 0) {
setTimeout(() => {
this.editList(dependItemListLen - 1)
this.isLoading = false
}, 200)
}
this._removeTip()
}
},
_removeDep (i) {
this.dependItemList.splice(i, 1)
this._removeTip()
},
_removeTip () {
$('body').find('.tooltip.fade.top.in').remove()
},
_verification () {
let flag = this.dependItemList.some((item) => {
return !item.condition
})
if (flag) {
this.$message.warning(`${this.$t('The condition content cannot be empty')}`)
return false
}
let params = {
dependTaskList: this.dependItemList || [],
nextNode: this.nextNode || ''
}
this.$emit('on-switch-result', params)
return true
}
},
watch: {},
beforeCreate () {
},
created () {
const o = this.backfillItem
if (!_.isEmpty(o)) {
let switchResult = o.switchResult || {}
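// NOTE (issue #6563 above): switchResult is never written into the saved task
// nor mapped in formModel's taskToBackfillItem, so this backfill is effectively
// always empty.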
this.dependItemList = _.cloneDeep(switchResult.dependTaskList) || []
this.nextNode = _.cloneDeep(switchResult.nextNode) || ''
}
},
mounted () {
if (this.dependItemList && this.dependItemList.length > 0) {
setTimeout(() => {
this.dependItemList.forEach((item, index) => {
this.editList(index)
})
})
}
},
destroyed () {
},
computed: {
},
components: { mListBox }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.conditions-model {
margin-top: -10px;
.dep-opt {
margin-bottom: 10px;
padding-top: 3px;
line-height: 24px;
.add-dep {
color: #0097e0;
margin-right: 10px;
i {
font-size: 18px;
vertical-align: middle;
}
}
}
.dep-list-model{
position: relative;
min-height: 0px;
.switch-list {
margin-bottom: 6px;
.operation {
padding-left: 4px;
a {
i {
font-size: 18px;
vertical-align: middle;
}
}
.delete {
color: #ff0000;
}
.add {
color: #0097e0;
}
}
}
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,566 | [Bug] [UI] import process definition url error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
import process definition url error
![image](https://user-images.githubusercontent.com/87303815/138070966-89ca2768-095d-4745-8dc0-a52f36183a90.png)
### What you expected to happen
The import should succeed.
### How to reproduce
![image](https://user-images.githubusercontent.com/87303815/138071084-1246a7dd-449e-4f15-8fd9-48897cff0275.png)
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6566 | https://github.com/apache/dolphinscheduler/pull/6567 | 5dfb0163c2095905aa0969a9c856e61812db729b | fa102afad0609b623aa60f1f2452f0007f71e431 | "2021-10-20T09:50:59Z" | java | "2021-10-20T10:58:23Z" | dolphinscheduler-ui/src/js/module/components/fileUpdate/definitionUpdate.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-popup
ref="popup"
:ok-text="$t('Upload')"
:nameText="$t('File Upload')"
@ok="_ok"
@close="close"
:disabled="progress === 0 ? false : true">
<template slot="content">
<form name="files" enctype="multipart/form-data" method="post">
<div class="file-update-model"
@drop.prevent="_onDrop"
@dragover.prevent="dragOver = true"
@dragleave.prevent="dragOver = false"
id="file-update-model">
<div class="tooltip-info">
<em class="fa el-icon-warning"></em>
<span>{{$t('Drag the file into the current upload window')}}</span>
</div>
<div class="update-popup" v-if="dragOver">
<div class="icon-box">
<em class="ans el-icon-upload"></em>
</div>
<p class="p1">
<span>{{$t('Drag area upload')}}</span>
</p>
</div>
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('Upload Files')}}</template>
<template slot="content">
<div class="file-update-box">
<template v-if="progress === 0">
<input ref="file" name="file" type="file" class="file-update" @change="_onChange">
<el-button size="mini">{{$t('Upload')}}<em class="el-icon-upload"></em></el-button>
</template>
<div class="progress-box" v-if="progress !== 0">
<m-progress-bar :value="progress" text-placement="left-right"></m-progress-bar>
</div>
</div>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name">{{$t('File Name')}}</template>
<template slot="content">
<el-input
type="input"
v-model="name"
:disabled="progress !== 0"
size="small"
:placeholder="$t('Please enter name')">
</el-input>
</template>
</m-list-box-f>
</div>
</form>
</template>
</m-popup>
</template>
<script>
import io from '@/module/io'
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
import mProgressBar from '@/module/components/progressBar/progressBar'
export default {
name: 'file-update',
data () {
return {
store,
// name
name: '',
// description
description: '',
// progress
progress: 0,
// file
file: '',
// Whether to drag upload
dragOver: false
}
},
watch: {
},
props: {
type: String
},
methods: {
/**
* submit
*/
_ok () {
this.$refs.popup.spinnerLoading = true
if (this._validation()) {
this._formDataUpdate().then(res => {
setTimeout(() => {
this.$refs.popup.spinnerLoading = false
}, 800)
}).catch(e => {
this.$refs.popup.spinnerLoading = false
})
} else {
this.$refs.popup.spinnerLoading = false
}
},
/**
* validation
*/
_validation () {
if (!this.file) {
this.$message.warning(`${i18n.$t('Please select the file to upload')}`)
return false
}
return true
},
/**
* update file
*/
_formDataUpdate () {
return new Promise((resolve, reject) => {
let self = this
let formData = new FormData()
formData.append('file', this.file)
formData.append('projectName', this.store.state.dag.projectName)
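// NOTE (issue #6566 above): this request path is the one the issue reports as
// wrong; presumably a segment expected by the backend API (e.g. the project
// scope) is missing here.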
io.post('projects/import-definition', res => {
this.$message.success(res.msg)
resolve()
self.$emit('onUpdateDefinition')
this.reset()
}, e => {
reject(e)
self.$emit('closeDefinition')
this.$message.error(e.msg || '')
this.reset()
}, {
data: formData,
emulateJSON: false,
onUploadProgress (progressEvent) {
// Size has been uploaded
let loaded = progressEvent.loaded
// Total attachment size
let total = progressEvent.total
self.progress = Math.floor(100 * loaded / total)
self.$emit('onProgressDefinition', self.progress)
}
})
})
},
/**
* Archive to the top right corner Continue uploading
*/
_ckArchive () {
$('.update-file-modal').hide()
this.$emit('onArchiveDefinition')
},
close () {
this.$emit('closeDefinition')
},
reset () {
this.name = ''
this.description = ''
this.progress = 0
this.file = ''
this.dragOver = false
},
/**
* Drag and drop upload
*/
_onDrop (e) {
let file = e.dataTransfer.files[0]
this.file = file
this.name = file.name
this.dragOver = false
},
_onChange () {
let file = this.$refs.file.files[0]
this.file = file
this.name = file.name
this.$refs.file.value = null
}
},
components: { mPopup, mListBoxF, mProgressBar }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.file-update-model {
.tooltip-info {
position: absolute;
left: 20px;
bottom: 26px;
span {
font-size: 12px;
color: #666;
vertical-align: middle;
}
.fa {
color: #0097e0;
font-size: 14px;
vertical-align: middle;
}
}
.hide-archive {
position: absolute;
right: 22px;
top: 17px;
.fa{
font-size: 16px;
color: #333;
font-weight: normal;
cursor: pointer;
&:hover {
color: #0097e0;
}
}
}
.file-update-box {
padding-top: 4px;
position: relative;
.file-update {
width: 70px;
height: 40px;
position: absolute;
left: 0;
top: 0;
cursor: pointer;
filter: alpha(opacity=0);
-moz-opacity: 0;
opacity: 0;
}
&:hover {
.v-btn-dashed {
background-color: transparent;
border-color: #47c3ff;
color: #47c3ff;
cursor: pointer;
}
}
.progress-box {
width: 200px;
position: absolute;
left: 70px;
top: 14px;
}
}
.update-popup {
width: calc(100% - 20px);
height: calc(100% - 20px);
background: rgba(255,253,239,.7);
position: absolute;
top: 10px;
left: 10px;
border-radius: 3px;
z-index: 1;
border: .18rem dashed #cccccc;
.icon-box {
text-align: center;
margin-top: 96px;
.fa {
font-size: 50px;
color: #2d8cf0;
}
}
.p1 {
text-align: center;
font-size: 16px;
color: #333;
padding-top: 8px;
}
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,572 | [Bug] [API] import processDefinition report json parse error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
[ERROR] 2021-10-20 20:40:03.391 org.apache.dolphinscheduler.common.utils.JSONUtils:[167] - parse list exception!
com.fasterxml.jackson.databind.exc.InvalidDefinitionException: Cannot construct instance of `org.apache.dolphinscheduler.api.dto.DagDataSchedule` (no Creators, like default construct, exist): cannot deserialize from Object value (no delegate- or property-based Creator)
at [Source: (String)"[{"processDefinition":{"id":73,"code":818174268579840,"name":"hello-shell-2","version":1,"releaseState":"OFFLINE","projectCode":724559144984576,"description":"","globalParams":"[]","globalParamList":[],"globalParamMap":{},"createTime":"2021-10-16 23:44:57","updateTime":"2021-10-16 23:44:57","flag":"YES","userId":1,"userName":null,"projectName":null,"locations":"[{\"taskCode\":818173332135936,\"x\":202,\"y\":376}]","scheduleReleaseState":null,"timeout":0,"tenantId":-1,"tenantCode":null,"modifyBy""[truncated 1139 chars]; line: 1, column: 3] (through reference chain: java.util.ArrayList[0])
at com.fasterxml.jackson.databind.exc.InvalidDefinitionException.from(InvalidDefinitionException.java:67)
at com.fasterxml.jackson.databind.DeserializationContext.reportBadDefinition(DeserializationContext.java:1592)
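For context, here is a minimal self-contained sketch of the Jackson requirement behind this error: data binding needs a usable creator, typically a no-argument constructor. The class names are illustrative and this is not the project's actual patch:
```java
import com.fasterxml.jackson.databind.ObjectMapper;

public class JacksonCreatorDemo {
    public static class Payload {
        private String name;
        public Payload() { }                       // the "Creator" Jackson asks for
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Without the no-arg constructor above, this readValue call throws
        // InvalidDefinitionException: "no Creators, like default construct, exist".
        Payload p = mapper.readValue("{\"name\":\"hello-shell-2\"}", Payload.class);
        System.out.println(p.getName());
    }
}
```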
### What you expected to happen
It can be imported successfully.
### How to reproduce
Export a processDefinition and then import it
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6572 | https://github.com/apache/dolphinscheduler/pull/6573 | 251255009a857656abf0fe7776b5ae4d68eb4cf7 | 095e2e206a8d428a0048baa38704a3de3a1e037b | "2021-10-20T13:33:36Z" | java | "2021-10-20T15:45:38Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/DagDataSchedule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.Schedule;
/**
* DagDataSchedule
*/
public class DagDataSchedule extends DagData {
/**
* schedule
*/
private Schedule schedule;
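// NOTE (issue #6572 above): only the DagData-arg constructor below is defined;
// Jackson data binding needs a no-argument creator to deserialize this class,
// which matches the InvalidDefinitionException in the report.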
public DagDataSchedule(DagData dagData) {
super();
this.setProcessDefinition(dagData.getProcessDefinition());
this.setTaskDefinitionList(dagData.getTaskDefinitionList());
this.setProcessTaskRelationList(dagData.getProcessTaskRelationList());
}
public Schedule getSchedule() {
return schedule;
}
public void setSchedule(Schedule schedule) {
this.schedule = schedule;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,501 | [Bug] [Dao] Sql is not correct in WorkFlowLineageMapper.xml | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
`Select "" as xx` appears to be invalid SQL in this MyBatis mapper and should be changed to `Select '' as xx`.
https://github.com/apache/dolphinscheduler/blob/db04a5b04df8aa5754741c6dd456579d743af1e9/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/WorkFlowLineageMapper.xml#L32-L63
### What you expected to happen
`WorkFlowLineageMapper#queryWorkFlowLineageByCode` should execute successfully; currently it cannot run.
### How to reproduce
Execute `WorkFlowLineageMapper#queryWorkFlowLineageByCode`, `WorkFlowLineageMapper#queryWorkFlowLineageByLineage`
### Anything else
See exception log
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6501 | https://github.com/apache/dolphinscheduler/pull/6576 | 095e2e206a8d428a0048baa38704a3de3a1e037b | 18b324e4d578b76376f6f09fbca6961430f88700 | "2021-10-11T15:34:55Z" | java | "2021-10-21T03:25:26Z" | dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/WorkFlowLineageMapper.xml | <?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.WorkFlowLineageMapper">
<select id="queryWorkFlowLineageByName" resultType="org.apache.dolphinscheduler.dao.entity.WorkFlowLineage">
select tepd.code as work_flow_code,tepd.name as work_flow_name
from t_ds_process_definition tepd
left join t_ds_schedules tes on tepd.code = tes.process_definition_code
where tepd.project_code = #{projectCode}
<if test="workFlowName != null and workFlowName != ''">
and tepd.name like concat('%', #{workFlowName}, '%')
</if>
</select>
<select id="queryWorkFlowLineageByCode" resultType="org.apache.dolphinscheduler.dao.entity.WorkFlowLineage">
select tepd.code as work_flow_code,tepd.name as work_flow_name,
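<!-- NOTE (issue #6501 above): the double-quoted empty string on the next line
     (and the one in queryWorkFlowLineageByLineage below) is the literal this
     issue flags; a standard SQL string literal would be '' -->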
"" as source_work_flow_code,
tepd.release_state as work_flow_publish_status,
tes.start_time as schedule_start_time,
tes.end_time as schedule_end_time,
tes.crontab as crontab,
tes.release_state as schedule_publish_status
from t_ds_process_definition tepd
left join t_ds_schedules tes on tepd.code = tes.process_definition_code
where tepd.project_code = #{projectCode} and tepd.code = #{workFlowCode}
</select>
<select id="queryWorkFlowLineageByLineage" resultType="org.apache.dolphinscheduler.dao.entity.WorkFlowLineage">
select tepd.code as work_flow_code,tepd.name as work_flow_name,
"" as source_work_flow_code,
tepd.release_state as work_flow_publish_status,
tes.start_time as schedule_start_time,
tes.end_time as schedule_end_time,
tes.crontab as crontab,
tes.release_state as schedule_publish_status
from t_ds_process_definition tepd
left join t_ds_schedules tes on tepd.code = tes.process_definition_code
where 1=1
<if test="processLineages != null and processLineages.size != 0">
and
<foreach collection="processLineages" index="index" item="item" open="(" separator=" or " close=")">
(tepd.project_code = #{item.projectCode}
and tepd.code = #{item.processDefinitionCode})
</foreach>
</if>
</select>
<select id="queryProcessLineage" resultType="org.apache.dolphinscheduler.dao.entity.ProcessLineage">
select ptr.project_code,
ptr.post_task_code,
ptr.post_task_version,
ptr.pre_task_code,
ptr.pre_task_version,
ptr.process_definition_code,
ptr.process_definition_version
from t_ds_process_definition pd
join t_ds_process_task_relation ptr on pd.code = ptr.process_definition_code
and pd.version = ptr.process_definition_version
where pd.project_code = #{projectCode}
</select>
<select id="queryProcessLineageByCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessLineage">
select project_code,
post_task_code,
post_task_version,
pre_task_code,
pre_task_version,
process_definition_code,
process_definition_version
from t_ds_process_task_relation
where project_code = #{projectCode}
and process_definition_code = #{processDefinitionCode}
</select>
</mapper>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,582 | [Bug] [Master] condition task fail when log show it's result is success | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The condition task log shows `the conditions task depend result : SUCCESS`, but the task state is FAILED (see the sketch below).
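A minimal sketch of the state mapping implied by the processor code in this record (enum values mirror that code): any result other than `SUCCESS`, including the initial `WAITING`, is persisted as `FAILURE`, which would produce exactly this mismatch if the task is ended before evaluation:
```java
public class ConditionResultMappingDemo {
    enum DependResult { SUCCESS, WAITING, FAILED }
    enum ExecutionStatus { SUCCESS, FAILURE }

    // Same ternary used by ConditionTaskProcessor#endTask below.
    static ExecutionStatus endTask(DependResult conditionResult) {
        return conditionResult == DependResult.SUCCESS
                ? ExecutionStatus.SUCCESS : ExecutionStatus.FAILURE;
    }

    public static void main(String[] args) {
        System.out.println(endTask(DependResult.WAITING)); // FAILURE
        System.out.println(endTask(DependResult.SUCCESS)); // SUCCESS
    }
}
```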
### What you expected to happen
condition task state is right.
### How to reproduce
use condition task in your dag.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6582 | https://github.com/apache/dolphinscheduler/pull/6583 | 9b1984c2449538e5f0e547297762fdafc5e0211c | b642e9ca546d8ea21bbd9f193ff6ff9729037f75 | "2021-10-21T10:59:46Z" | java | "2021-10-21T11:27:13Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/task/ConditionTaskProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner.task;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.model.DependentItem;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.task.dependent.DependentParameters;
import org.apache.dolphinscheduler.common.utils.DependentUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.LogUtils;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.LoggerFactory;
/**
* condition task processor
*/
public class ConditionTaskProcessor extends BaseTaskProcessor {
/**
* dependent parameters
*/
private DependentParameters dependentParameters;
ProcessInstance processInstance;
/**
* condition result
*/
private DependResult conditionResult = DependResult.WAITING;
/**
* complete task map
*/
private Map<String, ExecutionStatus> completeTaskList = new ConcurrentHashMap<>();
MasterConfig masterConfig = SpringApplicationContext.getBean(MasterConfig.class);
private TaskDefinition taskDefinition;
@Override
public boolean submit(TaskInstance task, ProcessInstance processInstance, int masterTaskCommitRetryTimes, int masterTaskCommitInterval) {
this.processInstance = processInstance;
this.taskInstance = processService.submitTask(task, masterTaskCommitRetryTimes, masterTaskCommitInterval);
if (this.taskInstance == null) {
return false;
}
taskDefinition = processService.findTaskDefinition(
taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()
);
logger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion(),
taskInstance.getProcessInstanceId(),
taskInstance.getId()));
String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, processService.formatTaskAppId(this.taskInstance));
Thread.currentThread().setName(threadLoggerInfoName);
initTaskParameters();
logger.info("dependent task start");
endTask();
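// NOTE (issue #6582 above): endTask() runs here while conditionResult is still
// WAITING, and endTask() maps anything other than SUCCESS to FAILURE, so the
// task can be marked failed before the condition is ever evaluated.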
return true;
}
@Override
public ExecutionStatus taskState() {
return this.taskInstance.getState();
}
@Override
public void run() {
if (conditionResult.equals(DependResult.WAITING)) {
setConditionResult();
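// NOTE: the freshly computed result is only logged by setConditionResult();
// endTask() is not called in this branch, so the persisted state is not
// updated until a later invocation reaches the else branch.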
} else {
endTask();
}
}
@Override
protected boolean pauseTask() {
this.taskInstance.setState(ExecutionStatus.PAUSE);
this.taskInstance.setEndTime(new Date());
processService.saveTaskInstance(taskInstance);
return true;
}
@Override
protected boolean taskTimeout() {
TaskTimeoutStrategy taskTimeoutStrategy =
taskDefinition.getTimeoutNotifyStrategy();
if (taskTimeoutStrategy == TaskTimeoutStrategy.WARN) {
return true;
}
logger.info("condition task {} timeout, strategy {} ",
taskInstance.getId(), taskTimeoutStrategy.getDescp());
conditionResult = DependResult.FAILED;
endTask();
return true;
}
@Override
protected boolean killTask() {
this.taskInstance.setState(ExecutionStatus.KILL);
this.taskInstance.setEndTime(new Date());
processService.saveTaskInstance(taskInstance);
return true;
}
@Override
public String getType() {
return TaskType.CONDITIONS.getDesc();
}
private void initTaskParameters() {
taskInstance.setLogPath(LogUtils.getTaskLogPath(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion(),
taskInstance.getProcessInstanceId(),
taskInstance.getId()));
this.taskInstance.setHost(NetUtils.getAddr(masterConfig.getListenPort()));
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
this.processService.saveTaskInstance(taskInstance);
this.dependentParameters = taskInstance.getDependency();
}
private void setConditionResult() {
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
completeTaskList.putIfAbsent(task.getName(), task.getState());
}
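        // combine the item results inside each group by the group's relation (AND/OR),
        // then combine the group results by the global relation to get the final verdict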
List<DependResult> modelResultList = new ArrayList<>();
for (DependentTaskModel dependentTaskModel : dependentParameters.getDependTaskList()) {
List<DependResult> itemDependResult = new ArrayList<>();
for (DependentItem item : dependentTaskModel.getDependItemList()) {
itemDependResult.add(getDependResultForItem(item));
}
DependResult modelResult = DependentUtils.getDependResultForRelation(dependentTaskModel.getRelation(), itemDependResult);
modelResultList.add(modelResult);
}
conditionResult = DependentUtils.getDependResultForRelation(dependentParameters.getRelation(), modelResultList);
logger.info("the conditions task depend result : {}", conditionResult);
}
/**
* depend result for depend item
*/
private DependResult getDependResultForItem(DependentItem item) {
DependResult dependResult = DependResult.SUCCESS;
if (!completeTaskList.containsKey(item.getDepTasks())) {
logger.info("depend item: {} have not completed yet.", item.getDepTasks());
dependResult = DependResult.FAILED;
return dependResult;
}
ExecutionStatus executionStatus = completeTaskList.get(item.getDepTasks());
if (executionStatus != item.getStatus()) {
logger.info("depend item : {} expect status: {}, actual status: {}", item.getDepTasks(), item.getStatus(), executionStatus);
dependResult = DependResult.FAILED;
}
logger.info("dependent item complete {} {},{}",
Constants.DEPENDENT_SPLIT, item.getDepTasks(), dependResult);
return dependResult;
}
/**
 * end the condition task: map the condition result to a final task state and persist it
 */
private void endTask() {
ExecutionStatus status = (conditionResult == DependResult.SUCCESS) ? ExecutionStatus.SUCCESS : ExecutionStatus.FAILURE;
taskInstance.setState(status);
taskInstance.setEndTime(new Date());
processService.updateTaskInstance(taskInstance);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,461 | [Bug] [SQL] sql script upgrade exception | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Caused by: java.sql.SQLSyntaxErrorException: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ALTER TABLE t_ds_task_definition drop INDEX `task_unique`; END IF; END' at line 1
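For reference, MySQL's compound `IF` statement inside a stored procedure requires the `THEN` keyword before its statement list, and the failing procedure omits exactly that keyword. A minimal sketch of the corrected block, with the index name taken from the error message above:

```sql
IF EXISTS (SELECT 1 FROM information_schema.STATISTICS
           WHERE TABLE_NAME = 't_ds_task_definition'
             AND TABLE_SCHEMA = (SELECT DATABASE())
             AND INDEX_NAME = 'task_unique')
THEN
    ALTER TABLE t_ds_task_definition DROP INDEX `task_unique`;
END IF;
```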
### What you expected to happen
The upgrade sql script should execute without a syntax error.
### How to reproduce
Run the 1.4.0 schema upgrade sql script.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6461 | https://github.com/apache/dolphinscheduler/pull/6462 | b642e9ca546d8ea21bbd9f193ff6ff9729037f75 | 28004c3b05d4644cce2b649e45ae35ef7c19d2d5 | "2021-10-08T09:44:27Z" | java | "2021-10-22T02:30:52Z" | sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
-- uc_dolphin_T_t_ds_user_A_state
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_user'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='state')
THEN
ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_user_A_state;
DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state;
-- uc_dolphin_T_t_ds_tenant_A_tenant_name
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_tenant'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='tenant_name')
THEN
ALTER TABLE t_ds_tenant DROP `tenant_name`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_tenant_A_tenant_name;
DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name;
-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='first_submit_time')
THEN
ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time;
-- uc_dolphin_T_t_ds_task_instance_A_delay_time
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='delay_time')
THEN
ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time;
-- uc_dolphin_T_t_ds_task_instance_A_var_pool
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_task_instance ADD `var_pool` longtext NULL;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool;
-- uc_dolphin_T_t_ds_process_instance_A_var_pool
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool;
-- uc_dolphin_T_t_ds_process_definition_A_modify_by
drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version;
delimiter d//
CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version()
BEGIN
CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`process_definition_json` longtext COMMENT 'process definition json content',
`description` text,
`global_params` text COMMENT 'global parameters',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`receivers` text COMMENT 'receivers',
`receivers_cc` text COMMENT 'cc',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids',
PRIMARY KEY (`id`),
UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE,
KEY `process_definition_index` (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8;
END;
d//
delimiter ;
CALL ct_dolphin_T_t_ds_process_definition_version;
DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version;
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
`plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
`plugin_params` text COMMENT 'plugin params',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_define_id` int NOT NULL,
`plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id;
-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition_version'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id;
-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='alert_instance_ids')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='create_user_id')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND INDEX_NAME ='t_ds_alertgroup_name_un')
THEN
ALTER TABLE t_ds_alertgroup ADD UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`);
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName;
-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS
WHERE TABLE_NAME='t_ds_datasource'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND INDEX_NAME ='t_ds_datasource_name_un')
THEN
ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`);
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName;
-- uc_dolphin_T_t_ds_schedules_A_add_timezone
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_schedules'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='timezone_id')
THEN
ALTER TABLE t_ds_schedules ADD COLUMN `timezone_id` varchar(40) default NULL COMMENT 'schedule timezone id' AFTER `end_time`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_schedules_A_add_timezone();
DROP PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone;
-- uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.STATISTICS
WHERE TABLE_NAME='t_ds_task_definition'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND INDEX_NAME ='task_unique')
THEN
ALTER TABLE t_ds_task_definition drop INDEX `task_unique`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName();
DROP PROCEDURE uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName;
-- ----------------------------
-- Table structure for t_ds_environment
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment`;
CREATE TABLE `t_ds_environment` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`code` bigint(20) DEFAULT NULL COMMENT 'encoding',
`name` varchar(100) NOT NULL COMMENT 'environment config name',
`config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config',
`description` text NULL DEFAULT NULL COMMENT 'the details',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `environment_name_unique` (`name`),
UNIQUE KEY `environment_code_unique` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition`;
CREATE TABLE `t_ds_task_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
`version` int(11) DEFAULT NULL COMMENT 'task definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_params` longtext COMMENT 'job custom parameters',
`flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
`task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
`worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
`fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
`timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
`timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
`timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
`delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
`resource_ids` text COMMENT 'resource id, separated by comma',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`,`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition_log`;
CREATE TABLE `t_ds_task_definition_log` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
`code` bigint(20) NOT NULL COMMENT 'encoding',
`name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
`version` int(11) DEFAULT NULL COMMENT 'task definition version',
`description` text COMMENT 'description',
`project_code` bigint(20) NOT NULL COMMENT 'project code',
`user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
`task_type` varchar(50) NOT NULL COMMENT 'task type',
`task_params` text COMMENT 'job custom parameters',
`flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
`task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
`worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
`environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
`fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
`fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
`timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
`timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
`timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
`delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
`resource_ids` text COMMENT 'resource id, separated by comma',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`operate_time` datetime DEFAULT NULL COMMENT 'operate time',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
ALTER TABLE t_ds_command ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_error_command ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_schedules ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_process_instance ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_task_instance ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`;
ALTER TABLE t_ds_task_instance ADD COLUMN `environment_config` text COMMENT 'environment config' AFTER `environment_code`;
ALTER TABLE t_ds_task_definition MODIFY COLUMN `resource_ids` text;
ALTER TABLE t_ds_task_definition_log MODIFY COLUMN `resource_ids` text;
-- ----------------------------
-- Table structure for t_ds_environment_worker_group_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`;
CREATE TABLE `t_ds_environment_worker_group_relation` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`environment_code` bigint(20) NOT NULL COMMENT 'environment code',
`worker_group` varchar(255) NOT NULL COMMENT 'worker group id',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`;
-- ALTER TABLE t_ds_alertgroup DROP `group_type`;
-- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`;
-- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`;
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
-- ALTER TABLE t_ds_command DROP `dependence`;
-- ALTER TABLE t_ds_error_command DROP `dependence`;
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,461 | [Bug] [SQL] sql script upgrade exception | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Caused by: java.sql.SQLSyntaxErrorException: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ALTER TABLE t_ds_task_definition drop INDEX `task_unique`; END IF; END' at line 1
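The PostgreSQL upgrade script carries the same shape of bug: PL/pgSQL's `IF ... END IF;` block likewise requires the `THEN` keyword before its statements. A sketch of the corrected check, using the constraint name that appears in the script below:

```sql
IF EXISTS (SELECT 1 FROM pg_stat_all_indexes
           WHERE relname = 't_ds_task_definition'
             AND indexrelname = 'task_definition_unique')
THEN
    ALTER TABLE t_ds_task_definition DROP CONSTRAINT task_definition_unique;
END IF;
```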
### What you expected to happen
The upgrade sql script should execute without a syntax error.
### How to reproduce
Run the 1.4.0 schema upgrade sql script.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6461 | https://github.com/apache/dolphinscheduler/pull/6462 | b642e9ca546d8ea21bbd9f193ff6ff9729037f75 | 28004c3b05d4644cce2b649e45ae35ef7c19d2d5 | "2021-10-08T09:44:27Z" | java | "2021-10-22T02:30:52Z" | sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- uc_dolphin_T_t_ds_user_A_state
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_user'
AND COLUMN_NAME ='state')
THEN
ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1;
comment on column t_ds_user.state is 'state 0:disable 1:enable';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_user_A_state();
DROP FUNCTION uc_dolphin_T_t_ds_user_A_state();
-- uc_dolphin_T_t_ds_tenant_A_tenant_name
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_tenant'
AND COLUMN_NAME ='tenant_name')
THEN
ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name";
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_tenant_A_tenant_name();
DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name();
-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='first_submit_time')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
-- uc_dolphin_T_t_ds_task_instance_A_delay_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='delay_time')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time();
-- uc_dolphin_T_t_ds_task_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool();
-- uc_dolphin_T_t_ds_process_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool();
-- uc_dolphin_T_t_ds_process_definition_A_modify_by
delimiter d//
CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$
BEGIN
CREATE TABLE IF NOT EXISTS t_ds_process_definition_version (
id int NOT NULL ,
process_definition_id int NOT NULL ,
version int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
locations text ,
connects text ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT ct_dolphin_T_t_ds_process_definition_version();
DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version();
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);
-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition_version'
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND COLUMN_NAME ='alert_instance_ids')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL;
COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND COLUMN_NAME ='create_user_id')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
WHERE relname='t_ds_alertgroup'
AND indexrelname ='t_ds_alertgroup_name_un')
THEN
ALTER TABLE t_ds_alertgroup ADD CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name);
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
WHERE relname='t_ds_datasource'
AND indexrelname ='t_ds_datasource_name_un')
THEN
ALTER TABLE t_ds_datasource ADD CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type);
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
-- uc_dolphin_T_t_ds_schedules_A_add_timezone
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_schedules_A_add_timezone() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_schedules'
AND COLUMN_NAME ='timezone_id')
THEN
ALTER TABLE t_ds_schedules ADD COLUMN timezone_id varchar(40) DEFAULT NULL;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_schedules_A_add_timezone();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone();
--
-- Table structure for table t_ds_environment
--
DROP TABLE IF EXISTS t_ds_environment;
CREATE TABLE t_ds_environment (
id serial NOT NULL ,
code bigint NOT NULL,
name varchar(100) DEFAULT NULL ,
config text DEFAULT NULL ,
description text ,
operator int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT environment_name_unique UNIQUE (name),
CONSTRAINT environment_code_unique UNIQUE (code)
);
ALTER TABLE t_ds_task_definition ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_task_definition.environment_code is 'environment code';
ALTER TABLE t_ds_task_definition_log ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_task_definition_log.environment_code is 'environment code';
ALTER TABLE t_ds_command ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_command.environment_code is 'environment code';
ALTER TABLE t_ds_error_command ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_error_command.environment_code is 'environment code';
ALTER TABLE t_ds_schedules ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_schedules.environment_code is 'environment code';
ALTER TABLE t_ds_process_instance ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_process_instance.environment_code is 'environment code';
ALTER TABLE t_ds_task_instance ADD COLUMN environment_code bigint DEFAULT '-1';
comment on column t_ds_task_instance.environment_code is 'environment code';
ALTER TABLE t_ds_task_instance ADD COLUMN environment_config text;
comment on column t_ds_task_instance.environment_config is 'environment config';
--
-- Table structure for table t_ds_environment_worker_group_relation
--
DROP TABLE IF EXISTS t_ds_environment_worker_group_relation;
CREATE TABLE t_ds_environment_worker_group_relation (
id serial NOT NULL,
environment_code bigint NOT NULL,
worker_group varchar(255) NOT NULL,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id) ,
CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group)
);
-- uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_stat_all_indexes
WHERE relname='t_ds_task_definition'
AND indexrelname ='task_definition_unique')
THEN
ALTER TABLE t_ds_task_definition drop CONSTRAINT task_definition_unique;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_definition_A_drop_UN_taskName();
ALTER TABLE t_ds_task_definition ALTER COLUMN resource_ids TYPE text;
ALTER TABLE t_ds_task_definition_log ALTER COLUMN resource_ids TYPE text;
-- ----------------------------
-- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type";
-- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
-- ALTER TABLE t_ds_command DROP COLUMN "dependence";
-- ALTER TABLE t_ds_error_command DROP COLUMN "dependence"; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,593 | [Bug] [DAO] t_ds_error_command miss `message` in pgsql | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Inserting into t_ds_error_command fails because the `message` field does not exist.
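A minimal sketch of the expected repair, assuming the PostgreSQL table should mirror the `message` column already present in the MySQL schema:

```sql
ALTER TABLE t_ds_error_command ADD COLUMN message text;
```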
### What you expected to happen
Inserting into t_ds_error_command should succeed.
### How to reproduce
Run a command that fails; it will be moved to the error command table (t_ds_error_command).
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6593 | https://github.com/apache/dolphinscheduler/pull/6594 | e15668097a2f73fd137b8adbe79f4150886c59f9 | adf49e6a5f102094007fff245824f6885990cff8 | "2021-10-22T10:50:49Z" | java | "2021-10-25T01:30:55Z" | sql/dolphinscheduler_postgre.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS (
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL
);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL
) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL
) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)
) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL
) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL
) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL
) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL
) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(200) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL
) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL
) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL
) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
content text ,
alert_status int DEFAULT '0' ,
log text ,
alertgroup_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup(
id int NOT NULL,
alert_instance_ids varchar (255) DEFAULT NULL,
create_user_id int4 DEFAULT NULL,
group_name varchar(255) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_code bigint NOT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
dry_run int DEFAULT '0' ,
process_instance_id int DEFAULT 0,
process_definition_version int DEFAULT 0,
PRIMARY KEY (id)
) ;
create index priority_id_index on t_ds_command (process_instance_priority,id);
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(255) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id),
CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_code bigint NOT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
message text ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
dry_run int DEFAULT '0' ,
process_instance_id int DEFAULT 0,
process_definition_version int DEFAULT 0,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
release_state int DEFAULT NULL ,
user_id int DEFAULT NULL ,
global_params text ,
locations text ,
warning_group_id int DEFAULT NULL ,
flag int DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int DEFAULT '-1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT process_definition_unique UNIQUE (name, project_code)
) ;
create index process_definition_index on t_ds_process_definition (code,id);
DROP TABLE IF EXISTS t_ds_process_definition_log;
CREATE TABLE t_ds_process_definition_log (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
release_state int DEFAULT NULL ,
user_id int DEFAULT NULL ,
global_params text ,
locations text ,
warning_group_id int DEFAULT NULL ,
flag int DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int DEFAULT '-1' ,
operator int DEFAULT NULL ,
operate_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP TABLE IF EXISTS t_ds_task_definition;
CREATE TABLE t_ds_task_definition (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
user_id int DEFAULT NULL ,
task_type varchar(50) DEFAULT NULL ,
task_params text ,
flag int DEFAULT NULL ,
task_priority int DEFAULT NULL ,
worker_group varchar(255) DEFAULT NULL ,
environment_code bigint DEFAULT '-1',
fail_retry_times int DEFAULT NULL ,
fail_retry_interval int DEFAULT NULL ,
timeout_flag int DEFAULT NULL ,
timeout_notify_strategy int DEFAULT NULL ,
timeout int DEFAULT '0' ,
delay_time int DEFAULT '0' ,
resource_ids text ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index task_definition_index on t_ds_task_definition (project_code,id);
DROP TABLE IF EXISTS t_ds_task_definition_log;
CREATE TABLE t_ds_task_definition_log (
id int NOT NULL ,
code bigint NOT NULL,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
description text ,
project_code bigint DEFAULT NULL ,
user_id int DEFAULT NULL ,
task_type varchar(50) DEFAULT NULL ,
task_params text ,
flag int DEFAULT NULL ,
task_priority int DEFAULT NULL ,
worker_group varchar(255) DEFAULT NULL ,
environment_code bigint DEFAULT '-1',
fail_retry_times int DEFAULT NULL ,
fail_retry_interval int DEFAULT NULL ,
timeout_flag int DEFAULT NULL ,
timeout_notify_strategy int DEFAULT NULL ,
timeout int DEFAULT '0' ,
delay_time int DEFAULT '0' ,
resource_ids text ,
operator int DEFAULT NULL ,
operate_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP TABLE IF EXISTS t_ds_process_task_relation;
CREATE TABLE t_ds_process_task_relation (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_version int DEFAULT NULL ,
project_code bigint DEFAULT NULL ,
process_definition_code bigint DEFAULT NULL ,
pre_task_code bigint DEFAULT NULL ,
pre_task_version int DEFAULT '0' ,
post_task_code bigint DEFAULT NULL ,
post_task_version int DEFAULT '0' ,
condition_type int DEFAULT NULL ,
condition_params text ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP TABLE IF EXISTS t_ds_process_task_relation_log;
CREATE TABLE t_ds_process_task_relation_log (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_version int DEFAULT NULL ,
project_code bigint DEFAULT NULL ,
process_definition_code bigint DEFAULT NULL ,
pre_task_code bigint DEFAULT NULL ,
pre_task_version int DEFAULT '0' ,
post_task_code bigint DEFAULT NULL ,
post_task_version int DEFAULT '0' ,
condition_type int DEFAULT NULL ,
condition_params text ,
operator int DEFAULT NULL ,
operate_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_version int DEFAULT NULL ,
process_definition_code bigint DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64) ,
environment_code bigint DEFAULT '-1',
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
var_pool text ,
dry_run int DEFAULT '0' ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_code,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
code bigint NOT NULL,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id),
CONSTRAINT t_ds_resources_un UNIQUE (full_name, type)
) ;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_code bigint NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
timezone_id varchar(40) default NULL ,
crontab varchar(255) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(50) DEFAULT NULL ,
task_code bigint NOT NULL,
task_definition_version int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link text ,
task_params text ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
environment_code bigint DEFAULT '-1',
environment_config text,
executor_id int DEFAULT NULL ,
first_submit_time timestamp DEFAULT NULL ,
delay_time int DEFAULT '0' ,
var_pool text ,
dry_run int DEFAULT '0' ,
PRIMARY KEY (id),
CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE
) ;
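-- deleting a process instance cascades to its task instances via foreign_key_instance_id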
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
comment on column t_ds_user.state is 'state 0:disable 1:enable';
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(255) NOT NULL ,
addr_list text DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT name_unique UNIQUE (name)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
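--
-- Sequences emulating auto-increment ids for the tables above (assigned via NEXTVAL defaults)
--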
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence;
CREATE SEQUENCE t_ds_process_definition_log_id_sequence;
ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence;
CREATE SEQUENCE t_ds_task_definition_id_sequence;
ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence;
CREATE SEQUENCE t_ds_task_definition_log_id_sequence;
ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence;
CREATE SEQUENCE t_ds_process_task_relation_id_sequence;
ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence;
CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence;
ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user, user: admin, password: dolphinscheduler123
INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time)
VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup, default admin warning group
INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time)
VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_version, default version: 1.4.0
INSERT INTO t_ds_version(version) VALUES ('1.4.0');
--
-- Table structure for table t_ds_plugin_define
--
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
--
-- Table structure for table t_ds_alert_plugin_instance
--
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_environment
--
DROP TABLE IF EXISTS t_ds_environment;
CREATE TABLE t_ds_environment (
id serial NOT NULL,
code bigint NOT NULL,
name varchar(100) DEFAULT NULL,
config text DEFAULT NULL,
description text,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT environment_name_unique UNIQUE (name),
CONSTRAINT environment_code_unique UNIQUE (code)
);
--
-- Table structure for table t_ds_environment_worker_group_relation
--
DROP TABLE IF EXISTS t_ds_environment_worker_group_relation;
CREATE TABLE t_ds_environment_worker_group_relation (
id serial NOT NULL,
environment_code bigint NOT NULL,
worker_group varchar(255) NOT NULL,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id) ,
CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group)
);
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,591 | [Bug] [API] import process definition error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
```
2021-10-22 14:28:39.421 ERROR 27304 --- [qtp407976995-47] o.a.d.a.exceptions.ApiExceptionHandler : import process definition error
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.checkAndImport(ProcessDefinitionServiceImpl.java:895) ~[dolphinscheduler-api-2.0.0-alpha.jar:2.0.0-alpha]
at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.importProcessDefinition(ProcessDefinitionServiceImpl.java:827) ~[dolphinscheduler-api-2.0.0-alpha.jar:2.0.0-alpha]
```
### What you expected to happen
Import of the process definition should succeed.
### How to reproduce
Import a process definition with the JSON file previously exported from DolphinScheduler, for example via the request sketched below.
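A minimal reproduction sketch in Java (the port `12345`, the `token` header, and the endpoint path `projects/{projectCode}/process-definition/import` are assumptions based on the standard API layout, not confirmed by this issue; adjust them to your deployment):
```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Files;
import java.nio.file.Path;

public class ImportRepro {
    public static void main(String[] args) throws Exception {
        long projectCode = 1234567890L;                        // placeholder: your own project code
        String token = "your-session-token";                   // placeholder: your own access token
        Path file = Path.of("process_definition_export.json"); // the JSON exported from the UI
        String boundary = "----repro-boundary";
        // build a minimal multipart/form-data body with a single "file" part
        String head = "--" + boundary + "\r\n"
                + "Content-Disposition: form-data; name=\"file\"; filename=\"" + file.getFileName() + "\"\r\n"
                + "Content-Type: application/json\r\n\r\n";
        String tail = "\r\n--" + boundary + "--\r\n";
        byte[] json = Files.readAllBytes(file);
        byte[] body = new byte[head.length() + json.length + tail.length()];
        System.arraycopy(head.getBytes(), 0, body, 0, head.length());
        System.arraycopy(json, 0, body, head.length(), json.length);
        System.arraycopy(tail.getBytes(), 0, body, head.length() + json.length, tail.length());
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12345/dolphinscheduler/projects/" + projectCode + "/process-definition/import"))
                .header("token", token)
                .header("Content-Type", "multipart/form-data; boundary=" + boundary)
                .POST(HttpRequest.BodyPublishers.ofByteArray(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // on the broken build the NullPointerException above surfaces here as an error response
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```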
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6591 | https://github.com/apache/dolphinscheduler/pull/6592 | adf49e6a5f102094007fff245824f6885990cff8 | 0d26724455316da665c0c8501b19531ada663779 | "2021-10-22T10:27:25Z" | java | "2021-10-26T09:35:06Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import org.apache.dolphinscheduler.api.dto.DagDataSchedule;
import org.apache.dolphinscheduler.api.dto.treeview.Instance;
import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.FileUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.commons.lang.StringUtils;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.google.common.collect.Lists;
/**
* process definition service impl
*/
@Service
public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService {
private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class);
private static final String RELEASESTATE = "releaseState";
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionLogMapper processDefinitionLogMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
@Autowired
private ProcessInstanceService processInstanceService;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private ProcessService processService;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
private ProcessTaskRelationLogMapper processTaskRelationLogMapper;
@Autowired
TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private TaskDefinitionMapper taskDefinitionMapper;
@Autowired
private SchedulerService schedulerService;
@Autowired
private TenantMapper tenantMapper;
/**
* create process definition
*
* @param loginUser login user
* @param projectCode project code
* @param name process definition name
* @param description description
* @param globalParams global params
* @param locations locations for nodes
* @param timeout timeout
* @param tenantCode tenantCode
* @param taskRelationJson relation json for nodes
* @param taskDefinitionJson taskDefinitionJson
* @return create result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> createProcessDefinition(User loginUser,
long projectCode,
String name,
String description,
String globalParams,
String locations,
int timeout,
String tenantCode,
String taskRelationJson,
String taskDefinitionJson) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
// check whether the new process define name exist
ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
if (definition != null) {
putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
return result;
}
List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson);
if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) {
return checkTaskDefinitions;
}
List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs);
if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) {
return checkRelationJson;
}
int tenantId = -1;
if (!Constants.DEFAULT.equals(tenantCode)) {
Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
if (tenant == null) {
putMsg(result, Status.TENANT_NOT_EXIST);
return result;
}
tenantId = tenant.getId();
}
long processDefinitionCode;
try {
processDefinitionCode = SnowFlakeUtils.getInstance().nextId();
} catch (SnowFlakeException e) {
putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
return result;
}
ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description,
globalParams, locations, timeout, loginUser.getId(), tenantId);
return createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs);
}
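/**
* persist a DAG: save the task definitions, the process definition and their
* relations; a failed insert is rolled back through ServiceException
*/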
private Map<String, Object> createDagDefine(User loginUser,
List<ProcessTaskRelationLog> taskRelationList,
ProcessDefinition processDefinition,
List<TaskDefinitionLog> taskDefinitionLogs) {
Map<String, Object> result = new HashMap<>();
int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs);
if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) {
logger.info("The task has not changed, so skip");
}
if (saveTaskResult == Constants.DEFINITION_FAILURE) {
putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
}
int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
if (insertVersion == 0) {
putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR);
}
int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
if (insertResult == Constants.EXIT_CODE_SUCCESS) {
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, processDefinition);
} else {
putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR);
throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR);
}
return result;
}
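/**
* validate the task definition list parsed from taskDefinitionJson:
* it must be non-empty and every task's parameters must pass CheckUtils
*/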
private Map<String, Object> checkTaskDefinitionList(List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) {
Map<String, Object> result = new HashMap<>();
try {
if (taskDefinitionLogs.isEmpty()) {
logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson);
putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson);
return result;
}
for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) {
logger.error("task definition {} parameter invalid", taskDefinitionLog.getName());
putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName());
return result;
}
}
putMsg(result, Status.SUCCESS);
} catch (Exception e) {
result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
result.put(Constants.MSG, e.getMessage());
}
return result;
}
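/**
* validate the task relation list parsed from taskRelationJson: every post task code
* must reference a known task, the DAG must be acyclic and no post_task_code may be zero
*/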
private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) {
Map<String, Object> result = new HashMap<>();
try {
if (taskRelationList == null || taskRelationList.isEmpty()) {
logger.error("task relation list is null");
putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson);
return result;
}
List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream()
.map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class))
.collect(Collectors.toList());
List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs);
if (taskNodeList.size() != taskRelationList.size()) {
Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet());
Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet());
Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes);
if (CollectionUtils.isNotEmpty(codes)) {
logger.error("the task code is not exit");
putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA));
return result;
}
}
if (graphHasCycle(taskNodeList)) {
logger.error("process DAG has cycle");
putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
return result;
}
// check whether the task relation json is normal
for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
if (processTaskRelationLog.getPostTaskCode() == 0) {
logger.error("the post_task_code or post_task_version can't be zero");
putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR);
return result;
}
}
putMsg(result, Status.SUCCESS);
} catch (Exception e) {
result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
result.put(Constants.MSG, e.getMessage());
}
return result;
}
/**
* query process definition list
*
* @param loginUser login user
* @param projectCode project code
* @return definition list
*/
@Override
public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode);
List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList());
result.put(Constants.DATA_LIST, dagDataList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition list paging
*
* @param loginUser login user
* @param projectCode project code
* @param searchVal search value
* @param userId user id
* @param pageNo page number
* @param pageSize page size
* @return process definition page
*/
@Override
public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) {
Result result = new Result();
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
putMsg(result, resultStatus);
return result;
}
Page<ProcessDefinition> page = new Page<>(pageNo, pageSize);
IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging(
page, searchVal, userId, project.getCode(), isAdmin(loginUser));
List<ProcessDefinition> records = processDefinitionIPage.getRecords();
for (ProcessDefinition pd : records) {
ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion());
User user = userMapper.selectById(processDefinitionLog.getOperator());
pd.setModifyBy(user.getUserName());
}
processDefinitionIPage.setRecords(records);
PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotal((int) processDefinitionIPage.getTotal());
pageInfo.setTotalList(processDefinitionIPage.getRecords());
result.setData(pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query detail of process definition
*
* @param loginUser login user
* @param projectCode project code
* @param code process definition code
* @return process definition detail
*/
@Override
public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
} else {
Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId());
if (tenant != null) {
processDefinition.setTenantCode(tenant.getTenantCode());
}
DagData dagData = processService.genDagData(processDefinition);
result.put(Constants.DATA_LIST, dagData);
putMsg(result, Status.SUCCESS);
}
return result;
}
@Override
public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name);
} else {
DagData dagData = processService.genDagData(processDefinition);
result.put(Constants.DATA_LIST, dagData);
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* update process definition
*
* @param loginUser login user
* @param projectCode project code
* @param name process definition name
* @param code process definition code
* @param description description
* @param globalParams global params
* @param locations locations for nodes
* @param timeout timeout
* @param tenantCode tenantCode
* @param taskRelationJson relation json for nodes
* @param taskDefinitionJson taskDefinitionJson
* @return update result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> updateProcessDefinition(User loginUser,
long projectCode,
String name,
long code,
String description,
String globalParams,
String locations,
int timeout,
String tenantCode,
String taskRelationJson,
String taskDefinitionJson) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson);
if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) {
return checkTaskDefinitions;
}
List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs);
if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) {
return checkRelationJson;
}
int tenantId = -1;
if (!Constants.DEFAULT.equals(tenantCode)) {
Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
if (tenant == null) {
putMsg(result, Status.TENANT_NOT_EXIST);
return result;
}
tenantId = tenant.getId();
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
// check process definition exists
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
return result;
}
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
// online can not permit edit
putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
return result;
}
if (!name.equals(processDefinition.getName())) {
// check whether the new process define name exist
ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
if (definition != null) {
putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
return result;
}
}
ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class);
processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId);
return updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs);
}
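/**
* persist an updated DAG: save changed task definitions, create a new process
* definition version only when it actually changed, then rewrite the task relations
*/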
private Map<String, Object> updateDagDefine(User loginUser,
List<ProcessTaskRelationLog> taskRelationList,
ProcessDefinition processDefinition,
ProcessDefinition processDefinitionDeepCopy,
List<TaskDefinitionLog> taskDefinitionLogs) {
Map<String, Object> result = new HashMap<>();
int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs);
if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) {
logger.info("The task has not changed, so skip");
}
if (saveTaskResult == Constants.DEFINITION_FAILURE) {
putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR);
throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR);
}
int insertVersion;
if (processDefinition.equals(processDefinitionDeepCopy)) {
insertVersion = processDefinitionDeepCopy.getVersion();
} else {
processDefinition.setUpdateTime(new Date());
insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
}
if (insertVersion == 0) {
putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR);
}
int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(),
processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
if (insertResult == Constants.EXIT_CODE_SUCCESS) {
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, processDefinition);
} else {
putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR);
}
return result;
}
/**
* verify process definition name unique
*
* @param loginUser login user
* @param projectCode project code
* @param name name
* @return true if process definition name not exists, otherwise false
*/
@Override
public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim());
if (processDefinition == null) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim());
}
return result;
}
/**
* delete process definition by code
*
* @param loginUser login user
* @param projectCode project code
* @param code process definition code
* @return delete result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
return result;
}
// Determine if the login user is the owner of the process definition
if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
// check process definition is already online
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code);
return result;
}
// check process instances is already running
List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES);
if (CollectionUtils.isNotEmpty(processInstances)) {
putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size());
return result;
}
// get the schedules defined for this process definition
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code);
if (schedules.size() > 1) {
logger.warn("schedule num is {}, greater than 1", schedules.size());
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
return result;
} else if (schedules.size() == 1) {
Schedule schedule = schedules.get(0);
if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
int delete = scheduleMapper.deleteById(schedule.getId());
if (delete == 0) {
putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
throw new ServiceException(Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
}
} else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
return result;
}
}
int delete = processDefinitionMapper.deleteById(processDefinition.getId());
int deleteRelation = processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode());
if (delete == 0 || deleteRelation == 0) {
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* release process definition: online / offline
*
* @param loginUser login user
* @param projectCode project code
* @param code process definition code
* @param releaseState release state
* @return release result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
// check state
if (null == releaseState) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
switch (releaseState) {
case ONLINE:
// check whether the referenced resources have been deleted or had their authorization revoked
String resourceIds = processDefinition.getResourceIds();
if (StringUtils.isNotBlank(resourceIds)) {
Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new);
PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger);
try {
permissionCheck.checkPermission();
} catch (Exception e) {
logger.error(e.getMessage(), e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE);
return result;
}
}
processDefinition.setReleaseState(releaseState);
processDefinitionMapper.updateById(processDefinition);
break;
case OFFLINE:
processDefinition.setReleaseState(releaseState);
int updateProcess = processDefinitionMapper.updateById(processDefinition);
List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
new long[]{processDefinition.getCode()}
);
if (updateProcess > 0 && scheduleList.size() == 1) {
Schedule schedule = scheduleList.get(0);
logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code);
// set status
schedule.setReleaseState(ReleaseState.OFFLINE);
int updateSchedule = scheduleMapper.updateById(schedule);
if (updateSchedule == 0) {
putMsg(result, Status.OFFLINE_SCHEDULE_ERROR);
throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR);
}
schedulerService.deleteSchedule(project.getId(), schedule.getId());
}
break;
default:
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* batch export process definition by codes
*/
@Override
public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) {
if (StringUtils.isEmpty(codes)) {
return;
}
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return;
}
Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(dagDataSchedules)) {
downloadProcessDefinitionFile(response, dagDataSchedules);
}
}
/**
* download the process definition file
*/
private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) {
response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
BufferedOutputStream buff = null;
ServletOutputStream out = null;
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
} finally {
if (null != buff) {
try {
buff.close();
} catch (Exception e) {
logger.warn("export process buffer not close", e);
}
}
if (null != out) {
try {
out.close();
} catch (Exception e) {
logger.warn("export process output stream not close", e);
}
}
}
}
/**
* get export process dag data
*
* @param processDefinition process definition
* @return DagDataSchedule
*/
public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) {
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode());
DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition));
if (!schedules.isEmpty()) {
Schedule schedule = schedules.get(0);
schedule.setReleaseState(ReleaseState.OFFLINE);
dagDataSchedule.setSchedule(schedule);
}
return dagDataSchedule;
}
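// the exported JSON is a list of DagDataSchedule objects, each carrying the process
// definition, its task definition list, the task relation list and an optional schedule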
/**
* import process definition
*
* @param loginUser login user
* @param projectCode project code
* @param file process metadata json file
* @return import process
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) {
Map<String, Object> result = new HashMap<>();
String dagDataScheduleJson = FileUtils.file2String(file);
List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class);
//check file content
if (CollectionUtils.isEmpty(dagDataScheduleList)) {
putMsg(result, Status.DATA_IS_NULL, "fileContent");
return result;
}
for (DagDataSchedule dagDataSchedule : dagDataScheduleList) {
if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) {
return result;
}
}
return result;
}
/**
* check and import a single DagDataSchedule: verify the name is unique, regenerate
* codes, then persist the definition, its relations and the optional schedule
*/
private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) {
if (!checkImportanceParams(dagDataSchedule, result)) {
return false;
}
ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition();
//unique check
Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName());
if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) {
putMsg(result, Status.SUCCESS);
} else {
result.putAll(checkResult);
return false;
}
String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1);
processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp());
processDefinition.setUserId(loginUser.getId());
try {
processDefinition.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
return false;
}
List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList();
Map<Long, Long> taskCodeMap = new HashMap<>();
Date now = new Date();
List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>();
for (TaskDefinition taskDefinition : taskDefinitionList) {
TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition);
taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp());
taskDefinitionLog.setProjectCode(projectCode);
taskDefinitionLog.setUserId(loginUser.getId());
taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
taskDefinitionLog.setCreateTime(now);
taskDefinitionLog.setUpdateTime(now);
taskDefinitionLog.setOperator(loginUser.getId());
taskDefinitionLog.setOperateTime(now);
try {
long code = SnowFlakeUtils.getInstance().nextId();
taskCodeMap.put(taskDefinitionLog.getCode(), code);
taskDefinitionLog.setCode(code);
} catch (SnowFlakeException e) {
logger.error("Task code get error, ", e);
putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code");
return false;
}
taskDefinitionLogList.add(taskDefinitionLog);
}
int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList);
int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList);
if ((logInsert & insert) == 0) {
putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
}
List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList();
List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>();
for (ProcessTaskRelation processTaskRelation : taskRelationList) {
ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation);
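// note: a root task has preTaskCode 0, which is absent from taskCodeMap; the Long
// returned by taskCodeMap.get(...) is then null and unboxing it in setPreTaskCode(...)
// can throw the NullPointerException reported in this issue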
processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode()));
processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode()));
processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST);
processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST);
taskRelationLogList.add(processTaskRelationLog);
}
Map<String, Object> createDagResult = createDagDefine(loginUser, taskRelationLogList, processDefinition, Lists.newArrayList());
if (Status.SUCCESS.equals(createDagResult.get(Constants.STATUS))) {
putMsg(createDagResult, Status.SUCCESS);
} else {
result.putAll(createDagResult);
throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
}
Schedule schedule = dagDataSchedule.getSchedule();
if (null != schedule) {
ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode());
schedule.setProcessDefinitionCode(newProcessDefinition.getCode());
schedule.setUserId(loginUser.getId());
schedule.setCreateTime(now);
schedule.setUpdateTime(now);
int scheduleInsert = scheduleMapper.insert(schedule);
if (0 == scheduleInsert) {
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
}
}
return true;
}
/**
* check that the imported DAG data contains the required parts:
* the process definition, the task definition list and the task relation list
*/
private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) {
if (dagDataSchedule.getProcessDefinition() == null) {
putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition");
return false;
}
if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) {
putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList");
return false;
}
if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) {
putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList");
return false;
}
return true;
}
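/**
* recursively probe "name(num)" suffixes until a process definition
* name that does not yet exist in the project is found
*/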
private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) {
ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName);
if (processDefinition != null) {
if (num > 1) {
String str = processDefinitionName.substring(0, processDefinitionName.length() - 3);
processDefinitionName = str + "(" + num + ")";
} else {
processDefinitionName = processDefinition.getName() + "(" + num + ")";
}
} else {
return processDefinitionName;
}
return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1);
}
/**
* check the process task relation json
*
* @param processTaskRelationJson process task relation json
* @return check result code
*/
@Override
public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) {
Map<String, Object> result = new HashMap<>();
try {
if (processTaskRelationJson == null) {
logger.error("process data is null");
putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson);
return result;
}
List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class);
// Check whether the task node is normal
List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList());
if (CollectionUtils.isEmpty(taskNodes)) {
logger.error("process node info is empty");
putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
return result;
}
// check has cycle
if (graphHasCycle(taskNodes)) {
logger.error("process DAG has cycle");
putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
return result;
}
// check whether the process definition json is normal
for (TaskNode taskNode : taskNodes) {
if (!CheckUtils.checkTaskNodeParameters(taskNode)) {
logger.error("task node {} parameter invalid", taskNode.getName());
putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName());
return result;
}
// check extra params
CheckUtils.checkOtherParams(taskNode.getExtras());
}
putMsg(result, Status.SUCCESS);
} catch (Exception e) {
result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
result.put(Constants.MSG, e.getMessage());
}
return result;
}
/**
* get task node details based on process definition
*
* @param loginUser loginUser
* @param projectCode project code
* @param code process definition code
* @return task node list
*/
@Override
public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
if (processDefinition == null) {
logger.info("process define not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
return result;
}
DagData dagData = processService.genDagData(processDefinition);
result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* get task node details map based on process definition
*
* @param loginUser loginUser
* @param projectCode project code
* @param codes define codes
* @return task node list
*/
@Override
public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
if (CollectionUtils.isEmpty(processDefinitionList)) {
logger.info("process definition not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes);
return result;
}
Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>();
for (ProcessDefinition processDefinition : processDefinitionList) {
DagData dagData = processService.genDagData(processDefinition);
taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList());
}
result.put(Constants.DATA_LIST, taskNodeMap);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition all by project code
*
* @param loginUser loginUser
* @param projectCode project code
* @return process definitions in the project
*/
@Override
public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode);
List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList());
result.put(Constants.DATA_LIST, dagDataList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* Encapsulates the TreeView structure
*
* @param code process definition code
* @param limit limit
* @return tree view json data
*/
@Override
public Map<String, Object> viewTree(long code, Integer limit) {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
if (null == processDefinition) {
logger.info("process define not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
return result;
}
DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition);
        // nodes that are running
        Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>();
        // nodes that are waiting to run
        Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>();
// List of process instances
List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit);
processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())));
List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()));
Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream()
.collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
if (limit > processInstanceList.size()) {
limit = processInstanceList.size();
}
TreeViewDto parentTreeViewDto = new TreeViewDto();
parentTreeViewDto.setName("DAG");
parentTreeViewDto.setType("");
parentTreeViewDto.setCode(0L);
        // Attach the instances of this process definition, since the TreeView is scoped to a single definition
for (int i = limit - 1; i >= 0; i--) {
ProcessInstance processInstance = processInstanceList.get(i);
Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime();
parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(),
"", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(),
DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
}
List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
parentTreeViewDtoList.add(parentTreeViewDto);
        // Here we encapsulate the task instances
for (String startNode : dag.getBeginNode()) {
runningNodeMap.put(startNode, parentTreeViewDtoList);
}
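        // breadth-first walk over the DAG: drain the currently runnable nodes,
        // attach their instance rows to the tree, then promote their successors
        // from the waiting map until nothing is left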
while (Stopper.isRunning()) {
Set<String> postNodeList;
Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<String, List<TreeViewDto>> en = iter.next();
String nodeCode = en.getKey();
parentTreeViewDtoList = en.getValue();
TreeViewDto treeViewDto = new TreeViewDto();
TaskNode taskNode = dag.getNode(nodeCode);
treeViewDto.setType(taskNode.getType());
treeViewDto.setCode(taskNode.getCode());
treeViewDto.setName(taskNode.getName());
//set treeViewDto instances
for (int i = limit - 1; i >= 0; i--) {
ProcessInstance processInstance = processInstanceList.get(i);
TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndCode(processInstance.getId(), Long.parseLong(nodeCode));
if (taskInstance == null) {
treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null"));
} else {
Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
int subProcessId = 0;
                        // if the task is a sub-process, return the sub-process id; otherwise sub id = 0
if (taskInstance.isSubProcess()) {
TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode());
subProcessId = Integer.parseInt(JSONUtils.parseObject(
taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText());
}
treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(),
taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(),
taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId));
}
}
for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
pTreeViewDto.getChildren().add(treeViewDto);
}
postNodeList = dag.getSubsequentNodes(nodeCode);
if (CollectionUtils.isNotEmpty(postNodeList)) {
for (String nextNodeCode : postNodeList) {
List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeCode);
if (CollectionUtils.isEmpty(treeViewDtoList)) {
treeViewDtoList = new ArrayList<>();
}
treeViewDtoList.add(treeViewDto);
waitingRunningNodeMap.put(nextNodeCode, treeViewDtoList);
}
}
runningNodeMap.remove(nodeCode);
}
if (waitingRunningNodeMap.size() == 0) {
break;
} else {
runningNodeMap.putAll(waitingRunningNodeMap);
waitingRunningNodeMap.clear();
}
}
result.put(Constants.DATA_LIST, parentTreeViewDto);
result.put(Constants.STATUS, Status.SUCCESS);
result.put(Constants.MSG, Status.SUCCESS.getMsg());
return result;
}
/**
     * whether the graph has a cycle
*
* @param taskNodeResponseList task node response list
* @return if graph has cycle flag
*/
private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
DAG<String, TaskNode, String> graph = new DAG<>();
// Fill the vertices
for (TaskNode taskNodeResponse : taskNodeResponseList) {
graph.addNode(Long.toString(taskNodeResponse.getCode()), taskNodeResponse);
}
// Fill edge relations
for (TaskNode taskNodeResponse : taskNodeResponseList) {
List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class);
if (CollectionUtils.isNotEmpty(preTasks)) {
for (String preTask : preTasks) {
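                    // a false return from addEdge is treated as evidence that the
                    // edge would close a cycle, so we can bail out immediately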
if (!graph.addEdge(preTask, Long.toString(taskNodeResponse.getCode()))) {
return true;
}
}
}
}
return graph.hasCycle();
}
/**
* batch copy process definition
*
* @param loginUser loginUser
* @param projectCode projectCode
* @param codes processDefinitionCodes
* @param targetProjectCode targetProjectCode
*/
@Override
public Map<String, Object> batchCopyProcessDefinition(User loginUser,
long projectCode,
String codes,
long targetProjectCode) {
Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
List<String> failedProcessList = new ArrayList<>();
doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true);
checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true);
return result;
}
/**
* batch move process definition
*
* @param loginUser loginUser
* @param projectCode projectCode
* @param codes processDefinitionCodes
* @param targetProjectCode targetProjectCode
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> batchMoveProcessDefinition(User loginUser,
long projectCode,
String codes,
long targetProjectCode) {
Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
if (projectCode == targetProjectCode) {
return result;
}
List<String> failedProcessList = new ArrayList<>();
doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false);
checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false);
return result;
}
private Map<String, Object> checkParams(User loginUser,
long projectCode,
String processDefinitionCodes,
long targetProjectCode) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
if (StringUtils.isEmpty(processDefinitionCodes)) {
putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes);
return result;
}
if (projectCode != targetProjectCode) {
Project targetProject = projectMapper.queryByCode(targetProjectCode);
//check user access for project
Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode);
if (targetResult.get(Constants.STATUS) != Status.SUCCESS) {
return targetResult;
}
}
return result;
}
private void doBatchOperateProcessDefinition(User loginUser,
long targetProjectCode,
List<String> failedProcessList,
String processDefinitionCodes,
Map<String, Object> result,
boolean isCopy) {
Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet());
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes);
Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet());
// definitionCodes - queryCodes
Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet());
diffCode.forEach(code -> failedProcessList.add(code + "[null]"));
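        // copy mints a new code/id and a "_copy_" suffixed name for each definition,
        // while move keeps the definition's identity and only re-points its project code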
for (ProcessDefinition processDefinition : processDefinitionList) {
List<ProcessTaskRelation> processTaskRelations =
processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList());
processDefinition.setProjectCode(targetProjectCode);
if (isCopy) {
try {
processDefinition.setCode(SnowFlakeUtils.getInstance().nextId());
} catch (SnowFlakeException e) {
putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
throw new ServiceException(Status.INTERNAL_SERVER_ERROR_ARGS);
}
processDefinition.setId(0);
processDefinition.setUserId(loginUser.getId());
processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp());
try {
result.putAll(createDagDefine(loginUser, taskRelationList, processDefinition, Lists.newArrayList()));
} catch (Exception e) {
putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR);
throw new ServiceException(Status.COPY_PROCESS_DEFINITION_ERROR);
}
} else {
try {
result.putAll(updateDagDefine(loginUser, taskRelationList, processDefinition, null, Lists.newArrayList()));
} catch (Exception e) {
putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR);
throw new ServiceException(Status.MOVE_PROCESS_DEFINITION_ERROR);
}
}
if (result.get(Constants.STATUS) != Status.SUCCESS) {
failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]");
}
}
}
/**
* switch the defined process definition version
*
* @param loginUser login user
* @param projectCode project code
* @param code process definition code
* @param version the version user want to switch
* @return switch process definition version result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
if (Objects.isNull(processDefinition)) {
putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code);
return result;
}
ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version);
if (Objects.isNull(processDefinitionLog)) {
putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version);
return result;
}
int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog);
if (switchVersion <= 0) {
putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
throw new ServiceException(Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* check batch operate result
*
* @param srcProjectCode srcProjectCode
* @param targetProjectCode targetProjectCode
* @param result result
* @param failedProcessList failedProcessList
* @param isCopy isCopy
*/
private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode,
Map<String, Object> result, List<String> failedProcessList, boolean isCopy) {
if (!failedProcessList.isEmpty()) {
if (isCopy) {
putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
} else {
putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
}
} else {
putMsg(result, Status.SUCCESS);
}
}
/**
* query the pagination versions info by one certain process definition code
*
* @param loginUser login user info to check auth
* @param projectCode project code
* @param pageNo page number
* @param pageSize page size
* @param code process definition code
* @return the pagination process definition versions info of the certain process definition
*/
@Override
public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) {
Result result = new Result();
Project project = projectMapper.queryByCode(projectCode);
// check user access for project
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
putMsg(result, resultStatus);
return result;
}
PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize);
Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize);
IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code);
List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords();
pageInfo.setTotalList(processDefinitionLogs);
pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal());
result.setData(pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* delete one certain process definition by version number and process definition code
*
* @param loginUser login user info to check auth
* @param projectCode project code
* @param code process definition code
* @param version version number
* @return delete result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
Project project = projectMapper.queryByCode(projectCode);
//check user access for project
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
} else {
int deleteLog = processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version);
int deleteRelationLog = processTaskRelationLogMapper.deleteByCode(processDefinition.getCode(), processDefinition.getVersion());
if (deleteLog == 0 || deleteRelationLog == 0) {
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
}
putMsg(result, Status.SUCCESS);
}
return result;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,181 | Empty try block in UT | I found that we have an empty try block in our UT; maybe we should just remove this block?
https://github.com/apache/dolphinscheduler/blob/e1aed5eefd26c9b447d356de6238e4b61a4d3daf/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManagerTest.java#L60-L65 | https://github.com/apache/dolphinscheduler/issues/6181 | https://github.com/apache/dolphinscheduler/pull/6182 | d91801e6f60670915a97d031b21fe16ce98e3e4e | cbeedba3e2cfdf80553da0d798fc71aaa448c6e0 | "2021-09-13T02:20:30Z" | java | "2021-10-27T11:01:33Z" | dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.plugin;
import org.apache.dolphinscheduler.alert.AlertServer;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.spi.plugin.DolphinPluginLoader;
import org.apache.dolphinscheduler.spi.plugin.DolphinPluginManagerConfig;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import java.util.Objects;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableList;
/**
* AlertPluginManager Tester.
*/
public class AlertPluginManagerTest {
private static final Logger logger = LoggerFactory.getLogger(AlertPluginManagerTest.class);
@Test
public void testLoadPlugins() {
logger.info("begin test AlertPluginManagerTest");
AlertPluginManager alertPluginManager = new AlertPluginManager();
DolphinPluginManagerConfig alertPluginManagerConfig = new DolphinPluginManagerConfig();
String path = Objects.requireNonNull(DolphinPluginLoader.class.getClassLoader().getResource("")).getPath();
alertPluginManagerConfig.setPlugins(path + "../../../dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/pom.xml");
if (StringUtils.isNotBlank(PropertyUtils.getString(AlertServer.ALERT_PLUGIN_DIR))) {
alertPluginManagerConfig.setInstalledPluginsDir(PropertyUtils.getString(AlertServer.ALERT_PLUGIN_DIR, Constants.ALERT_PLUGIN_PATH).trim());
}
if (StringUtils.isNotBlank(PropertyUtils.getString(AlertServer.MAVEN_LOCAL_REPOSITORY))) {
alertPluginManagerConfig.setMavenLocalRepository(Objects.requireNonNull(PropertyUtils.getString(AlertServer.MAVEN_LOCAL_REPOSITORY)).trim());
}
DolphinPluginLoader alertPluginLoader = new DolphinPluginLoader(alertPluginManagerConfig, ImmutableList.of(alertPluginManager));
try {
//alertPluginLoader.loadPlugins();
} catch (Exception e) {
throw new RuntimeException("load Alert Plugin Failed !", e);
}
Assert.assertNull(alertPluginManager.getAlertChannelFactoryMap().get("Email"));
}
}
|
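A note on issue 6181 above: since `loadPlugins()` is commented out, the surrounding try/catch guards nothing and can simply go. A minimal sketch of the cleaned-up tail of `testLoadPlugins` (only the shape the issue suggests; the merged PR #6182 may have chosen differently):

```java
        DolphinPluginLoader alertPluginLoader = new DolphinPluginLoader(alertPluginManagerConfig, ImmutableList.of(alertPluginManager));
        // the dead try/catch is removed; if plugin loading is ever re-enabled,
        // wrap the real call instead: alertPluginLoader.loadPlugins();
        Assert.assertNull(alertPluginManager.getAlertChannelFactoryMap().get("Email"));
```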
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,467 | [Feature][CI] Add stale bot for issues and PR | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Add a stale bot for issues and PRs: for issues and PRs that have had no recent activity for a long time, allow the bot to automatically mark them as stale or even close them. This could reduce the maintenance burden on our community.
### Use case
Automatically mark issues and PRs as stale or even close them
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6467 | https://github.com/apache/dolphinscheduler/pull/6468 | cbeedba3e2cfdf80553da0d798fc71aaa448c6e0 | 03aaef0fe6548e828144e6e5ac8ceb8bd6b650ad | "2021-10-08T14:30:58Z" | java | "2021-10-28T05:43:02Z" | .github/workflows/stale.yml | |
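The record above carries an empty `file_content` for `.github/workflows/stale.yml`, since the file only came into existence with the fix in PR #6468, whose exact contents are not reproduced here. Purely as an illustrative sketch of what such a workflow typically looks like (the day counts, messages, and exempt labels below are assumptions, not the project's real values), a stale bot built on GitHub's `actions/stale` action:

```yaml
name: Stale
on:
  schedule:
    - cron: '0 0 * * *'   # run once a day

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v4
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-stale: 30        # assumed value
          days-before-close: 7         # assumed value
          stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity.'
          stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity.'
          exempt-issue-labels: 'security'   # assumed label
```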
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,616 | [Bug] [Worker] Worker fakes death when it stop itself fail. | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When I ran a stress test, I found that the worker faked death and printed nothing to the log file. At the same time, the worker no longer existed under the ZK node path, and the Master could not dispatch tasks because there was no worker.
Here is the log just before the worker stopped:
```
[ERROR] 2021-10-15 15:10:57.590 org.apache.dolphinscheduler.server.worker.WorkerServer:[223] - worker server stop exception
org.apache.dolphinscheduler.spi.register.RegistryException: zookeeper delete key error
at org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperRegistry.delete(ZookeeperRegistry.java:272)
at org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperRegistry.remove(ZookeeperRegistry.java:199)
at org.apache.dolphinscheduler.service.registry.RegistryCenter.remove(RegistryCenter.java:157)
at org.apache.dolphinscheduler.server.worker.registry.WorkerRegistryClient.unRegistry(WorkerRegistryClient.java:128)
at org.apache.dolphinscheduler.server.worker.WorkerServer.close(WorkerServer.java:219)
at org.apache.dolphinscheduler.server.worker.WorkerServer.stop(WorkerServer.java:229)
at org.apache.dolphinscheduler.server.registry.HeartBeatTask.run(HeartBeatTask.java:81)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /dolphinscheduler/nodes/worker/default/172.28.132.15:1234
at org.apache.zookeeper.KeeperException.create(KeeperException.java:102)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:54)
at org.apache.zookeeper.ZooKeeper.delete(ZooKeeper.java:882)
at org.apache.curator.framework.imps.DeleteBuilderImpl$5.call(DeleteBuilderImpl.java:274)
at org.apache.curator.framework.imps.DeleteBuilderImpl$5.call(DeleteBuilderImpl.java:268)
at org.apache.curator.connection.StandardConnectionHandlingPolicy.callWithRetry(StandardConnectionHandlingPolicy.java:67)
at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:81)
at org.apache.curator.framework.imps.DeleteBuilderImpl.pathInForeground(DeleteBuilderImpl.java:265)
at org.apache.curator.framework.imps.DeleteBuilderImpl.forPath(DeleteBuilderImpl.java:249)
at org.apache.curator.framework.imps.DeleteBuilderImpl.forPath(DeleteBuilderImpl.java:34)
at org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperRegistry.delete(ZookeeperRegistry.java:267)
... 13 common frames omitted
```
Maybe it would be better to set the stop signal to true only after ZK and Netty have been closed successfully.
### What you expected to happen
The worker can stop itself successfully when it is judged to be a dead server.
### How to reproduce
Run a stress test with many tasks running.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6616 | https://github.com/apache/dolphinscheduler/pull/6621 | 03aaef0fe6548e828144e6e5ac8ceb8bd6b650ad | 06e8e24708e2c2a2b9367b9e76a4dc0cec4cdfc6 | "2021-10-27T10:10:53Z" | java | "2021-10-29T09:44:06Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistryClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.registry;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS;
import static org.apache.dolphinscheduler.common.Constants.SINGLE_SLASH;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.IStoppable;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.registry.HeartBeatTask;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.registry.RegistryClient;
import org.apache.commons.lang.StringUtils;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.google.common.collect.Sets;
/**
* worker registry
*/
@Service
public class WorkerRegistryClient {
private final Logger logger = LoggerFactory.getLogger(WorkerRegistryClient.class);
/**
* worker config
*/
@Autowired
private WorkerConfig workerConfig;
/**
* worker manager
*/
@Autowired
private WorkerManagerThread workerManagerThread;
/**
* heartbeat executor
*/
private ScheduledExecutorService heartBeatExecutor;
private RegistryClient registryClient;
/**
* worker startup time, ms
*/
private long startupTime;
private Set<String> workerGroups;
@PostConstruct
public void initWorkRegistry() {
this.workerGroups = workerConfig.getWorkerGroups();
this.startupTime = System.currentTimeMillis();
this.registryClient = RegistryClient.getInstance();
this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor"));
}
/**
* registry
*/
public void registry() {
String address = NetUtils.getAddr(workerConfig.getListenPort());
Set<String> workerZkPaths = getWorkerZkPaths();
int workerHeartbeatInterval = workerConfig.getWorkerHeartbeatInterval();
for (String workerZKPath : workerZkPaths) {
registryClient.persistEphemeral(workerZKPath, "");
logger.info("worker node : {} registry to ZK {} successfully", address, workerZKPath);
}
HeartBeatTask heartBeatTask = new HeartBeatTask(startupTime,
workerConfig.getWorkerMaxCpuloadAvg(),
workerConfig.getWorkerReservedMemory(),
workerConfig.getHostWeight(),
workerZkPaths,
Constants.WORKER_TYPE,
registryClient,
workerConfig.getWorkerExecThreads(),
workerManagerThread
);
this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, workerHeartbeatInterval, workerHeartbeatInterval, TimeUnit.SECONDS);
logger.info("worker node : {} heartbeat interval {} s", address, workerHeartbeatInterval);
}
/**
* remove registry info
*/
public void unRegistry() {
String address = getLocalAddress();
Set<String> workerZkPaths = getWorkerZkPaths();
for (String workerZkPath : workerZkPaths) {
registryClient.remove(workerZkPath);
logger.info("worker node : {} unRegistry from ZK {}.", address, workerZkPath);
}
this.heartBeatExecutor.shutdownNow();
logger.info("heartbeat executor shutdown");
registryClient.close();
}
/**
* get worker path
*/
public Set<String> getWorkerZkPaths() {
Set<String> workerPaths = Sets.newHashSet();
String address = getLocalAddress();
for (String workGroup : this.workerGroups) {
StringJoiner workerPathJoiner = new StringJoiner(SINGLE_SLASH);
workerPathJoiner.add(REGISTRY_DOLPHINSCHEDULER_WORKERS);
if (StringUtils.isEmpty(workGroup)) {
workGroup = DEFAULT_WORKER_GROUP;
}
            // trim and lower case are needed
workerPathJoiner.add(workGroup.trim().toLowerCase());
workerPathJoiner.add(address);
workerPaths.add(workerPathJoiner.toString());
}
return workerPaths;
}
public void handleDeadServer(Set<String> nodeSet, NodeType nodeType, String opType) throws Exception {
registryClient.handleDeadServer(nodeSet, nodeType, opType);
}
/**
* get local address
*/
private String getLocalAddress() {
return NetUtils.getAddr(workerConfig.getListenPort());
}
public void setRegistryStoppable(IStoppable stoppable) {
registryClient.setStoppable(stoppable);
}
}
|
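The stack trace in issue 6616 shows the failure escaping from `WorkerRegistryClient.unRegistry()` after the ZK connection was already lost, so the remaining shutdown steps (heartbeat executor, registry client) never run. One direction consistent with the report — a sketch only, not the merged fix from PR #6621 — is to make each registry removal tolerant so the rest of the cleanup always completes:

```java
    public void unRegistry() {
        String address = getLocalAddress();
        for (String workerZkPath : getWorkerZkPaths()) {
            try {
                registryClient.remove(workerZkPath);
                logger.info("worker node : {} unRegistry from ZK {}.", address, workerZkPath);
            } catch (Exception e) {
                // a lost ZK connection must not abort the rest of the shutdown;
                // the ephemeral node disappears on session expiry anyway
                logger.warn("worker node : {} unRegistry from ZK {} failed", address, workerZkPath, e);
            }
        }
        heartBeatExecutor.shutdownNow();
        logger.info("heartbeat executor shutdown");
        registryClient.close();
    }
```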
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,640 | [Bug] [WorkerServer] PROCESS_HOST_UPDATE_REQUST command not support | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Version: 2.0.0-alpha
When the Master server restarts, the worker receives the PROCESS_HOST_UPDATE_REQUST command but does not support it.
### What you expected to happen
Worker can handle this command type normally.
### How to reproduce
Run a task and restart the master server.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6640 | https://github.com/apache/dolphinscheduler/pull/6642 | dd6ed36f65d2add3bf8e31cad24ab25f4606c9d9 | 8850baff07cc5174a57a0560cbeeb1a3368a4dca | "2021-10-29T12:27:51Z" | java | "2021-10-31T13:10:51Z" | dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/command/CommandType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
public enum CommandType {
/**
* remove task log request,
*/
REMOVE_TAK_LOG_REQUEST,
/**
* remove task log response
*/
REMOVE_TAK_LOG_RESPONSE,
/**
* roll view log request
*/
ROLL_VIEW_LOG_REQUEST,
/**
* roll view log response
*/
ROLL_VIEW_LOG_RESPONSE,
/**
* view whole log request
*/
VIEW_WHOLE_LOG_REQUEST,
/**
* view whole log response
*/
VIEW_WHOLE_LOG_RESPONSE,
/**
* get log bytes request
*/
GET_LOG_BYTES_REQUEST,
/**
* get log bytes response
*/
GET_LOG_BYTES_RESPONSE,
WORKER_REQUEST,
MASTER_RESPONSE,
/**
* execute task request
*/
TASK_EXECUTE_REQUEST,
/**
* execute task ack
*/
TASK_EXECUTE_ACK,
/**
* execute task response
*/
TASK_EXECUTE_RESPONSE,
/**
* db task ack
*/
DB_TASK_ACK,
/**
* db task response
*/
DB_TASK_RESPONSE,
/**
* kill task
*/
TASK_KILL_REQUEST,
/**
* kill task response
*/
TASK_KILL_RESPONSE,
/**
* HEART_BEAT
*/
HEART_BEAT,
/**
* ping
*/
PING,
/**
* pong
*/
PONG,
/**
* alert send request
*/
ALERT_SEND_REQUEST,
/**
* alert send response
*/
ALERT_SEND_RESPONSE,
/**
* process host update
*/
PROCESS_HOST_UPDATE_REQUST,
/**
* process host update response
*/
PROCESS_HOST_UPDATE_RESPONSE,
/**
* state event request
*/
STATE_EVENT_REQUEST;
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,640 | [Bug] [WorkerServer] PROCESS_HOST_UPDATE_REQUST command not support | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Version: 2.0.0-alpha
When the Master server restarts, the worker receives the PROCESS_HOST_UPDATE_REQUST command but does not support it.
### What you expected to happen
Worker can handle this command type normally.
### How to reproduce
Run a task and restart the master server.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6640 | https://github.com/apache/dolphinscheduler/pull/6642 | dd6ed36f65d2add3bf8e31cad24ab25f4606c9d9 | 8850baff07cc5174a57a0560cbeeb1a3368a4dca | "2021-10-29T12:27:51Z" | java | "2021-10-31T13:10:51Z" | dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/command/HostUpdateCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import java.io.Serializable;
/**
* process host update
*/
public class HostUpdateCommand implements Serializable {
/**
* task id
*/
private int taskInstanceId;
private String processHost;
public int getTaskInstanceId() {
return taskInstanceId;
}
public void setTaskInstanceId(int taskInstanceId) {
this.taskInstanceId = taskInstanceId;
}
public String getProcessHost() {
return processHost;
}
public void setProcessHost(String processHost) {
this.processHost = processHost;
}
/**
* package request command
*
* @return command
*/
public Command convert2Command() {
Command command = new Command();
command.setType(CommandType.PROCESS_HOST_UPDATE_REQUST);
byte[] body = JSONUtils.toJsonByteArray(this);
command.setBody(body);
return command;
}
@Override
public String toString() {
return "HostUpdateCommand{"
+ "taskInstanceId=" + taskInstanceId
+ "host=" + processHost
+ '}';
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,640 | [Bug] [WorkerServer] PROCESS_HOST_UPDATE_REQUST command not support | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Version: 2.0.0-alpha
When the Master server restarts, the worker receives the PROCESS_HOST_UPDATE_REQUST command but does not support it.
### What you expected to happen
Worker can handle this command type normally.
### How to reproduce
Run a task and restart the master server.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6640 | https://github.com/apache/dolphinscheduler/pull/6642 | dd6ed36f65d2add3bf8e31cad24ab25f4606c9d9 | 8850baff07cc5174a57a0560cbeeb1a3368a4dca | "2021-10-29T12:27:51Z" | java | "2021-10-31T13:10:51Z" | dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/command/HostUpdateResponseCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import java.io.Serializable;
public class HostUpdateResponseCommand implements Serializable {
private int taskInstanceId;
private String processHost;
private int status;
public HostUpdateResponseCommand(int taskInstanceId, String processHost, int code) {
this.taskInstanceId = taskInstanceId;
this.processHost = processHost;
this.status = code;
}
public int getTaskInstanceId() {
return this.taskInstanceId;
}
public void setTaskInstanceId(int taskInstanceId) {
this.taskInstanceId = taskInstanceId;
}
public String getProcessHost() {
return this.processHost;
}
public void setProcessHost(String processHost) {
this.processHost = processHost;
}
public int getStatus() {
return status;
}
public void setStatus(int status) {
this.status = status;
}
/**
     * package response command
*
* @return command
*/
public Command convert2Command() {
Command command = new Command();
        command.setType(CommandType.PROCESS_HOST_UPDATE_RESPONSE);
byte[] body = JSONUtils.toJsonByteArray(this);
command.setBody(body);
return command;
}
@Override
public String toString() {
return "HostUpdateResponseCommand{"
+ "taskInstanceId=" + taskInstanceId
+ "host=" + processHost
+ '}';
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,640 | [Bug] [WorkerServer] PROCESS_HOST_UPDATE_REQUST command not support | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Version: 2.0.0-alpha
When the Master server restarts, the worker receives the PROCESS_HOST_UPDATE_REQUST command but does not support it.
### What you expected to happen
Worker can handle this command type normally.
### How to reproduce
Run a task and restart the master server.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6640 | https://github.com/apache/dolphinscheduler/pull/6642 | dd6ed36f65d2add3bf8e31cad24ab25f4606c9d9 | 8850baff07cc5174a57a0560cbeeb1a3368a4dca | "2021-10-29T12:27:51Z" | java | "2021-10-31T13:10:51Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.IStoppable;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.plugin.TaskPluginManager;
import org.apache.dolphinscheduler.server.worker.processor.DBTaskAckProcessor;
import org.apache.dolphinscheduler.server.worker.processor.DBTaskResponseProcessor;
import org.apache.dolphinscheduler.server.worker.processor.TaskExecuteProcessor;
import org.apache.dolphinscheduler.server.worker.processor.TaskKillProcessor;
import org.apache.dolphinscheduler.server.worker.registry.WorkerRegistryClient;
import org.apache.dolphinscheduler.server.worker.runner.RetryReportTaskStatusThread;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.spi.exception.PluginNotFoundException;
import org.apache.dolphinscheduler.spi.plugin.DolphinPluginLoader;
import org.apache.dolphinscheduler.spi.plugin.DolphinPluginManagerConfig;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import org.apache.commons.collections4.MapUtils;
import java.util.Set;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import com.facebook.presto.jdbc.internal.guava.collect.ImmutableList;
/**
* worker server
*/
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.REGEX, pattern = {
"org.apache.dolphinscheduler.server.master.*",
"org.apache.dolphinscheduler.server.monitor.*",
"org.apache.dolphinscheduler.server.log.*"
})
})
@EnableTransactionManagement
public class WorkerServer implements IStoppable {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(WorkerServer.class);
/**
* netty remote server
*/
private NettyRemotingServer nettyRemotingServer;
/**
* worker config
*/
@Autowired
private WorkerConfig workerConfig;
/**
* spring application context
* only use it for initialization
*/
@Autowired
private SpringApplicationContext springApplicationContext;
/**
* alert model netty remote server
*/
private AlertClientService alertClientService;
@Autowired
private RetryReportTaskStatusThread retryReportTaskStatusThread;
@Autowired
private WorkerManagerThread workerManagerThread;
/**
* worker registry
*/
@Autowired
private WorkerRegistryClient workerRegistryClient;
private TaskPluginManager taskPluginManager;
/**
* worker server startup, not use web service
*
* @param args arguments
*/
public static void main(String[] args) {
Thread.currentThread().setName(Constants.THREAD_NAME_WORKER_SERVER);
new SpringApplicationBuilder(WorkerServer.class).web(WebApplicationType.NONE).run(args);
}
/**
* worker server run
*/
@PostConstruct
public void run() {
// alert-server client registry
alertClientService = new AlertClientService(workerConfig.getAlertListenHost(), Constants.ALERT_RPC_PORT);
// init task plugin
initTaskPlugin();
// init remoting server
NettyServerConfig serverConfig = new NettyServerConfig();
serverConfig.setListenPort(workerConfig.getListenPort());
this.nettyRemotingServer = new NettyRemotingServer(serverConfig);
this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_REQUEST, new TaskExecuteProcessor(alertClientService, taskPluginManager));
this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_REQUEST, new TaskKillProcessor());
this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_ACK, new DBTaskAckProcessor());
this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_RESPONSE, new DBTaskResponseProcessor());
this.nettyRemotingServer.start();
// worker registry
try {
this.workerRegistryClient.registry();
this.workerRegistryClient.setRegistryStoppable(this);
Set<String> workerZkPaths = this.workerRegistryClient.getWorkerZkPaths();
this.workerRegistryClient.handleDeadServer(workerZkPaths, NodeType.WORKER, Constants.DELETE_OP);
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new RuntimeException(e);
}
// task execute manager
this.workerManagerThread.start();
// retry report task status
this.retryReportTaskStatusThread.start();
/**
* registry hooks, which are called before the process exits
*/
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
if (Stopper.isRunning()) {
close("shutdownHook");
}
}));
}
// todo better
private void initTaskPlugin() {
taskPluginManager = new TaskPluginManager();
DolphinPluginManagerConfig taskPluginManagerConfig = new DolphinPluginManagerConfig();
taskPluginManagerConfig.setPlugins(workerConfig.getTaskPluginBinding());
if (StringUtils.isNotBlank(workerConfig.getTaskPluginDir())) {
taskPluginManagerConfig.setInstalledPluginsDir(workerConfig.getTaskPluginDir().trim());
}
if (StringUtils.isNotBlank(workerConfig.getMavenLocalRepository())) {
taskPluginManagerConfig.setMavenLocalRepository(workerConfig.getMavenLocalRepository().trim());
}
DolphinPluginLoader taskPluginLoader = new DolphinPluginLoader(taskPluginManagerConfig, ImmutableList.of(taskPluginManager));
try {
taskPluginLoader.loadPlugins();
} catch (Exception e) {
throw new RuntimeException("Load Task Plugin Failed !", e);
}
if (MapUtils.isEmpty(taskPluginManager.getTaskChannelMap())) {
throw new PluginNotFoundException("Task Plugin Not Found,Please Check Config File");
}
}
public void close(String cause) {
try {
// execute only once
if (Stopper.isStopped()) {
return;
}
logger.info("worker server is stopping ..., cause : {}", cause);
            // set the stop signal to true
            Stopper.stop();
            try {
                // sleep 3 seconds so in-flight threads can stop quietly
Thread.sleep(3000L);
} catch (Exception e) {
logger.warn("thread sleep exception", e);
}
// close
this.nettyRemotingServer.close();
this.workerRegistryClient.unRegistry();
this.alertClientService.close();
this.springApplicationContext.close();
} catch (Exception e) {
logger.error("worker server stop exception ", e);
}
}
@Override
public void stop(String cause) {
close(cause);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,640 | [Bug] [WorkerServer] PROCESS_HOST_UPDATE_REQUST command not support | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Version: 2.0.0-alpha
When the Master server restarts, the worker receives the PROCESS_HOST_UPDATE_REQUST command but does not support it.
### What you expected to happen
Worker can handle this command type normally.
### How to reproduce
Run a task and restart the master server.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6640 | https://github.com/apache/dolphinscheduler/pull/6642 | dd6ed36f65d2add3bf8e31cad24ab25f4606c9d9 | 8850baff07cc5174a57a0560cbeeb1a3368a4dca | "2021-10-29T12:27:51Z" | java | "2021-10-31T13:10:51Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/HostUpdateProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRemoteChannel;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import io.netty.channel.Channel;
/**
* update process host
 * this is used when the master fails over
*/
public class HostUpdateProcessor implements NettyRequestProcessor {
private final Logger logger = LoggerFactory.getLogger(HostUpdateProcessor.class);
/**
* task callback service
*/
private final TaskCallbackService taskCallbackService;
public HostUpdateProcessor() {
this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
}
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.PROCESS_HOST_UPDATE_REQUST == command.getType(), String.format("invalid command type : %s", command.getType()));
HostUpdateCommand updateCommand = JSONUtils.parseObject(command.getBody(), HostUpdateCommand.class);
logger.info("received host update command : {}", updateCommand);
taskCallbackService.changeRemoteChannel(updateCommand.getTaskInstanceId(), new NettyRemoteChannel(channel, command.getOpaque()));
}
}
|
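Taken together, the five files above make the bug in issue 6640 concrete: `CommandType.PROCESS_HOST_UPDATE_REQUST` exists and `HostUpdateProcessor` knows how to handle it, but `WorkerServer.run()` never registers that processor, so the command arrives at the worker with no handler. The obvious shape of a fix is a one-line registration next to the existing ones (a sketch consistent with the report; pull request #6642 holds the actual change):

```java
        // in WorkerServer.run(), alongside the other registerProcessor calls
        this.nettyRemotingServer.registerProcessor(CommandType.PROCESS_HOST_UPDATE_REQUST,
                new HostUpdateProcessor());
```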
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,638 | [Bug] [MasterServer] task state no change when failover worker | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
version: 2.0-alpha
The task state stays in the running state after I stop the worker.
I tried to debug failoverWorker and found that the judging condition is wrong.
### What you expected to happen
After stopping the worker, the task should change state rather than remain running forever.
### How to reproduce
Run a long-running task and stop the worker.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6638 | https://github.com/apache/dolphinscheduler/pull/6639 | 8850baff07cc5174a57a0560cbeeb1a3368a4dca | ae0b18f3a3284357f7533c015086cd65f27caca5 | "2021-10-29T10:43:07Z" | java | "2021-10-31T13:12:55Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.registry;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_NODE;
import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.IStoppable;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder;
import org.apache.dolphinscheduler.server.master.cache.ProcessInstanceExecCacheManager;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread;
import org.apache.dolphinscheduler.server.registry.HeartBeatTask;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.service.registry.RegistryClient;
import org.apache.dolphinscheduler.spi.register.RegistryConnectListener;
import org.apache.dolphinscheduler.spi.register.RegistryConnectState;
import org.apache.commons.lang.StringUtils;
import java.util.Date;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.google.common.collect.Sets;
/**
* zookeeper master client
* <p>
* single instance
*/
@Component
public class MasterRegistryClient {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(MasterRegistryClient.class);
/**
* process service
*/
@Autowired
private ProcessService processService;
private RegistryClient registryClient;
/**
* master config
*/
@Autowired
private MasterConfig masterConfig;
/**
* heartbeat executor
*/
private ScheduledExecutorService heartBeatExecutor;
@Autowired
private ProcessInstanceExecCacheManager processInstanceExecCacheManager;
/**
* master startup time, ms
*/
private long startupTime;
private String localNodePath;
public void init() {
this.startupTime = System.currentTimeMillis();
this.registryClient = RegistryClient.getInstance();
this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor"));
}
public void start() {
String nodeLock = registryClient.getMasterStartUpLockPath();
try {
// create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/startup-masters
registryClient.getLock(nodeLock);
// master registry
registry();
String registryPath = getMasterPath();
registryClient.handleDeadServer(registryPath, NodeType.MASTER, Constants.DELETE_OP);
// init system node
while (!registryClient.checkNodeExists(NetUtils.getHost(), NodeType.MASTER)) {
ThreadUtils.sleep(SLEEP_TIME_MILLIS);
}
// self tolerant
if (registryClient.getActiveMasterNum() == 1) {
removeNodePath(null, NodeType.MASTER, true);
removeNodePath(null, NodeType.WORKER, true);
}
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_NODE, new MasterRegistryDataListener());
} catch (Exception e) {
logger.error("master start up exception", e);
} finally {
registryClient.releaseLock(nodeLock);
}
}
public void setRegistryStoppable(IStoppable stoppable) {
registryClient.setStoppable(stoppable);
}
public void closeRegistry() {
unRegistry();
}
/**
* remove zookeeper node path
*
* @param path zookeeper node path
* @param nodeType zookeeper node type
* @param failover is failover
*/
public void removeNodePath(String path, NodeType nodeType, boolean failover) {
logger.info("{} node deleted : {}", nodeType, path);
String failoverPath = getFailoverLockPath(nodeType);
try {
registryClient.getLock(failoverPath);
String serverHost = null;
if (!StringUtils.isEmpty(path)) {
serverHost = registryClient.getHostByEventDataPath(path);
if (StringUtils.isEmpty(serverHost)) {
logger.error("server down error: unknown path: {}", path);
return;
}
// handle dead server
registryClient.handleDeadServer(path, nodeType, Constants.ADD_OP);
}
//failover server
if (failover) {
failoverServerWhenDown(serverHost, nodeType);
}
} catch (Exception e) {
logger.error("{} server failover failed.", nodeType);
logger.error("failover exception ", e);
} finally {
registryClient.releaseLock(failoverPath);
}
}
/**
* failover server when server down
*
* @param serverHost server host
* @param nodeType zookeeper node type
*/
private void failoverServerWhenDown(String serverHost, NodeType nodeType) {
switch (nodeType) {
case MASTER:
failoverMaster(serverHost);
break;
case WORKER:
failoverWorker(serverHost, true, true);
break;
default:
break;
}
}
/**
* get failover lock path
*
* @param nodeType zookeeper node type
* @return fail over lock path
*/
private String getFailoverLockPath(NodeType nodeType) {
switch (nodeType) {
case MASTER:
return registryClient.getMasterFailoverLockPath();
case WORKER:
return registryClient.getWorkerFailoverLockPath();
default:
return "";
}
}
/**
* a task needs failover if it started before the worker server started
*
* @param taskInstance task instance
* @return true if the task instance needs failover
*/
private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) {
boolean taskNeedFailover = true;
// no host will execute this task instance now, so there is no need to fail it over
if (taskInstance.getHost() == null) {
return false;
}
// if the worker node exists in zookeeper, we must check whether the task started after the worker
if (registryClient.checkNodeExists(taskInstance.getHost(), NodeType.WORKER)) {
    // if the task started after the worker started, there is no need to fail the task over
if (checkTaskAfterWorkerStart(taskInstance)) {
taskNeedFailover = false;
}
}
return taskNeedFailover;
}
/**
* check whether the task started after the worker server started.
*
* @param taskInstance task instance
* @return true if the task instance start time is after the worker server start date
*/
private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) {
if (StringUtils.isEmpty(taskInstance.getHost())) {
return false;
}
Date workerServerStartDate = null;
List<Server> workerServers = registryClient.getServerList(NodeType.WORKER);
for (Server workerServer : workerServers) {
if (taskInstance.getHost().equals(workerServer.getHost() + Constants.COLON + workerServer.getPort())) {
workerServerStartDate = workerServer.getCreateTime();
break;
}
}
if (workerServerStartDate != null) {
return taskInstance.getStartTime().after(workerServerStartDate);
}
return false;
}
/**
* failover worker tasks
* <p>
* 1. kill yarn job if there are yarn jobs in tasks.
* 2. change task state from running to need failover.
* 3. failover all tasks when workerHost is null
*
* @param workerHost worker host
* @param needCheckWorkerAlive need check worker alive
*/
private void failoverWorker(String workerHost, boolean needCheckWorkerAlive, boolean checkOwner) {
logger.info("start worker[{}] failover ...", workerHost);
List<TaskInstance> needFailoverTaskInstanceList = processService.queryNeedFailoverTaskInstances(workerHost);
for (TaskInstance taskInstance : needFailoverTaskInstanceList) {
if (needCheckWorkerAlive) {
if (!checkTaskInstanceNeedFailover(taskInstance)) {
continue;
}
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
    logger.error("failover error, the process {} of task {} does not exist.",
            taskInstance.getProcessInstanceId(), taskInstance.getId());
    continue;
}
// check the process instance for null before touching its host;
// only failover the tasks owned by myself if the worker is down,
// master failover needs to handle the worker's tasks at the same time
if (workerHost == null
        || !checkOwner
        || processInstance.getHost().equalsIgnoreCase(workerHost)) {
taskInstance.setProcessInstance(processInstance);
TaskExecutionContext taskExecutionContext = TaskExecutionContextBuilder.get()
.buildTaskInstanceRelatedInfo(taskInstance)
.buildProcessInstanceRelatedInfo(processInstance)
.create();
// only kill yarn job if exists , the local thread has exited
ProcessUtils.killYarnJob(taskExecutionContext);
taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE);
processService.saveTaskInstance(taskInstance);
if (!processInstanceExecCacheManager.contains(processInstance.getId())) {
    // this process instance is not cached on this master; skip notifying it
    // instead of aborting failover for the remaining task instances
    continue;
}
WorkflowExecuteThread workflowExecuteThreadNotify = processInstanceExecCacheManager.getByProcessInstanceId(processInstance.getId());
StateEvent stateEvent = new StateEvent();
stateEvent.setTaskInstanceId(taskInstance.getId());
stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
stateEvent.setProcessInstanceId(processInstance.getId());
stateEvent.setExecutionStatus(taskInstance.getState());
workflowExecuteThreadNotify.addStateEvent(stateEvent);
}
}
logger.info("end worker[{}] failover ...", workerHost);
}
/**
* failover master tasks
*
* @param masterHost master host
*/
private void failoverMaster(String masterHost) {
logger.info("start master failover ...");
List<ProcessInstance> needFailoverProcessInstanceList = processService.queryNeedFailoverProcessInstances(masterHost);
logger.info("failover process list size:{} ", needFailoverProcessInstanceList.size());
// set the host of each process instance to null and insert a failover command
for (ProcessInstance processInstance : needFailoverProcessInstanceList) {
logger.info("failover process instance id: {} host:{}", processInstance.getId(), processInstance.getHost());
if (Constants.NULL.equals(processInstance.getHost())) {
continue;
}
processService.processNeedFailoverProcessInstances(processInstance);
}
failoverWorker(masterHost, true, false);
logger.info("master failover end");
}
public void blockAcquireMutex() {
registryClient.getLock(registryClient.getMasterLockPath());
}
public void releaseLock() {
registryClient.releaseLock(registryClient.getMasterLockPath());
}
/**
* registry
*/
public void registry() {
String address = NetUtils.getAddr(masterConfig.getListenPort());
localNodePath = getMasterPath();
int masterHeartbeatInterval = masterConfig.getMasterHeartbeatInterval();
HeartBeatTask heartBeatTask = new HeartBeatTask(startupTime,
masterConfig.getMasterMaxCpuloadAvg(),
masterConfig.getMasterReservedMemory(),
Sets.newHashSet(getMasterPath()),
Constants.MASTER_TYPE,
registryClient);
registryClient.persistEphemeral(localNodePath, heartBeatTask.getHeartBeatInfo());
registryClient.addConnectionStateListener(new MasterRegistryConnectStateListener());
this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, masterHeartbeatInterval, masterHeartbeatInterval, TimeUnit.SECONDS);
logger.info("master node : {} registry to ZK successfully with heartBeatInterval : {}s", address, masterHeartbeatInterval);
}
class MasterRegistryConnectStateListener implements RegistryConnectListener {
@Override
public void notify(RegistryConnectState newState) {
if (RegistryConnectState.RECONNECTED == newState) {
registryClient.persistEphemeral(localNodePath, "");
}
if (RegistryConnectState.SUSPENDED == newState) {
registryClient.persistEphemeral(localNodePath, "");
}
}
}
/**
* remove registry info
*/
public void unRegistry() {
try {
String address = getLocalAddress();
String localNodePath = getMasterPath();
registryClient.remove(localNodePath);
logger.info("master node : {} unRegistry to register center.", address);
heartBeatExecutor.shutdown();
logger.info("heartbeat executor shutdown");
registryClient.close();
} catch (Exception e) {
logger.error("remove registry path exception ", e);
}
}
/**
* get master path
*/
public String getMasterPath() {
String address = getLocalAddress();
return REGISTRY_DOLPHINSCHEDULER_MASTERS + "/" + address;
}
/**
* get local address
*/
private String getLocalAddress() {
return NetUtils.getAddr(masterConfig.getListenPort());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,408 | [Feature][API] the private function 'checkResourceExists' should remove the userId condition | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
The private function 'checkResourceExists' should remove the userId condition.
If we want to check whether a resource exists, we should not use the userId condition.
A resource's userId is the id of the user who created it,
so if we pass the userId '0', the check won't take effect.
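A minimal sketch of the proposed change (assuming a matching two-argument `existResource(fullName, type)` query is added to `ResourceMapper`; the final signature in the PR may differ):

```java
// current: every caller passes userId = 0, so the userId condition makes the
// existence check miss resources that were created by real users
private boolean checkResourceExists(String fullName, int userId, int type) {
    Boolean existResource = resourcesMapper.existResource(fullName, userId, type);
    return existResource == Boolean.TRUE;
}

// proposed: check existence by fullName and type only
private boolean checkResourceExists(String fullName, int type) {
    Boolean existResource = resourcesMapper.existResource(fullName, type);
    return existResource == Boolean.TRUE;
}
```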
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6408 | https://github.com/apache/dolphinscheduler/pull/6409 | 86c85114b23317b9f791e091101fa6289e9d6b7c | 3a8b80971ba4b23b8e919ad04e074bd273c40c9a | "2021-09-29T04:44:58Z" | java | "2021-10-31T13:26:48Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ResourcesServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.ALIAS;
import static org.apache.dolphinscheduler.common.Constants.CONTENT;
import static org.apache.dolphinscheduler.common.Constants.JAR;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.RegexUtils;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.spi.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.ResourcesUser;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.apache.commons.beanutils.BeanMap;
import org.apache.commons.lang.StringUtils;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.SerializationFeature;
/**
* resources service impl
*/
@Service
public class ResourcesServiceImpl extends BaseServiceImpl implements ResourcesService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceImpl.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
result = verifyResource(loginUser, type, fullName, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (DuplicateKeyException e) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new ServiceException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirectory(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
result = verifyPid(loginUser, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
result = verifyFile(name, type, file);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// check resource name exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", RegexUtils.escapeNRT(name));
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new ServiceException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", RegexUtils.escapeNRT(name), RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check whether the resource exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
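// NOTE: all current callers pass userId = 0, which makes this check miss
// resources created by real users (see issue #6408); the userId condition
// is expected to be removed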
private boolean checkResourceExists(String fullName, int userId, int type) {
Boolean existResource = resourcesMapper.existResource(fullName, userId, type);
return existResource == Boolean.TRUE;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
//check resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/") + 1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
result = verifyFile(name, type, file);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFileName(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory()) {
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList;
Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
private Result<Object> verifyFile(String name, ResourceType type, MultipartFile file) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
// rename file suffix and original suffix must be consistent
logger.error("rename file suffix and original suffix must be consistent: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@Override
public Result queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
Result result = new Result();
Page<Resource> page = new Page<>(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
}
if (directoryId != -1) {
Resource directory = resourcesMapper.selectById(directoryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
List<Integer> resourcesIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 0);
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page, userId, directoryId, type.ordinal(), searchVal,resourcesIds);
PageInfo<Resource> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotal((int)resourceIPage.getTotal());
pageInfo.setTotalList(resourceIPage.getRecords());
result.setData(pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirectory(User loginUser,String fullName,ResourceType type,Result<Object> result) {
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
try {
FileUtils.deleteFile(localFilename);
} catch (IOException ex) {
logger.error("delete local tmp file:{} error", localFilename, ex);
}
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@Override
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>();
List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query resource list by program type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@Override
public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) {
Map<String, Object> result = new HashMap<>();
List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type);
String suffix = ".jar";
if (programType != null) {
switch (programType) {
case JAVA:
case SCALA:
break;
case PYTHON:
suffix = ".py";
break;
default:
}
}
List<Resource> resources = new ResourceFilter(suffix, new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws IOException exception
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> delete(User loginUser, int resourceId) throws IOException {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
//if resource type is UDF,need check whether it is bound by UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}", udfFuncs);
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@Override
public Result<Object> verifyResourceName(String fullName, ResourceType type, User loginUser) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, RegexUtils.escapeNRT(fullName));
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if (tenant != null) {
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if (HadoopUtils.getInstance().exists(hdfsFilename)) {
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, RegexUtils.escapeNRT(fullName), hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
} else {
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid not exists, otherwise return false
*/
@Override
public Result<Object> queryResource(String fullName, Integer id, ResourceType type) {
Result<Object> result = new Result<>();
if (StringUtils.isBlank(fullName) && id == null) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@Override
public Result<Object> readResource(int resourceId, int skipLineNum, int limit) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {}", hdfsFileName);
try {
if (HadoopUtils.getInstance().exists(hdfsFileName)) {
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
} else {
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @param pid pid
* @param currentDir current directory
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
result = verifyResource(loginUser, type, fullName, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new ServiceException(result.getMsg());
}
return result;
}
private Result<Object> checkResourceUploadStartupState() {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
return result;
}
private Result<Object> verifyResource(User loginUser, ResourceType type, String fullName, int pid) {
Result<Object> result = verifyResourceName(fullName, type, loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
return verifyPid(loginUser, pid);
}
private Result<Object> verifyPid(User loginUser, int pid) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
return result;
}
/**
* update resource content
*
* @param resourceId resource id
* @param content content
* @return update result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> updateResourceContent(int resourceId, String content) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new ServiceException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result<Object> uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result<Object> result = new Result<>();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, RegexUtils.escapeNRT(content));
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {}, resource dir is {}", hdfsFileName, resourcePath);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws IOException exception
*/
@Override
public org.springframework.core.io.Resource downloadResource(int resourceId) throws IOException {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new ServiceException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new ServiceException("can't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user id {} not exists", userId);
throw new ServiceException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null) {
logger.error("tenant id {} not exists", user.getTenantId());
throw new ServiceException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {}, download local filename is {}", hdfsFileName, localFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
} else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list;
if (resourceList != null && !resourceList.isEmpty()) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
} else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
//only admin can operate
if (isNotAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@Override
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@Override
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> authedResources = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM);
Visitor visitor = new ResourceTreeVisitor(authedResources);
String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(visit);
String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return tenant code
*/
private String getTenantCode(int userId,Result<Object> result) {
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null) {
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf) {
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if (resource.isDirectory()) {
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList) {
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
/**
* query authored resource list (own and authorized)
* @param loginUser login user
* @param type ResourceType
* @return all authored resource list
*/
private List<Resource> queryAuthoredResourceList(User loginUser, ResourceType type) {
List<Resource> relationResources;
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
relationResources = new ArrayList<>();
} else {
// query resource relation
relationResources = queryResourceList(userId, 0);
}
List<Resource> ownResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
ownResourceList.addAll(relationResources);
return ownResourceList;
}
/**
* query resource list by userId and perm
* @param userId userId
* @param perm perm
* @return resource list
*/
private List<Resource> queryResourceList(Integer userId, int perm) {
List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, perm);
return CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourcesMapper.queryResourceListById(resIds);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,408 | [Feature][API] the private function 'checkResourceExists' should remove the userId condition | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
The private function 'checkResourceExists' should remove the userId condition.
If we want to check whether a resource exists, we should not use the userId condition.
A resource's userId is the id of the user who created it,
so if we pass the userId '0', the check won't take effect.
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6408 | https://github.com/apache/dolphinscheduler/pull/6409 | 86c85114b23317b9f791e091101fa6289e9d6b7c | 3a8b80971ba4b23b8e919ad04e074bd273c40c9a | "2021-09-29T04:44:58Z" | java | "2021-10-31T13:26:48Z" | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.spi.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.mock.web.MockMultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* resources service test
*/
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"sun.security.*", "javax.net.*"})
@PrepareForTest({HadoopUtils.class, PropertyUtils.class, FileUtils.class, org.apache.dolphinscheduler.api.utils.FileUtils.class})
public class ResourcesServiceTest {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceTest.class);
@InjectMocks
private ResourcesServiceImpl resourcesService;
@Mock
private ResourceMapper resourcesMapper;
@Mock
private TenantMapper tenantMapper;
@Mock
private HadoopUtils hadoopUtils;
@Mock
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Mock
private ResourceUserMapper resourceUserMapper;
@Before
public void setUp() {
PowerMockito.mockStatic(HadoopUtils.class);
PowerMockito.mockStatic(FileUtils.class);
PowerMockito.mockStatic(org.apache.dolphinscheduler.api.utils.FileUtils.class);
try {
// new HadoopUtils
PowerMockito.whenNew(HadoopUtils.class).withNoArguments().thenReturn(hadoopUtils);
} catch (Exception e) {
e.printStackTrace();
}
PowerMockito.when(HadoopUtils.getInstance()).thenReturn(hadoopUtils);
PowerMockito.mockStatic(PropertyUtils.class);
}
@Test
public void testCreateResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createResource(user, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null, -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf", "".getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.createResource(user, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, mockMultipartFile, -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_FORBID_CHANGE
mockMultipartFile = new MockMultipartFile("test.pdf", "test.pdf", "pdf", "test".getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.createResource(user, "ResourcesServiceTest.jar", "ResourcesServiceTest", ResourceType.FILE, mockMultipartFile, -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(), result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf", "ResourcesServiceTest.pdf", "pdf", "test".getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
result = resourcesService.createResource(user, "ResourcesServiceTest.pdf", "ResourcesServiceTest", ResourceType.UDF, mockMultipartFile, -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(), result.getMsg());
}
@Test
public void testCreateDirectory() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createDirectory(user, "directoryTest", "directory test", ResourceType.FILE, -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//PARENT_RESOURCE_NOT_EXIST
user.setId(1);
user.setTenantId(1);
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.createDirectory(user, "directoryTest", "directory test", ResourceType.FILE, 1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.existResource("/directoryTest", 0, 0)).thenReturn(true);
result = resourcesService.createDirectory(user, "directoryTest", "directory test", ResourceType.FILE, -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
}
@Test
public void testUpdateResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.updateResource(user, 1, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.updateResource(user, 0, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//USER_NO_OPERATION_PERM
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest", "ResourcesServiceTest", ResourceType.FILE, null);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
user.setId(1);
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(), Mockito.anyString())).thenReturn("test1");
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(false);
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF, null);
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//SUCCESS
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(true);
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest.jar", "ResourcesServiceTest", ResourceType.FILE, null);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
//RESOURCE_EXIST
Mockito.when(resourcesMapper.existResource("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(true);
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.FILE, null);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//USER_NOT_EXIST
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF, null);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF, null);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
try {
PowerMockito.when(HadoopUtils.getInstance().copy(Mockito.anyString(), Mockito.anyString(), true, true)).thenReturn(true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest1.jar", ResourceType.UDF, null);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testQueryResourceListPaging() {
User loginUser = new User();
loginUser.setUserType(UserType.ADMIN_USER);
IPage<Resource> resourcePage = new Page<>(1, 10);
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
Mockito.eq(0), Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"), Mockito.any())).thenReturn(resourcePage);
Result result = resourcesService.queryResourceListPaging(loginUser, -1, ResourceType.FILE, "test", 1, 10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getCode(), (int)result.getCode());
PageInfo pageInfo = (PageInfo) result.getData();
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getTotalList()));
}
@Test
public void testQueryResourceList() {
User loginUser = new User();
loginUser.setId(0);
loginUser.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0)).thenReturn(getResourceList());
Map<String, Object> result = resourcesService.queryResourceList(loginUser, ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<Resource> resourceList = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resourceList));
}
@Test
public void testDelete() {
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
// HDFS_NOT_STARTUP
Result result = resourcesService.delete(loginUser, 1);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.delete(loginUser, 2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
// USER_NO_OPERATION_PERM
result = resourcesService.delete(loginUser, 2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser, 1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//SUCCESS
loginUser.setTenantId(1);
Mockito.when(hadoopUtils.delete(Mockito.anyString(), Mockito.anyBoolean())).thenReturn(true);
Mockito.when(processDefinitionMapper.listResources()).thenReturn(getResources());
Mockito.when(resourcesMapper.deleteIds(Mockito.any())).thenReturn(1);
Mockito.when(resourceUserMapper.deleteResourceUserArray(Mockito.anyInt(), Mockito.any())).thenReturn(1);
result = resourcesService.delete(loginUser, 1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
} catch (Exception e) {
logger.error("delete error", e);
Assert.fail();
}
}
@Test
public void testVerifyResourceName() {
User user = new User();
user.setId(1);
Mockito.when(resourcesMapper.existResource("/ResourcesServiceTest.jar", 0, 0)).thenReturn(true);
Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar", ResourceType.FILE, user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
String unExistFullName = "/test.jar";
try {
Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error", e);
}
result = resourcesService.verifyResourceName("/test.jar", ResourceType.FILE, user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_FILE_EXIST
user.setTenantId(1);
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
logger.error("hadoop error", e);
}
PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar", ResourceType.FILE, user);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_EXIST.getCode() == result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2", ResourceType.FILE, user);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testReadResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
//HDFS_NOT_STARTUP
Result result = resourcesService.readResource(1, 1, 10);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(2, 1, 10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(1, 1, 10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(), result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.readResource(1, 1, 10);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.readResource(1, 1, 10);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_FILE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(hadoopUtils.exists(Mockito.anyString())).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error", e);
}
result = resourcesService.readResource(1, 1, 10);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_FILE_NOT_EXIST.getCode() == result.getCode());
//SUCCESS
try {
Mockito.when(hadoopUtils.exists(null)).thenReturn(true);
Mockito.when(hadoopUtils.catFile(null, 1, 10)).thenReturn(getContent());
} catch (IOException e) {
logger.error("hadoop error", e);
}
result = resourcesService.readResource(1, 1, 10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testOnlineCreateResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
PowerMockito.when(HadoopUtils.getHdfsResDir("hdfsdDir")).thenReturn("hdfsDir");
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
Result result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(), result.getMsg());
//RuntimeException
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/");
} catch (RuntimeException ex) {
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
}
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content", -1, "/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testUpdateResourceContent() {
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
// HDFS_NOT_STARTUP
Result result = resourcesService.updateResourceContent(1, "content");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.updateResourceContent(2, "content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.updateResourceContent(1, "content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(), result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.updateResourceContent(1, "content");
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.updateResourceContent(1, "content");
logger.info(result.toString());
Assert.assertTrue(Status.TENANT_NOT_EXIST.getCode() == result.getCode());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.updateResourceContent(1, "content");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testDownloadResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
org.springframework.core.io.Resource resourceMock = Mockito.mock(org.springframework.core.io.Resource.class);
try {
//resource null
org.springframework.core.io.Resource resource = resourcesService.downloadResource(1);
Assert.assertNull(resource);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(Mockito.any())).thenReturn(resourceMock);
resource = resourcesService.downloadResource(1);
Assert.assertNotNull(resource);
} catch (Exception e) {
logger.error("DownloadResource error", e);
Assert.fail();
}
}
@Test
public void testUnauthorizedFile() {
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedFile(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceExceptUserId(1)).thenReturn(getResourceList());
result = resourcesService.unauthorizedFile(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
@Test
public void testUnauthorizedUDFFunction() {
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedUDFFunction(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryUdfFuncExceptUserId(1)).thenReturn(getUdfFuncList());
result = resourcesService.unauthorizedUDFFunction(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedUDFFunction() {
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedUDFFunction(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(1)).thenReturn(getUdfFuncList());
result = resourcesService.authorizedUDFFunction(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedFile() {
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedFile(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
List<Integer> resIds = new ArrayList<>();
resIds.add(1);
Mockito.when(resourceUserMapper.queryResourcesIdListByUserIdAndPerm(Mockito.anyInt(), Mockito.anyInt())).thenReturn(resIds);
Mockito.when(resourcesMapper.queryResourceListById(Mockito.any())).thenReturn(getResourceList());
result = resourcesService.authorizedFile(user, 1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
@Test
public void testCatFile() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
//SUCCESS
try {
Mockito.when(hadoopUtils.exists(null)).thenReturn(true);
Mockito.when(hadoopUtils.catFile(null, 1, 10)).thenReturn(getContent());
List<String> list = hadoopUtils.catFile(null, 1, 10);
Assert.assertNotNull(list);
} catch (IOException e) {
logger.error("hadoop error", e);
}
}
private List<Resource> getResourceList() {
List<Resource> resources = new ArrayList<>();
resources.add(getResource());
return resources;
}
private Tenant getTenant() {
Tenant tenant = new Tenant();
tenant.setTenantCode("123");
return tenant;
}
private Resource getResource() {
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
private Resource getUdfResource() {
Resource resource = new Resource();
resource.setUserId(1);
resource.setDescription("udfTest");
resource.setAlias("udfTest.jar");
resource.setFullName("/udfTest.jar");
resource.setType(ResourceType.UDF);
return resource;
}
private UdfFunc getUdfFunc() {
UdfFunc udfFunc = new UdfFunc();
udfFunc.setId(1);
return udfFunc;
}
private List<UdfFunc> getUdfFuncList() {
List<UdfFunc> udfFuncs = new ArrayList<>();
udfFuncs.add(getUdfFunc());
return udfFuncs;
}
private User getUser() {
User user = new User();
user.setId(1);
user.setTenantId(1);
return user;
}
private List<String> getContent() {
List<String> contentList = new ArrayList<>();
contentList.add("test");
return contentList;
}
private List<Map<String, Object>> getResources() {
List<Map<String, Object>> resources = new ArrayList<>();
Map<String, Object> resource = new HashMap<>();
resource.put("id", 1);
resource.put("resource_ids", "1");
resources.add(resource);
return resources;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,408 | [Feature][API] the private function 'checkResourceExists' should remove the userId condition | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
the private function 'checkResourceExists' should remove the userId condition.
If we want to check whether the resource exists, we should not use the userId condition.
Because the resource was created by a user, its userId is the id of the user who created it.
If we use the userId '0', the check won't take effect.
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6408 | https://github.com/apache/dolphinscheduler/pull/6409 | 86c85114b23317b9f791e091101fa6289e9d6b7c | 3a8b80971ba4b23b8e919ad04e074bd273c40c9a | "2021-09-29T04:44:58Z" | java | "2021-10-31T13:26:48Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.ibatis.annotations.Param;
import java.util.List;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
/**
* resource mapper interface
*/
public interface ResourceMapper extends BaseMapper<Resource> {
/**
* query resource list
* @param fullName full name
* @param userId userId
* @param type type
* @return resource list
*/
List<Resource> queryResourceList(@Param("fullName") String fullName,
@Param("userId") int userId,
@Param("type") int type);
/**
* query resource list
* @param userId userId
* @param type type
* @return resource list
*/
List<Resource> queryResourceListAuthored(@Param("userId") int userId,
@Param("type") int type);
/**
* resource page
* @param page page
* @param userId userId
* @param id id
* @param type type
* @param searchVal searchVal
* @param resIds resIds
* @return resource page
*/
IPage<Resource> queryResourcePaging(IPage<Resource> page,
@Param("userId") int userId,
@Param("id") int id,
@Param("type") int type,
@Param("searchVal") String searchVal,
@Param("resIds") List<Integer> resIds);
/**
* query resource except userId
* @param userId userId
* @return resource list
*/
List<Resource> queryResourceExceptUserId(@Param("userId") int userId);
/**
* list authorized resource
* @param userId userId
* @param resNames resNames
* @param <T> T
* @return resource list
*/
<T> List<Resource> listAuthorizedResource(@Param("userId") int userId, @Param("resNames") T[] resNames);
/**
* list resources by id
* @param resIds resIds
* @return resource list
*/
List<Resource> queryResourceListById(@Param("resIds") List<Integer> resIds);
/**
* list authorized resource
* @param userId userId
* @param resIds resIds
* @param <T> T
* @return resource list
*/
<T> List<Resource> listAuthorizedResourceById(@Param("userId") int userId, @Param("resIds") T[] resIds);
/**
* delete resource by id array
* @param resIds resource id array
* @return delete num
*/
int deleteIds(@Param("resIds") Integer[] resIds);
/**
* list children
* @param directoryId directory id
* @return resource id array
*/
List<Integer> listChildren(@Param("directoryId") int directoryId);
/**
* query resource by full name or pid
* @param fullName full name
* @param type resource type
* @return resource
*/
List<Resource> queryResource(@Param("fullName") String fullName,@Param("type") int type);
/**
* list resource by id array
* @param resIds resource id array
* @return resource list
*/
List<Resource> listResourceByIds(@Param("resIds")Integer[] resIds);
/**
* update resource
* @param resourceList resource list
* @return update num
*/
int batchUpdateResource(@Param("resourceList") List<Resource> resourceList);
/**
* check resource exist
* @param fullName full name
* @param userId userId
* @param type type
* @return true if exist else return null
*/
Boolean existResource(@Param("fullName") String fullName,
@Param("userId") int userId,
@Param("type") int type);
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,408 | [Feature][API] the private function 'checkResourceExists' should remove the userId condition | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
the private function 'checkResourceExists' should remove the userId condition.
If we want to check whether the resource exists, we should not use the userId condition.
Because the resource was created by a user, its userId is the id of the user who created it.
If we use the userId '0', the check won't take effect.
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6408 | https://github.com/apache/dolphinscheduler/pull/6409 | 86c85114b23317b9f791e091101fa6289e9d6b7c | 3a8b80971ba4b23b8e919ad04e074bd273c40c9a | "2021-09-29T04:44:58Z" | java | "2021-10-31T13:26:48Z" | dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml | <?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceMapper">
<sql id="baseSqlV2">
${alias}.id, ${alias}.alias, ${alias}.file_name, ${alias}.description, ${alias}.user_id, ${alias}.type, ${alias}.size, ${alias}.create_time, ${alias}.update_time,
${alias}.pid, ${alias}.full_name, ${alias}.is_directory
</sql>
<select id="queryResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where 1= 1
<if test="fullName != null and fullName != ''">
and r.full_name = #{fullName}
</if>
<if test="type != -1">
and r.type = #{type}
</if>
<if test="userId != 0">
and r.user_id = #{userId}
</if>
</select>
<select id="queryResourceListAuthored" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where 1 = 1
<if test="type != -1">
and r.type=#{type}
</if>
<if test="userId != 0">
and r.user_id=#{userId}
</if>
</select>
<select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="d"/>
</include>
from t_ds_resources d
where d.type=#{type} and d.pid=#{id}
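<!-- non-admin callers (userId != 0) are limited to resources they own or that are shared with them -->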
<if test="userId != 0">
and (
<if test="resIds != null and resIds.size() > 0">
d.id in
<foreach collection="resIds" item="i" open="(" close=") or" separator=",">
#{i}
</foreach>
</if>
d.user_id=#{userId} )
</if>
<if test="searchVal != null and searchVal != ''">
and d.alias like concat('%', #{searchVal}, '%')
</if>
order by d.update_time desc
</select>
<select id="queryResourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where r.user_id <![CDATA[ <> ]]> #{userId}
</select>
<select id="listAuthorizedResource" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where r.type = 0
and r.user_id=#{userId}
<if test="resNames != null and resNames.length > 0">
and full_name in
<foreach collection="resNames" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="queryResourceListById" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where 1 = 1
<if test="resIds != null and resIds.size() > 0">
and r.id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listAuthorizedResourceById" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where r.user_id=#{userId}
<if test="resIds != null and resIds.length > 0">
and id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<delete id="deleteIds" parameterType="java.lang.Integer">
delete from t_ds_resources where id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</delete>
<select id="listChildren" resultType="java.lang.Integer">
select id
from t_ds_resources
where pid = #{directoryId}
</select>
<select id="queryResource" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where r.type = #{type}
and r.full_name = #{fullName}
</select>
<update id="batchUpdateResource" parameterType="java.util.List">
<foreach collection="resourceList" item="resource" index="index" open="" close="" separator=";">
update t_ds_resources
<set>
full_name=#{resource.fullName},
update_time=#{resource.updateTime}
</set>
<where>
id=#{resource.id}
</where>
</foreach>
</update>
<select id="listResourceByIds" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select
<include refid="baseSqlV2">
<property name="alias" value="r"/>
</include>
from t_ds_resources r
where r.id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</select>
<select id="existResource" resultType="java.lang.Boolean">
select 1
from t_ds_resources
where full_name = #{fullName}
and type = #{type}
and user_id = #{userId} limit 1
</select>
</mapper>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,408 | [Feature][API] the private function 'checkResourceExists' should remove the userId condition | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
the private function 'checkResourceExists' should remove the userId condition.
If we want to check whether the resource exists, we should not use the userId condition.
Because the resource was created by a user, its userId is the id of the user who created it.
If we use the userId '0', the check won't take effect.
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6408 | https://github.com/apache/dolphinscheduler/pull/6409 | 86c85114b23317b9f791e091101fa6289e9d6b7c | 3a8b80971ba4b23b8e919ad04e074bd273c40c9a | "2021-09-29T04:44:58Z" | java | "2021-10-31T13:26:48Z" | dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import static java.util.stream.Collectors.toList;
import static org.hamcrest.Matchers.greaterThan;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.spi.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.ResourcesUser;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
@RunWith(SpringRunner.class)
@SpringBootTest
@Transactional
@Rollback(true)
public class ResourceMapperTest {
@Autowired
ResourceMapper resourceMapper;
@Autowired
ResourceUserMapper resourceUserMapper;
@Autowired
TenantMapper tenantMapper;
@Autowired
UserMapper userMapper;
/**
* insert
*
* @return Resource
*/
private Resource insertOne() {
//insertOne
Resource resource = new Resource();
resource.setAlias("ut-resource");
resource.setFullName("/ut-resource");
resource.setPid(-1);
resource.setDirectory(false);
resource.setType(ResourceType.FILE);
resource.setUserId(111);
int status = resourceMapper.insert(resource);
if (status != 1) {
Assert.fail("insert data error");
}
return resource;
}
/**
* create resource by user
*
* @param user user
* @return Resource
*/
private Resource createResource(User user, boolean isDirectory, ResourceType resourceType, int pid, String alias, String fullName) {
//insertOne
Resource resource = new Resource();
resource.setDirectory(isDirectory);
resource.setType(resourceType);
resource.setAlias(alias);
resource.setFullName(fullName);
resource.setPid(pid);
resource.setUserId(user.getId());
int status = resourceMapper.insert(resource);
if (status != 1) {
Assert.fail("insert data error");
}
return resource;
}
/**
* create resource by user
*
* @param user user
* @return Resource
*/
private Resource createResource(User user) {
//insertOne
String alias = String.format("ut-resource-%s", user.getUserName());
String fullName = String.format("/%s", alias);
Resource resource = createResource(user, false, ResourceType.FILE, -1, alias, fullName);
return resource;
}
/**
* create user
*
* @return User
*/
private User createGeneralUser(String userName) {
User user = new User();
user.setUserName(userName);
user.setUserPassword("1");
user.setEmail("xx@123.com");
user.setUserType(UserType.GENERAL_USER);
user.setCreateTime(new Date());
user.setTenantId(1);
user.setUpdateTime(new Date());
int status = userMapper.insert(user);
if (status != 1) {
Assert.fail("insert data error");
}
return user;
}
/**
* create resource user
*
* @return ResourcesUser
*/
private ResourcesUser createResourcesUser(Resource resource, User user) {
//insertOne
ResourcesUser resourcesUser = new ResourcesUser();
resourcesUser.setCreateTime(new Date());
resourcesUser.setUpdateTime(new Date());
resourcesUser.setUserId(user.getId());
resourcesUser.setResourcesId(resource.getId());
resourcesUser.setPerm(7);
resourceUserMapper.insert(resourcesUser);
return resourcesUser;
}
@Test
public void testInsert() {
Resource resource = insertOne();
assertNotNull(resource.getId());
assertThat(resource.getId(), greaterThan(0));
}
/**
* test update
*/
@Test
public void testUpdate() {
//insertOne
Resource resource = insertOne();
resource.setCreateTime(new Date());
//update
int update = resourceMapper.updateById(resource);
Assert.assertEquals(1, update);
}
/**
* test delete
*/
@Test
public void testDelete() {
Resource resourceMap = insertOne();
int delete = resourceMapper.deleteById(resourceMap.getId());
Assert.assertEquals(1, delete);
}
/**
* test query
*/
@Test
public void testQuery() {
Resource resource = insertOne();
//query
List<Resource> resources = resourceMapper.selectList(null);
Assert.assertNotEquals(resources.size(), 0);
}
/**
* test query resource list
*/
@Test
public void testQueryResourceList() {
Resource resource = insertOne();
String alias = "";
int userId = resource.getUserId();
int type = resource.getType().ordinal();
List<Resource> resources = resourceMapper.queryResourceList(
alias,
userId,
type
);
Assert.assertNotEquals(resources.size(), 0);
}
/**
* test page
*/
@Test
public void testQueryResourcePaging() {
Resource resource = insertOne();
ResourcesUser resourcesUser = new ResourcesUser();
resourcesUser.setResourcesId(resource.getId());
resourcesUser.setUserId(1110);
resourceUserMapper.insert(resourcesUser);
Page<Resource> page = new Page(1, 3);
IPage<Resource> resourceIPage = resourceMapper.queryResourcePaging(
page,
0,
-1,
resource.getType().ordinal(),
"",
new ArrayList<>()
);
IPage<Resource> resourceIPage1 = resourceMapper.queryResourcePaging(
page,
1110,
-1,
resource.getType().ordinal(),
"",
null
);
Assert.assertEquals(resourceIPage.getTotal(), 0);
Assert.assertEquals(resourceIPage1.getTotal(), 0);
}
/**
* test authed resource list
*/
@Test
public void testQueryResourceListAuthored() {
Resource resource = insertOne();
List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(resource.getUserId(), Constants.AUTHORIZE_WRITABLE_PERM);
List<Resource> resources = CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourceMapper.queryResourceListById(resIds);
ResourcesUser resourcesUser = new ResourcesUser();
resourcesUser.setResourcesId(resource.getId());
resourcesUser.setUserId(1110);
resourcesUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM);
resourceUserMapper.insert(resourcesUser);
List<Integer> resIds1 = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(1110, Constants.AUTHORIZE_WRITABLE_PERM);
List<Resource> resources1 = CollectionUtils.isEmpty(resIds1) ? new ArrayList<>() : resourceMapper.queryResourceListById(resIds1);
Assert.assertEquals(0, resources.size());
Assert.assertNotEquals(0, resources1.size());
}
/**
* test authed resource list
*/
@Test
public void testQueryAuthorizedResourceList() {
Resource resource = insertOne();
List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(resource.getUserId(), Constants.AUTHORIZE_WRITABLE_PERM);
List<Resource> resources = CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourceMapper.queryResourceListById(resIds);
resourceMapper.deleteById(resource.getId());
Assert.assertEquals(0, resources.size());
}
/**
* test query resource expect userId
*/
@Test
public void testQueryResourceExceptUserId() {
Resource resource = insertOne();
List<Resource> resources = resourceMapper.queryResourceExceptUserId(
11111
);
Assert.assertNotEquals(resources.size(), 0);
}
/**
* test query tenant code by resource name
*/
@Test
public void testQueryTenantCodeByResourceName() {
Tenant tenant = new Tenant();
tenant.setTenantCode("ut tenant code for resource");
int tenantInsertStatus = tenantMapper.insert(tenant);
if (tenantInsertStatus != 1) {
Assert.fail("insert tenant data error");
}
User user = new User();
user.setTenantId(tenant.getId());
user.setUserName("ut user");
int userInsertStatus = userMapper.insert(user);
if (userInsertStatus != 1) {
Assert.fail("insert user data error");
}
Resource resource = insertOne();
resource.setUserId(user.getId());
int userUpdateStatus = resourceMapper.updateById(resource);
if (userUpdateStatus != 1) {
Assert.fail("update user data error");
}
List<Resource> resourceList = resourceMapper.queryResource(resource.getFullName(), ResourceType.FILE.ordinal());
int resourceUserId = resourceList.get(0).getUserId();
User resourceUser = userMapper.selectById(resourceUserId);
Tenant resourceTenant = tenantMapper.selectById(resourceUser.getTenantId());
Assert.assertEquals("ut tenant code for resource", resourceTenant.getTenantCode());
}
@Test
public void testListAuthorizedResource() {
// create a general user
User generalUser1 = createGeneralUser("user1");
User generalUser2 = createGeneralUser("user2");
// create one resource
Resource resource = createResource(generalUser2);
Resource unauthorizedResource = createResource(generalUser1);
// need download resources
String[] resNames = new String[]{resource.getFullName(), unauthorizedResource.getFullName()};
List<Resource> resources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
Assert.assertEquals(generalUser2.getId(), resource.getUserId());
Assert.assertFalse(resources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
// authorize object unauthorizedResource to generalUser
createResourcesUser(unauthorizedResource, generalUser2);
List<Resource> authorizedResources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
Assert.assertTrue(authorizedResources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
}
@Test
public void deleteIdsTest() {
// create a general user
User generalUser1 = createGeneralUser("user1");
User generalUser = createGeneralUser("user");
Resource resource = createResource(generalUser);
Resource resource1 = createResource(generalUser1);
List<Integer> resourceList = new ArrayList<>();
resourceList.add(resource.getId());
resourceList.add(resource1.getId());
int result = resourceMapper.deleteIds(resourceList.toArray(new Integer[resourceList.size()]));
Assert.assertEquals(result, 2);
}
@Test
public void queryResourceListAuthoredTest() {
// create a general user
User generalUser1 = createGeneralUser("user1");
User generalUser2 = createGeneralUser("user2");
// create resource
Resource resource = createResource(generalUser1);
createResourcesUser(resource, generalUser2);
List<Resource> resourceList = resourceMapper.queryResourceListAuthored(generalUser2.getId(), ResourceType.FILE.ordinal());
Assert.assertNotNull(resourceList);
resourceList = resourceMapper.queryResourceListAuthored(generalUser2.getId(), ResourceType.FILE.ordinal());
Assert.assertFalse(resourceList.contains(resource));
}
@Test
public void batchUpdateResourceTest() {
// create a general user
User generalUser1 = createGeneralUser("user1");
// create resource
Resource resource = createResource(generalUser1);
resource.setFullName(String.format("%s-update", resource.getFullName()));
resource.setUpdateTime(new Date());
List<Resource> resourceList = new ArrayList<>();
resourceList.add(resource);
int result = resourceMapper.batchUpdateResource(resourceList);
if (result != resourceList.size()) {
Assert.fail("batch update resource data error");
}
}
@Test
public void existResourceTest() {
String fullName = "/ut-resource";
int userId = 111;
int type = ResourceType.FILE.getCode();
Assert.assertNull(resourceMapper.existResource(fullName, userId, type));
insertOne();
Assert.assertTrue(resourceMapper.existResource(fullName, userId, type));
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,653 | [Bug] [Server] Netty client create too many channel and make busy network | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
version: dev
After a stress test, I found heavy network traffic between the master and the worker, but no logs at all. I then dumped the network packets and found a large number of heartbeat reports.
![c5c20ffb4c6220c43924557b0478fd5](https://user-images.githubusercontent.com/11962619/139615061-c16f141f-e8a3-412c-9364-7f3418ccc5a9.png)
I tried running the test method, which uses the Netty client to send many commands, and the number of channels created was equal to the number of commands.
While debugging, I found that the key of the map that manages channels is Host, an object that does not override hashCode and equals.
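For reference, a minimal sketch of the missing overrides, assuming equality should be defined by ip and port so that two `Host` instances pointing at the same address map to the same channel-cache entry. This standalone class is only an illustration; the actual fix is in the linked PR.

```java
import java.util.Objects;

// Host-like key whose equality is defined by ip and port, so a channel map
// can reuse an existing channel instead of opening a new one per lookup.
public class HostKeySketch {

    private final String ip;
    private final int port;

    public HostKeySketch(String ip, int port) {
        this.ip = ip;
        this.port = port;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof HostKeySketch)) {
            return false;
        }
        HostKeySketch other = (HostKeySketch) o;
        return port == other.port && Objects.equals(ip, other.ip);
    }

    @Override
    public int hashCode() {
        return Objects.hash(ip, port);
    }
}
```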
### What you expected to happen
The Netty client should create fewer channels by reusing the channel it already has for a given host.
### How to reproduce
Run the test method, which uses the Netty client to send many commands.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6653 | https://github.com/apache/dolphinscheduler/pull/6654 | 3a8b80971ba4b23b8e919ad04e074bd273c40c9a | daca3baf6662b9bcf3de856722ece79aad753773 | "2021-11-01T02:55:40Z" | java | "2021-11-01T04:18:39Z" | dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Host.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.utils;
import static org.apache.dolphinscheduler.common.Constants.COLON;
import java.io.Serializable;
/**
* server address
*/
public class Host implements Serializable {
/**
* address
*/
private String address;
/**
* ip
*/
private String ip;
/**
* port
*/
private int port;
public Host() {
}
public Host(String ip, int port) {
this.ip = ip;
this.port = port;
this.address = ip + COLON + port;
}
public Host(String address) {
String[] parts = splitAddress(address);
this.ip = parts[0];
this.port = Integer.parseInt(parts[1]);
this.address = address;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
String[] parts = splitAddress(address);
this.ip = parts[0];
this.port = Integer.parseInt(parts[1]);
this.address = address;
}
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
this.address = ip + COLON + port;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
this.address = ip + COLON + port;
}
/**
* convert an address string to a Host
*
* @param address address
* @return host
*/
public static Host of(String address) {
String[] parts = splitAddress(address);
return new Host(parts[0], Integer.parseInt(parts[1]));
}
/**
* split an address string into its ip and port parts
*
* @param address address
* @return a two-element array: [ip, port]
*/
public static String[] splitAddress(String address) {
if (address == null) {
throw new IllegalArgumentException("Host : address is null.");
}
String[] parts = address.split(COLON);
if (parts.length != 2) {
throw new IllegalArgumentException(String.format("Host : %s illegal.", address));
}
return parts;
}
/**
* whether old version
*
* @param address address
* @return old version is true , otherwise is false
*/
public static Boolean isOldVersion(String address) {
String[] parts = address.split(COLON);
return parts.length != 2;
}
@Override
public String toString() {
return "Host{"
+ "address='" + address + '\''
+ ", ip='" + ip + '\''
+ ", port=" + port
+ '}';
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,625 | [Bug] [Standalone Server] Standalone server load plugin from fixed maven local repository path | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
When using the standalone server, the DolphinPluginManager looks for plugin dependencies in the local maven repository, but the local maven repository is a fixed value in `DolphinPluginManagerConfig.java`, whose value is `${user.home}/.m2/repository`.
![图片](https://user-images.githubusercontent.com/23203149/139200190-9267993e-6df7-4b5c-a443-4aad86cf5a78.png)
In my development environment, I set the maven local repository path (localRepository) to `D:\\repository` in `${user.home}/.m2/settings.xml`.
![Local Maven Repository Settings](https://user-images.githubusercontent.com/23203149/139199519-7c7705a3-5d3d-4931-93ce-e2d1affc137f.png)
As a result, the plugin manager could not load plugins correctly.
### What you expected to happen
* We should load from settings.xml first:
1. load from `${user.home}/.m2/settings.xml`
2. if `${user.home}/.m2/settings.xml` does not exist, load from `${MAVEN_HOME}/conf/settings.xml`
* If settings.xml does not exist, or the `localRepository` node does not exist in settings.xml, use the default value (a sketch of this lookup follows the list).
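A minimal sketch of the expected lookup, using a hypothetical helper named `resolveLocalRepository` (the name and structure are illustrative, not the merged implementation):
```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;

// Hypothetical sketch: resolve the maven local repository instead of hard-coding it.
static String resolveLocalRepository(String defaultRepository) {
    Path settings = Paths.get(System.getProperty("user.home"), ".m2", "settings.xml");
    if (!Files.exists(settings)) {
        String mavenHome = System.getenv("MAVEN_HOME");
        settings = mavenHome == null ? null : Paths.get(mavenHome, "conf", "settings.xml");
    }
    if (settings == null || !Files.exists(settings)) {
        return defaultRepository; // no settings.xml anywhere, fall back
    }
    try {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .parse(settings.toFile());
        NodeList nodes = doc.getElementsByTagName("localRepository");
        if (nodes.getLength() > 0) {
            String content = nodes.item(0).getTextContent().trim();
            if (!content.isEmpty() && Files.exists(Paths.get(content))) {
                return content;
            }
        }
    } catch (Exception e) {
        // fall back to the default on any parse error
    }
    return defaultRepository;
}
```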
### How to reproduce
* set the local repository to another path in the IDE
* start the standalone server
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6625 | https://github.com/apache/dolphinscheduler/pull/6626 | f5d8356dc310ab942039834ceff6af1cf1f9f83c | 61a8726ccc42a2adba8cdc859e3c3271099fdade | "2021-10-28T06:34:54Z" | java | "2021-11-03T10:46:25Z" | dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/plugin/DolphinPluginManagerConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.spi.plugin;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import java.io.File;
import java.util.List;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
/**
* Dolphin Scheduler Plugin Manager Config
*/
public class DolphinPluginManagerConfig {
/**
* The dir of the Alert Plugin in.
* When AlertServer is running on the server, it will load the Alert Plugin from this directory.
*/
private File installedPluginsDir;
/**
* The plugin should be load.
* The installedPluginsDir is empty when we development and run server in IDEA. Then we can config which plugin should be load by param name alert.plugin.binding in the alert.properties file
*/
private List<String> plugins;
/**
* Development, When AlertServer is running on IDE, AlertPluginLoad can load Alert Plugin from local Repository.
*/
private String mavenLocalRepository = System.getProperty("user.home") + "/.m2/repository";
private List<String> mavenRemoteRepository = ImmutableList.of("http://repo1.maven.org/maven2/");
File getInstalledPluginsDir() {
return installedPluginsDir;
}
/**
* @param pluginDir plugin directory
*/
public void setInstalledPluginsDir(String pluginDir) {
requireNonNull(pluginDir, "pluginDir can not be null");
File pluginDirFile = new File(pluginDir);
if (!pluginDirFile.exists()) {
throw new IllegalArgumentException(format("plugin dir not exists ! %s", pluginDirFile.getPath()));
}
this.installedPluginsDir = pluginDirFile;
}
public List<String> getPlugins() {
return plugins;
}
public DolphinPluginManagerConfig setPlugins(List<String> plugins) {
this.plugins = plugins;
return this;
}
/**
* When development and run server in IDE, this method can set plugins in alert.properties .
* Then when you start AlertServer in IDE, the plugin can be load.
* eg:
* file: alert.properties
* alert.plugin=\
* ../dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/pom.xml, \
* ../dolphinscheduler-alert-plugin/dolphinscheduler-alert-wechat/pom.xml
*
* @param plugins plugins
* @return DolphinPluginManagerConfig
*/
public DolphinPluginManagerConfig setPlugins(String plugins) {
if (plugins == null) {
this.plugins = null;
} else {
this.plugins = ImmutableList.copyOf(Splitter.on(',').omitEmptyStrings().trimResults().split(plugins));
}
return this;
}
String getMavenLocalRepository() {
return mavenLocalRepository;
}
public void setMavenLocalRepository(String mavenLocalRepository) {
this.mavenLocalRepository = mavenLocalRepository;
}
List<String> getMavenRemoteRepository() {
return mavenRemoteRepository;
}
public DolphinPluginManagerConfig setMavenRemoteRepository(List<String> mavenRemoteRepository) {
this.mavenRemoteRepository = mavenRemoteRepository;
return this;
}
public DolphinPluginManagerConfig setMavenRemoteRepository(String mavenRemoteRepository) {
this.mavenRemoteRepository = ImmutableList.copyOf(Splitter.on(',').omitEmptyStrings().trimResults().split(mavenRemoteRepository));
return this;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,684 | [Feature][Directory] Remove empty directory | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
I find that this directory is not used, right?
It might be better to remove this empty directory; if it is actually needed, we should add a description explaining its purpose.
https://github.com/apache/dolphinscheduler/tree/dev/repository/dolphinscheduler/dolphinscheduler-ui
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6684 | https://github.com/apache/dolphinscheduler/pull/6685 | 2e7036529c356b35ed99ea46f7ffefe66ec2a916 | cbb623bfa0e44419e6f1e21ea64c86cd33df3560 | "2021-11-03T14:44:31Z" | java | "2021-11-04T05:22:01Z" | repository/dolphinscheduler/dolphinscheduler-ui/.gitignore | .idea
.settings
package-lock.json
.classpath
.project
node_modules |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,689 | [Bug] [Standalone Server] DolphinPluginManagerConfig get maven settings does not check file exists | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
I created PR #6626 to fix #6625, but I forgot to check whether the `settings.xml` file exists in the code. I feel very sorry about this~
If `~/.m2/settings.xml` or `${MAVEN_HOME}/conf/settings.xml` does not exist, a FileNotFoundException will be thrown.
### What you expected to happen
Check whether the file exists before parsing it; a sketch follows.
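A minimal sketch of the check, reusing the shape of the existing `getMavenSettingsXmlFromUserHome` helper (the exact fix may differ):
```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Return the settings.xml path only when the file actually exists; otherwise null,
// so callers fall back to the next location or the default repository.
private Path getMavenSettingsXmlFromUserHome() {
    String userHome = System.getProperty("user.home");
    if (userHome == null || userHome.isEmpty()) {
        return null;
    }
    Path settingsXmlPath = Paths.get(userHome, ".m2", "settings.xml").toAbsolutePath();
    return Files.exists(settingsXmlPath) ? settingsXmlPath : null;
}
```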
### How to reproduce
* remove `~/.m2/settings.xml` and then start the server
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6689 | https://github.com/apache/dolphinscheduler/pull/6691 | 66fbcae9ddd6c0ca925c24ef01345fc793d85586 | 71047db39ea97373411a674ab0f3efe647f28a9d | "2021-11-04T02:18:15Z" | java | "2021-11-04T12:17:53Z" | dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/plugin/DolphinPluginManagerConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.spi.plugin;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
/**
* Dolphin Scheduler Plugin Manager Config
*/
public class DolphinPluginManagerConfig {
private static final Logger logger = LoggerFactory.getLogger(DolphinPluginManagerConfig.class);
/**
* The dir of the Alert Plugin in.
* When AlertServer is running on the server, it will load the Alert Plugin from this directory.
*/
private File installedPluginsDir;
/**
* The plugin should be load.
* The installedPluginsDir is empty when we development and run server in IDEA. Then we can config which plugin should be load by param name alert.plugin.binding in the alert.properties file
*/
private List<String> plugins;
/**
* Development, When AlertServer is running on IDE, AlertPluginLoad can load Alert Plugin from local Repository.
*/
private final String defaultLocalRepository = System.getProperty("user.home") + "/.m2/repository";
private String mavenLocalRepository = getMavenLocalRepositoryOrDefault(defaultLocalRepository);
private List<String> mavenRemoteRepository = ImmutableList.of("http://repo1.maven.org/maven2/");
File getInstalledPluginsDir() {
return installedPluginsDir;
}
/**
* @param pluginDir plugin directory
*/
public void setInstalledPluginsDir(String pluginDir) {
requireNonNull(pluginDir, "pluginDir can not be null");
File pluginDirFile = new File(pluginDir);
if (!pluginDirFile.exists()) {
throw new IllegalArgumentException(format("plugin dir not exists ! %s", pluginDirFile.getPath()));
}
this.installedPluginsDir = pluginDirFile;
}
public List<String> getPlugins() {
return plugins;
}
public DolphinPluginManagerConfig setPlugins(List<String> plugins) {
this.plugins = plugins;
return this;
}
/**
* When development and run server in IDE, this method can set plugins in alert.properties .
* Then when you start AlertServer in IDE, the plugin can be load.
* eg:
* file: alert.properties
* alert.plugin=\
* ../dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/pom.xml, \
* ../dolphinscheduler-alert-plugin/dolphinscheduler-alert-wechat/pom.xml
*
* @param plugins plugins
* @return DolphinPluginManagerConfig
*/
public DolphinPluginManagerConfig setPlugins(String plugins) {
if (plugins == null) {
this.plugins = null;
} else {
this.plugins = ImmutableList.copyOf(Splitter.on(',').omitEmptyStrings().trimResults().split(plugins));
}
return this;
}
String getMavenLocalRepository() {
return mavenLocalRepository;
}
public void setMavenLocalRepository(String mavenLocalRepository) {
this.mavenLocalRepository = mavenLocalRepository;
}
List<String> getMavenRemoteRepository() {
return mavenRemoteRepository;
}
/**
* Get local repository from maven settings.xml if available.
* <p>
* if System environment does not exists settings.xml, return the default value.
* </p>
*
* @param defaultRepository default repository path.
* @return local repository path.
*/
private String getMavenLocalRepositoryOrDefault(String defaultRepository) {
// get 'settings.xml' from user home
Path settingsXmlPath = getMavenSettingsXmlFromUserHome();
// if user home does not exist settings.xml, get from '$MAVEN_HOME/conf/settings.xml'
if (settingsXmlPath == null) {
logger.info("User home does not exists maven settings.xml");
settingsXmlPath = getMavenSettingsXmlFromEvn();
}
// still not exist, return default repository
if (settingsXmlPath == null) {
logger.info("Maven home does not exists maven settings.xml, use default");
return defaultRepository;
}
// parse xml
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder documentBuilder;
try {
// security settings
try {
factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
factory.setFeature("http://xml.org/sax/features/external-general-entities", false);
factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");
} catch (Exception e) {
logger.warn("Error at parse settings.xml, setting security features: {}", e.getLocalizedMessage());
}
documentBuilder = factory.newDocumentBuilder();
Document document = documentBuilder.parse(settingsXmlPath.toFile());
// search node named 'localRepository'
String localRepositoryNodeTag = "localRepository";
NodeList nodeList = document.getElementsByTagName(localRepositoryNodeTag);
int length = nodeList.getLength();
if (length <= 0) {
// if node not exists, return default repository
logger.info("File {} does not contains node named {}", settingsXmlPath, localRepositoryNodeTag);
return defaultRepository;
}
for (int i = 0; i < length; i++) {
Node node = nodeList.item(i);
String content = node.getTextContent();
if (StringUtils.isNotEmpty(content) && StringUtils.isNotBlank(content)) {
Path localRepositoryPath = Paths.get(content);
if (Files.exists(localRepositoryPath)) {
logger.info("Got local repository path {}", content);
return content;
}
}
}
return defaultRepository;
} catch (Exception e) {
logger.error(e.getLocalizedMessage(), e);
return defaultRepository;
}
}
/**
* Get maven settings.xml file path from "${user.home}/.m2"
* <p>
* if "${user.home}/.m2/settings.xml" does not exist,
* null will be returned
* </p>
*
* @return settings.xml file path, could be null
*/
private Path getMavenSettingsXmlFromUserHome() {
String userHome = System.getProperty("user.home");
Path settingsXmlPath = null;
if (!StringUtils.isEmpty(userHome)) {
settingsXmlPath = Paths.get(userHome, ".m2", "settings.xml").toAbsolutePath();
}
return settingsXmlPath;
}
/**
* Get maven settings.xml file path from "${MAVEN_HOME}/conf"
* <p>
* if "${MAVEN_HOME}/conf/settings.xml" does not exist,
* null will be returned
* </p>
*
* @return settings.xml file path, could be null
*/
private Path getMavenSettingsXmlFromEvn() {
String mavenHome = System.getenv("MAVEN_HOME");
Path settingsXmlPath = null;
if (mavenHome == null) {
mavenHome = System.getenv("M2_HOME");
}
if (mavenHome != null) {
settingsXmlPath = Paths.get(mavenHome, "conf", "settings.xml").toAbsolutePath();
}
return settingsXmlPath;
}
public DolphinPluginManagerConfig setMavenRemoteRepository(List<String> mavenRemoteRepository) {
this.mavenRemoteRepository = mavenRemoteRepository;
return this;
}
public DolphinPluginManagerConfig setMavenRemoteRepository(String mavenRemoteRepository) {
this.mavenRemoteRepository = ImmutableList.copyOf(Splitter.on(',').omitEmptyStrings().trimResults().split(mavenRemoteRepository));
return this;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,660 | [Improvement][Server] Change the registry session timeout | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the default registry timeout is 1s, which is too short for zk, and it will be reset to a negotiated timeout by the zk server, because `zk.minSessionTimeout = 2 * tickTimeout` and `zk.maxSessionTimeout = 20 * tickTimeout`.
I think the default session timeout param should be improved so that the client can control it better, not only for zk.
Now the default heartbeat interval is 10s, so maybe the registry session timeout should be set to 30s.
BTW, `Curator.DEFAULT_SESSION_TIMEOUT_MS = 60 * 1000` and `Curator.DEFAULT_CONNECTION_TIMEOUT_MS = 15 * 1000`.
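A minimal sketch of the suggested defaults, mirroring the shape of the existing `ZookeeperConfiguration` enum (the enum name here is hypothetical and the values are proposals, not the merged change):
```java
import java.util.function.Function;

// Hypothetical sketch: 30s session timeout (3x the 10s heartbeat interval),
// with the connection timeout kept well below the session timeout.
public enum RegistryTimeoutDefaults {
    SESSION_TIMEOUT_MS("session.timeout.ms", 30 * 1000, Integer::valueOf),
    CONNECTION_TIMEOUT_MS("connection.timeout.ms", 9 * 1000, Integer::valueOf);

    private final String name;
    private final Object defaultValue;
    private final Function<String, Object> converter;

    @SuppressWarnings("unchecked")
    <T> RegistryTimeoutDefaults(String name, T defaultValue, Function<String, T> converter) {
        this.name = name;
        this.defaultValue = defaultValue;
        this.converter = (Function<String, Object>) converter;
    }
}
```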
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6660 | https://github.com/apache/dolphinscheduler/pull/6663 | 1165afbdd17cf072c434020376d653b5f411fdd6 | 1ebab356f3e431237524537a6fedd26590ff24ce | "2021-11-02T04:21:49Z" | java | "2021-11-04T14:31:13Z" | dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.registry.zookeeper;
import java.util.function.Function;
public enum ZookeeperConfiguration {
NAME_SPACE("namespace", "dolphinscheduler", value -> value),
SERVERS("servers", null, value -> value),
/**
* Initial amount of time to wait between retries
*/
BASE_SLEEP_TIME("base.sleep.time.ms", 60, Integer::valueOf),
MAX_SLEEP_TIME("max.sleep.ms", 300, Integer::valueOf),
DIGEST("digest", null, value -> value),
MAX_RETRIES("max.retries", 5, Integer::valueOf),
//todo
SESSION_TIMEOUT_MS("session.timeout.ms", 1000, Integer::valueOf),
CONNECTION_TIMEOUT_MS("connection.timeout.ms", 1000, Integer::valueOf),
BLOCK_UNTIL_CONNECTED_WAIT_MS("block.until.connected.wait", 600, Integer::valueOf),
;
private final String name;
public String getName() {
return name;
}
private final Object defaultValue;
private final Function<String, Object> converter;
<T> ZookeeperConfiguration(String name, T defaultValue, Function<String, T> converter) {
this.name = name;
this.defaultValue = defaultValue;
this.converter = (Function<String, Object>) converter;
}
public <T> T getParameterValue(String param) {
Object value = param != null ? converter.apply(param) : defaultValue;
return (T) value;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,660 | [Improvement][Server] Change the registry session timeout | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the default registry timeout is 1s, which is too short for zk, and it will be reset to a negotiated timeout by the zk server, because `zk.minSessionTimeout = 2 * tickTimeout` and `zk.maxSessionTimeout = 20 * tickTimeout`.
I think the default session timeout param should be improved so that the client can control it better, not only for zk.
Now the default heartbeat interval is 10s, so maybe the registry session timeout should be set to 30s.
BTW, `Curator.DEFAULT_SESSION_TIMEOUT_MS = 60 * 1000` and `Curator.DEFAULT_CONNECTION_TIMEOUT_MS = 15 * 1000`.
### Use case
_No response_
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6660 | https://github.com/apache/dolphinscheduler/pull/6663 | 1165afbdd17cf072c434020376d653b5f411fdd6 | 1ebab356f3e431237524537a6fedd26590ff24ce | "2021-11-02T04:21:49Z" | java | "2021-11-04T14:31:13Z" | dolphinscheduler-service/src/main/resources/registry.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#registry.plugin.dir config the Registry Plugin dir.
registry.plugin.dir=lib/plugin/registry
registry.plugin.name=zookeeper
registry.servers=127.0.0.1:2181
#maven.local.repository=/usr/local/localRepository
#registry.plugin.binding config the Registry Plugin need be load when development and run in IDE
#registry.plugin.binding=./dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/pom.xml
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .github/ISSUE_TEMPLATE/bug-report.yml | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: Bug report
title: "[Bug] [Module Name] Bug title "
description: Problems and issues with code of Apache Dolphinscheduler
labels: [ "bug" ]
body:
- type: markdown
attributes:
value: |
Please make sure what you are reporting is indeed a bug with reproducible steps, if you want to ask questions
or share ideas, please [subscribe to our mailing list](mailto:dev-subscribe@dolphinscheduler.apache.org) and sent
emails to [our mailing list](mailto:dev@dolphinscheduler.apache.org), you can also head to our
[Discussion](https://github.com/apache/dolphinscheduler/discussions) tab.
For better global communication, Please write in English.
If you feel the description in English is not clear, then you can append description in Chinese, thanks!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please make sure to search in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue)
first to see whether the same issue was reported already.
options:
- label: >
I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found
no similar issues.
required: true
- type: textarea
attributes:
label: What happened
description: Describe what happened.
placeholder: >
Please provide the context in which the problem occurred and explain what happened
validations:
required: true
- type: textarea
attributes:
label: What you expected to happen
description: What do you think went wrong?
placeholder: >
Please explain why you think the behaviour is erroneous. It is extremely helpful if you copy and paste
the fragment of logs showing the exact error messages or wrong behaviour and screenshots for
UI problems. You can include files by dragging and dropping them here.
**NOTE**: please copy and paste texts instead of taking screenshots of them for easy future search.
validations:
required: true
- type: textarea
attributes:
label: How to reproduce
description: >
What should we do to reproduce the problem? If you are not able to provide a reproducible case,
please open a [Discussion](https://github.com/apache/dolphinscheduler/discussions) instead.
placeholder: >
Please make sure you provide a reproducible step-by-step case of how to reproduce the problem
as minimally and precisely as possible. Keep in mind we do not have access to your deployment.
Remember that non-reproducible issues will be closed! Opening a discussion is recommended as a
first step.
validations:
required: true
- type: textarea
attributes:
label: Anything else
description: Anything else we need to know?
placeholder: >
How often does this problem occur? (Once? Every time? Only when certain conditions are met?)
Any relevant logs to include? Put them here inside fenced
``` ``` blocks or inside a collapsable details tag if it's too long:
<details><summary>x.log</summary> lots of stuff </details>
- type: checkboxes
attributes:
label: Are you willing to submit PR?
description: >
This is absolutely not required, but we are happy to guide you in the contribution process
especially if you already have a good understanding of how to implement the fix.
Dolphinscheduler is a totally community-driven project and we love to bring new contributors in.
options:
- label: Yes I am willing to submit a PR!
- type: checkboxes
attributes:
label: Code of Conduct
description: |
The Code of Conduct helps create a safe space for everyone. We require that everyone agrees to it.
options:
- label: >
I agree to follow this project's
[Code of Conduct](https://www.apache.org/foundation/policies/conduct)
required: true
- type: markdown
attributes:
value: "Thanks for completing our form!"
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .github/ISSUE_TEMPLATE/feature-request.yml | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: Feature request
description: Suggest an idea for this project
title: "[Feature][Module Name] Feature title"
labels: [ "new feature" ]
body:
- type: markdown
attributes:
value: |
For better global communication, Please write in English.
If you feel the description in English is not clear, then you can append description in Chinese, thanks!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please make sure to search in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) first
to see whether the same feature was requested already.
options:
- label: >
I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no
similar feature requirement.
required: true
- type: textarea
attributes:
label: Description
description: A short description of your feature
- type: textarea
attributes:
label: Use case
description: What do you want to happen?
placeholder: >
Rather than telling us how you might implement this feature, try to take a
step back and describe what you are trying to achieve.
- type: textarea
attributes:
label: Related issues
description: Is there currently another issue associated with this?
- type: checkboxes
attributes:
label: Are you willing to submit a PR?
description: >
This is absolutely not required, but we are happy to guide you in the contribution process
especially if you already have a good understanding of how to implement the feature.
DolphinScheduler is a totally community-driven project and we love to bring new contributors in.
options:
- label: Yes I am willing to submit a PR!
- type: checkboxes
attributes:
label: Code of Conduct
description: |
The Code of Conduct helps create a safe space for everyone. We require that everyone agrees to it.
options:
- label: |
I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
required: true
- type: markdown
attributes:
value: "Thanks for completing our form!"
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .github/actions/lable-on-issue | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .github/workflows/issue_robot.yml | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: issue-robot
on:
issues:
types: [opened]
jobs:
issueRobot:
runs-on: ubuntu-latest
steps:
- name: "Checkout ${{ github.ref }}"
uses: actions/checkout@v2
with:
persist-credentials: false
submodules: true
- name: "Translation into English in issue"
uses: ./.github/actions/translate-on-issue
with:
translate-title: true
translate-body: true
- name: "Comment in issue"
uses: ./.github/actions/comment-on-issue
with:
message: "Hi:\n* Thank you for your feedback, we have received your issue, Please wait patiently for a reply.\n* In order for us to understand your request as soon as possible, please provide detailed information、version or pictures.\n* If you haven't received a reply for a long time, you can subscribe to the developer's email,Mail subscription steps reference https://dolphinscheduler.apache.org/en-us/community/development/subscribe.html ,Then write the issue URL in the email content and send question to dev@dolphinscheduler.apache.org."
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: "Add lable in issue"
uses: ./.github/actions/lable-on-issue
with:
add-labels: "Waiting for reply"
ignore-if-labeled: true
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .github/workflows/stale.yml | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/actions/stale
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '0 0 * * *'
permissions:
# Stale recommended permissions
pull-requests: write
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
with:
# Stale Issues
days-before-issue-stale: 14
days-before-issue-close: 3
# We do not stale Issues with label `Waiting for reply`, `new feature` and `DSIP`
exempt-issue-labels: 'Waiting for reply,new feature,DSIP'
stale-issue-message: >
This issue has been automatically marked as stale because it has not had recent activity
for 14 days. It will be closed in next 3 days if no further activity occurs.
close-issue-message: >
This issue has been closed because it has not received response for too long time. You could
reopen it if you encountered similar problems in the future.
# Stale PRs
days-before-pr-stale: 60
days-before-pr-close: 5
stale-pr-message: >
This pull request has been automatically marked as stale because it has not had recent
activity for 60 days. It will be closed in 5 days if no further activity occurs.
close-pr-message: >
This pull request has been closed because it has not had recent activity. You could reopen it
if you try to continue your work, and anyone who are interested in it are encouraged to continue
work on this pull request.
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .gitmodules | # Licensed to Apache Software Foundation (ASF) under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Apache Software Foundation (ASF) licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
[submodule ".github/actions/comment-on-issue"]
path = .github/actions/comment-on-issue
url = https://github.com/xingchun-chen/actions-comment-on-issue
[submodule ".github/actions/lable-on-issue"]
path = .github/actions/lable-on-issue
url = https://github.com/xingchun-chen/labeler
[submodule ".github/actions/translate-on-issue"]
path = .github/actions/translate-on-issue
url = https://github.com/xingchun-chen/translation-helper
[submodule ".github/actions/reviewdog-setup"]
path = .github/actions/reviewdog-setup
url = https://github.com/reviewdog/action-setup
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,673 | [Bug] [community] Stale bot will close issue/pr unexpected | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Our stale bot will close issues/PRs unexpectedly; this situation would make us miss user feedback.
Some information for you:
* When an issue has no activity (no comments or labels added) for 14 days, it is marked as `stale`, and it will be closed within the next 3 days. Issues with the labels `Waiting for reply`, `new feature`, or `DSIP` are not closed.
* But when a user creates a `bug` or `feature` issue and no maintainer answers for 17 days, it will be closed too, which I think is not the behavior we want. **Solution:** Maybe we should add the label `Waiting for reply` when users create bug or feature issues.
* Another unexpected situation is when a maintainer answers a user's issue the first time, the user adds additional information, and the maintainer does not answer for another 17 days; the issue would also be closed. **Solution:** I am not sure how to cover this situation; maybe we should see how other repos handle it.
### What you expected to happen
See above
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6673 | https://github.com/apache/dolphinscheduler/pull/6676 | a269e3df7612a017dab6b8ff473720b53d96ad0d | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | "2021-11-03T06:40:00Z" | java | "2021-11-05T10:35:41Z" | .licenserc.yaml | # Licensed to Apache Software Foundation (ASF) under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Apache Software Foundation (ASF) licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
header:
license:
spdx-id: Apache-2.0
copyright-owner: Apache Software Foundation
paths-ignore:
- dist
- NOTICE
- LICENSE
- DISCLAIMER
- dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ScriptRunner.java
- mvnw.cmd
- sql/soft_version
- .mvn
- .gitattributes
- '**/licenses/**/LICENSE-*'
- '**/*.md'
- '**/*.json'
- '**/*.iml'
- '**/*.ini'
- '**/.babelrc'
- '**/.eslintignore'
- '**/.gitignore'
- '**/LICENSE'
- '**/NOTICE'
- '**/node_modules/**'
- '.github/actions/comment-on-issue/**'
- '.github/actions/lable-on-issue/**'
- '.github/actions/reviewdog-setup/**'
- '.github/actions/translate-on-issue/**'
comment: on-failure
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
When the Worker sends a heartbeat every second, I want the Master to pick it up more promptly, which can make task running smoother; a sketch of the change follows.
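A minimal sketch of the change, assuming the weight refresh runs on a `ScheduledExecutorService` (the task variable is illustrative; 1s mirrors the worker heartbeat interval):
```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
// Hypothetical task that re-reads worker heartbeats and recomputes host weights.
Runnable refreshResourceTask = () -> { /* sync worker weights from heartbeats */ };
// Refresh every 1s instead of every 5s so weight changes are picked up promptly.
executor.scheduleWithFixedDelay(refreshResourceTask, 0, 1, TimeUnit.SECONDS);
```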
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HeartBeat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HeartBeat {
private static final Logger logger = LoggerFactory.getLogger(HeartBeat.class);
public static final String COMMA = ",";
private long startupTime;
private long reportTime;
private double cpuUsage;
private double memoryUsage;
private double loadAverage;
private double availablePhysicalMemorySize;
private double maxCpuloadAvg;
private double reservedMemory;
private int serverStatus;
private int processId;
private int workerHostWeight; // worker host weight
private int workerWaitingTaskCount; // worker waiting task count
private int workerExecThreadCount; // worker thread pool thread count
public long getStartupTime() {
return startupTime;
}
public void setStartupTime(long startupTime) {
this.startupTime = startupTime;
}
public long getReportTime() {
return reportTime;
}
public void setReportTime(long reportTime) {
this.reportTime = reportTime;
}
public double getCpuUsage() {
return cpuUsage;
}
public void setCpuUsage(double cpuUsage) {
this.cpuUsage = cpuUsage;
}
public double getMemoryUsage() {
return memoryUsage;
}
public void setMemoryUsage(double memoryUsage) {
this.memoryUsage = memoryUsage;
}
public double getLoadAverage() {
return loadAverage;
}
public void setLoadAverage(double loadAverage) {
this.loadAverage = loadAverage;
}
public double getAvailablePhysicalMemorySize() {
return availablePhysicalMemorySize;
}
public void setAvailablePhysicalMemorySize(double availablePhysicalMemorySize) {
this.availablePhysicalMemorySize = availablePhysicalMemorySize;
}
public double getMaxCpuloadAvg() {
return maxCpuloadAvg;
}
public void setMaxCpuloadAvg(double maxCpuloadAvg) {
this.maxCpuloadAvg = maxCpuloadAvg;
}
public double getReservedMemory() {
return reservedMemory;
}
public void setReservedMemory(double reservedMemory) {
this.reservedMemory = reservedMemory;
}
public int getServerStatus() {
return serverStatus;
}
public void setServerStatus(int serverStatus) {
this.serverStatus = serverStatus;
}
public int getProcessId() {
return processId;
}
public void setProcessId(int processId) {
this.processId = processId;
}
public int getWorkerHostWeight() {
return workerHostWeight;
}
public void setWorkerHostWeight(int workerHostWeight) {
this.workerHostWeight = workerHostWeight;
}
public int getWorkerWaitingTaskCount() {
return workerWaitingTaskCount;
}
public void setWorkerWaitingTaskCount(int workerWaitingTaskCount) {
this.workerWaitingTaskCount = workerWaitingTaskCount;
}
public int getWorkerExecThreadCount() {
return workerExecThreadCount;
}
public void setWorkerExecThreadCount(int workerExecThreadCount) {
this.workerExecThreadCount = workerExecThreadCount;
}
public HeartBeat() {
this.reportTime = System.currentTimeMillis();
this.serverStatus = Constants.NORMAL_NODE_STATUS;
}
public HeartBeat(long startupTime, double maxCpuloadAvg, double reservedMemory) {
this.reportTime = System.currentTimeMillis();
this.serverStatus = Constants.NORMAL_NODE_STATUS;
this.startupTime = startupTime;
this.maxCpuloadAvg = maxCpuloadAvg;
this.reservedMemory = reservedMemory;
}
public HeartBeat(long startupTime, double maxCpuloadAvg, double reservedMemory, int hostWeight, int workerExecThreadCount) {
this.reportTime = System.currentTimeMillis();
this.serverStatus = Constants.NORMAL_NODE_STATUS;
this.startupTime = startupTime;
this.maxCpuloadAvg = maxCpuloadAvg;
this.reservedMemory = reservedMemory;
this.workerHostWeight = hostWeight;
this.workerExecThreadCount = workerExecThreadCount;
}
/**
* fill system info
*/
private void fillSystemInfo() {
this.cpuUsage = OSUtils.cpuUsage();
this.loadAverage = OSUtils.loadAverage();
this.availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
this.memoryUsage = OSUtils.memoryUsage();
this.processId = OSUtils.getProcessID();
}
/**
* update server state
*/
public void updateServerState() {
if (loadAverage > maxCpuloadAvg || availablePhysicalMemorySize < reservedMemory) {
logger.warn("current cpu load average {} is too high or available memory {}G is too low, under max.cpuload.avg={} and reserved.memory={}G",
loadAverage, availablePhysicalMemorySize, maxCpuloadAvg, reservedMemory);
this.serverStatus = Constants.ABNORMAL_NODE_STATUS;
} else if (workerWaitingTaskCount > workerExecThreadCount) {
logger.warn("current waiting task count {} is large than worker thread count {}, worker is busy", workerWaitingTaskCount, workerExecThreadCount);
this.serverStatus = Constants.BUSY_NODE_STATUE;
} else {
this.serverStatus = Constants.NORMAL_NODE_STATUS;
}
}
/**
* encode heartbeat
*/
public String encodeHeartBeat() {
this.fillSystemInfo();
this.updateServerState();
StringBuilder builder = new StringBuilder(100);
builder.append(cpuUsage).append(COMMA);
builder.append(memoryUsage).append(COMMA);
builder.append(loadAverage).append(COMMA);
builder.append(availablePhysicalMemorySize).append(COMMA);
builder.append(maxCpuloadAvg).append(COMMA);
builder.append(reservedMemory).append(COMMA);
builder.append(startupTime).append(COMMA);
builder.append(reportTime).append(COMMA);
builder.append(serverStatus).append(COMMA);
builder.append(processId).append(COMMA);
builder.append(workerHostWeight).append(COMMA);
builder.append(workerExecThreadCount).append(COMMA);
builder.append(workerWaitingTaskCount);
return builder.toString();
}
/**
* decode heartbeat
*/
public static HeartBeat decodeHeartBeat(String heartBeatInfo) {
String[] parts = heartBeatInfo.split(Constants.COMMA);
if (parts.length != Constants.HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH) {
return null;
}
HeartBeat heartBeat = new HeartBeat();
heartBeat.cpuUsage = Double.parseDouble(parts[0]);
heartBeat.memoryUsage = Double.parseDouble(parts[1]);
heartBeat.loadAverage = Double.parseDouble(parts[2]);
heartBeat.availablePhysicalMemorySize = Double.parseDouble(parts[3]);
heartBeat.maxCpuloadAvg = Double.parseDouble(parts[4]);
heartBeat.reservedMemory = Double.parseDouble(parts[5]);
heartBeat.startupTime = Long.parseLong(parts[6]);
heartBeat.reportTime = Long.parseLong(parts[7]);
heartBeat.serverStatus = Integer.parseInt(parts[8]);
heartBeat.processId = Integer.parseInt(parts[9]);
heartBeat.workerHostWeight = Integer.parseInt(parts[10]);
heartBeat.workerExecThreadCount = Integer.parseInt(parts[11]);
heartBeat.workerWaitingTaskCount = Integer.parseInt(parts[12]);
return heartBeat;
}
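// Hedged usage sketch (not part of the original class): demonstrates an
// encode/decode round trip; the constructor arguments are illustrative assumptions.
public static void main(String[] args) {
HeartBeat heartBeat = new HeartBeat(System.currentTimeMillis(), 1.0, 0.3, 100, 20);
String encoded = heartBeat.encodeHeartBeat();
HeartBeat decoded = HeartBeat.decodeHeartBeat(encoded);
// decodeHeartBeat returns null when the field count does not match the expected length
System.out.println(decoded == null ? "invalid heartbeat" : "serverStatus=" + decoded.getServerStatus());
}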
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.registry.zookeeper;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.BASE_SLEEP_TIME;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.BLOCK_UNTIL_CONNECTED_WAIT_MS;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.CONNECTION_TIMEOUT_MS;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.DIGEST;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.MAX_RETRIES;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.NAME_SPACE;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.SERVERS;
import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.SESSION_TIMEOUT_MS;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import org.apache.dolphinscheduler.spi.register.DataChangeEvent;
import org.apache.dolphinscheduler.spi.register.ListenerManager;
import org.apache.dolphinscheduler.spi.register.Registry;
import org.apache.dolphinscheduler.spi.register.RegistryConnectListener;
import org.apache.dolphinscheduler.spi.register.RegistryException;
import org.apache.dolphinscheduler.spi.register.SubscribeListener;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.api.transaction.TransactionOp;
import org.apache.curator.framework.recipes.cache.TreeCache;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.cache.TreeCacheListener;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.CloseableUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import java.nio.charset.StandardCharsets;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.base.Strings;
public class ZookeeperRegistry implements Registry {
private CuratorFramework client;
/**
* treeCache map
* k-subscribe key
* v-listener
*/
private Map<String, TreeCache> treeCacheMap = new HashMap<>();
/**
* Distributed lock map
*/
private ThreadLocal<Map<String, InterProcessMutex>> threadLocalLockMap = new ThreadLocal<>();
/**
* build retry policy
*/
private static RetryPolicy buildRetryPolicy(Map<String, String> registerData) {
int baseSleepTimeMs = BASE_SLEEP_TIME.getParameterValue(registerData.get(BASE_SLEEP_TIME.getName()));
int maxRetries = MAX_RETRIES.getParameterValue(registerData.get(MAX_RETRIES.getName()));
int maxSleepMs = baseSleepTimeMs * maxRetries;
return new ExponentialBackoffRetry(baseSleepTimeMs, maxRetries, maxSleepMs);
}
/**
* build digest
*/
private static void buildDigest(CuratorFrameworkFactory.Builder builder, String digest) {
builder.authorization(DIGEST.getName(), digest.getBytes(StandardCharsets.UTF_8))
.aclProvider(new ACLProvider() {
@Override
public List<ACL> getDefaultAcl() {
return ZooDefs.Ids.CREATOR_ALL_ACL;
}
@Override
public List<ACL> getAclForPath(final String path) {
return ZooDefs.Ids.CREATOR_ALL_ACL;
}
});
}
@Override
public void init(Map<String, String> registerData) {
CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder()
.connectString(SERVERS.getParameterValue(registerData.get(SERVERS.getName())))
.retryPolicy(buildRetryPolicy(registerData))
.namespace(NAME_SPACE.getParameterValue(registerData.get(NAME_SPACE.getName())))
.sessionTimeoutMs(SESSION_TIMEOUT_MS.getParameterValue(registerData.get(SESSION_TIMEOUT_MS.getName())))
.connectionTimeoutMs(CONNECTION_TIMEOUT_MS.getParameterValue(registerData.get(CONNECTION_TIMEOUT_MS.getName())));
String digest = DIGEST.getParameterValue(registerData.get(DIGEST.getName()));
if (!Strings.isNullOrEmpty(digest)) {
buildDigest(builder, digest);
}
client = builder.build();
client.start();
try {
if (!client.blockUntilConnected(BLOCK_UNTIL_CONNECTED_WAIT_MS.getParameterValue(registerData.get(BLOCK_UNTIL_CONNECTED_WAIT_MS.getName())), MILLISECONDS)) {
client.close();
throw new RegistryException("zookeeper connect timeout");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RegistryException("zookeeper connect error", e);
}
}
@Override
public void addConnectionStateListener(RegistryConnectListener registryConnectListener) {
client.getConnectionStateListenable().addListener(new ZookeeperConnectionStateListener(registryConnectListener));
}
@Override
public boolean subscribe(String path, SubscribeListener subscribeListener) {
if (null != treeCacheMap.get(path)) {
return false;
}
TreeCache treeCache = new TreeCache(client, path);
TreeCacheListener treeCacheListener = (client, event) -> {
TreeCacheEvent.Type type = event.getType();
DataChangeEvent eventType = null;
String dataPath = null;
switch (type) {
case NODE_ADDED:
dataPath = event.getData().getPath();
eventType = DataChangeEvent.ADD;
break;
case NODE_UPDATED:
eventType = DataChangeEvent.UPDATE;
dataPath = event.getData().getPath();
break;
case NODE_REMOVED:
eventType = DataChangeEvent.REMOVE;
dataPath = event.getData().getPath();
break;
default:
}
if (null != eventType && null != dataPath) {
ListenerManager.dataChange(path, dataPath, eventType);
}
};
treeCache.getListenable().addListener(treeCacheListener);
treeCacheMap.put(path, treeCache);
try {
treeCache.start();
} catch (Exception e) {
throw new RegistryException("start zookeeper tree cache error", e);
}
ListenerManager.addListener(path, subscribeListener);
return true;
}
@Override
public void unsubscribe(String path) {
TreeCache treeCache = treeCacheMap.get(path);
treeCache.close();
ListenerManager.removeListener(path);
}
@Override
public String get(String key) {
try {
return new String(client.getData().forPath(key), StandardCharsets.UTF_8);
} catch (Exception e) {
throw new RegistryException("zookeeper get data error", e);
}
}
@Override
public void remove(String key) {
delete(key);
}
@Override
public boolean isExisted(String key) {
try {
return null != client.checkExists().forPath(key);
} catch (Exception e) {
throw new RegistryException("zookeeper check key is existed error", e);
}
}
@Override
public void persist(String key, String value) {
try {
if (isExisted(key)) {
update(key, value);
return;
}
client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(key, value.getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RegistryException("zookeeper persist error", e);
}
}
@Override
public void persistEphemeral(String key, String value) {
try {
if (isExisted(key)) {
update(key, value);
return;
}
client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RegistryException("zookeeper persist ephemeral error", e);
}
}
@Override
public void update(String key, String value) {
try {
if (!isExisted(key)) {
return;
}
TransactionOp transactionOp = client.transactionOp();
client.transaction().forOperations(transactionOp.check().forPath(key), transactionOp.setData().forPath(key, value.getBytes(StandardCharsets.UTF_8)));
} catch (Exception e) {
throw new RegistryException("zookeeper update error", e);
}
}
@Override
public List<String> getChildren(String key) {
try {
List<String> result = client.getChildren().forPath(key);
result.sort(Comparator.reverseOrder());
return result;
} catch (Exception e) {
throw new RegistryException("zookeeper get children error", e);
}
}
@Override
public boolean delete(String nodePath) {
try {
client.delete()
.deletingChildrenIfNeeded()
.forPath(nodePath);
} catch (KeeperException.NoNodeException ignore) {
// the node does not exist, so we can assume it has already been removed
} catch (Exception e) {
throw new RegistryException("zookeeper delete key error", e);
}
return true;
}
@Override
public boolean acquireLock(String key) {
InterProcessMutex interProcessMutex = new InterProcessMutex(client, key);
try {
interProcessMutex.acquire();
if (null == threadLocalLockMap.get()) {
threadLocalLockMap.set(new HashMap<>(3));
}
threadLocalLockMap.get().put(key, interProcessMutex);
return true;
} catch (Exception e) {
try {
interProcessMutex.release();
throw new RegistryException("zookeeper get lock error", e);
} catch (Exception exception) {
throw new RegistryException("zookeeper release lock error", e);
}
}
}
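// Hedged usage sketch: acquire and release must happen on the same thread, because
// the InterProcessMutex handle is tracked in a ThreadLocal map. For example
// (the lock path is an assumption for illustration):
//   registry.acquireLock("/lock/failover-masters");
//   try { /* critical section */ } finally { registry.releaseLock("/lock/failover-masters"); }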
@Override
public boolean releaseLock(String key) {
if (null == threadLocalLockMap.get().get(key)) {
return false;
}
try {
threadLocalLockMap.get().get(key).release();
threadLocalLockMap.get().remove(key);
if (threadLocalLockMap.get().isEmpty()) {
threadLocalLockMap.remove();
}
} catch (Exception e) {
throw new RegistryException("zookeeper release lock error", e);
}
return true;
}
public CuratorFramework getClient() {
return client;
}
@Override
public void close() {
treeCacheMap.forEach((key, value) -> value.close());
waitForCacheClose(500);
CloseableUtils.closeQuietly(client);
}
private void waitForCacheClose(long millis) {
try {
Thread.sleep(millis);
} catch (final InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/test/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.registry.zookeeper;
import org.apache.dolphinscheduler.spi.register.DataChangeEvent;
import org.apache.dolphinscheduler.spi.register.SubscribeListener;
import org.apache.curator.test.TestingServer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ZookeeperRegistryTest {
private static final Logger logger = LoggerFactory.getLogger(ZookeeperRegistryTest.class);
TestingServer server;
ZookeeperRegistry registry = new ZookeeperRegistry();
@Before
public void before() throws Exception {
server = new TestingServer(true);
Map<String, String> registryConfig = new HashMap<>();
registryConfig.put(ZookeeperConfiguration.SERVERS.getName(), server.getConnectString());
registry.init(registryConfig);
registry.persist("/sub", "");
}
@Test
public void persistTest() {
registry.persist("/nodes/m1", "");
registry.persist("/nodes/m2", "");
Assert.assertEquals(Arrays.asList("m2", "m1"), registry.getChildren("/nodes"));
Assert.assertTrue(registry.isExisted("/nodes/m1"));
registry.delete("/nodes/m2");
Assert.assertFalse(registry.isExisted("/nodes/m2"));
}
@Test
public void lockTest() throws InterruptedException {
CountDownLatch preCountDownLatch = new CountDownLatch(1);
CountDownLatch allCountDownLatch = new CountDownLatch(2);
List<String> testData = new ArrayList<>();
new Thread(() -> {
registry.acquireLock("/lock");
preCountDownLatch.countDown();
logger.info(Thread.currentThread().getName() + " :I got the lock, but I don't want to work. I want to rest for a while");
try {
Thread.sleep(1000);
logger.info(Thread.currentThread().getName() + " :I'm going to start working");
testData.add("thread1");
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
logger.info(Thread.currentThread().getName() + " :I have finished my work, now I release the lock");
registry.releaseLock("/lock");
allCountDownLatch.countDown();
}
}).start();
preCountDownLatch.await();
new Thread(() -> {
try {
logger.info(Thread.currentThread().getName() + " :I am trying to acquire the lock");
registry.acquireLock("/lock");
logger.info(Thread.currentThread().getName() + " :I got the lock and I started working");
testData.add("thread2");
} finally {
registry.releaseLock("/lock");
allCountDownLatch.countDown();
}
}).start();
allCountDownLatch.await();
Assert.assertEquals(testData, Arrays.asList("thread1", "thread2"));
}
@Test
public void subscribeTest() {
boolean status = registry.subscribe("/sub", new TestListener());
Assert.assertTrue(status);
}
class TestListener implements SubscribeListener {
@Override
public void notify(String path, DataChangeEvent dataChangeEvent) {
logger.info("I'm test listener");
}
}
@After
public void after() throws IOException {
registry.close();
server.close();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/dispatch/host/LowerWeightHostManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.dispatch.host;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.HeartBeat;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWeight;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWorker;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.LowerWeightRoundRobin;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* lower weight host manager
*/
public class LowerWeightHostManager extends CommonHostManager {
private final Logger logger = LoggerFactory.getLogger(LowerWeightHostManager.class);
/**
* selector
*/
private LowerWeightRoundRobin selector;
/**
* worker host weights
*/
private ConcurrentHashMap<String, Set<HostWeight>> workerHostWeightsMap;
/**
* worker group host lock
*/
private Lock lock;
/**
* executor service
*/
private ScheduledExecutorService executorService;
@PostConstruct
public void init() {
this.selector = new LowerWeightRoundRobin();
this.workerHostWeightsMap = new ConcurrentHashMap<>();
this.lock = new ReentrantLock();
this.executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("LowerWeightHostManagerExecutor"));
this.executorService.scheduleWithFixedDelay(new RefreshResourceTask(), 0, 5, TimeUnit.SECONDS);
}
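// Note: the fixed 5s delay above is the refresh interval that issue #6694 proposes
// reducing, so that worker heartbeats (sent every second) are reflected in the
// lower-weight host selection more promptly.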
@PreDestroy
public void close() {
this.executorService.shutdownNow();
}
/**
* select host
*
* @param context context
* @return host
*/
@Override
public Host select(ExecutionContext context) {
Set<HostWeight> workerHostWeights = getWorkerHostWeights(context.getWorkerGroup());
if (CollectionUtils.isNotEmpty(workerHostWeights)) {
return selector.select(workerHostWeights).getHost();
}
return new Host();
}
@Override
public HostWorker select(Collection<HostWorker> nodes) {
throw new UnsupportedOperationException("not support");
}
private void syncWorkerHostWeight(Map<String, Set<HostWeight>> workerHostWeights) {
lock.lock();
try {
workerHostWeightsMap.clear();
workerHostWeightsMap.putAll(workerHostWeights);
} finally {
lock.unlock();
}
}
private Set<HostWeight> getWorkerHostWeights(String workerGroup) {
lock.lock();
try {
return workerHostWeightsMap.get(workerGroup);
} finally {
lock.unlock();
}
}
class RefreshResourceTask implements Runnable {
@Override
public void run() {
try {
Map<String, Set<HostWeight>> workerHostWeights = new HashMap<>();
Map<String, Set<String>> workerGroupNodes = serverNodeManager.getWorkerGroupNodes();
for (Map.Entry<String, Set<String>> entry : workerGroupNodes.entrySet()) {
String workerGroup = entry.getKey();
Set<String> nodes = entry.getValue();
Set<HostWeight> hostWeights = new HashSet<>(nodes.size());
for (String node : nodes) {
String heartbeat = serverNodeManager.getWorkerNodeInfo(node);
HostWeight hostWeight = getHostWeight(node, workerGroup, heartbeat);
if (hostWeight != null) {
hostWeights.add(hostWeight);
}
}
if (!hostWeights.isEmpty()) {
workerHostWeights.put(workerGroup, hostWeights);
}
}
syncWorkerHostWeight(workerHostWeights);
} catch (Throwable ex) {
logger.error("RefreshResourceTask error", ex);
}
}
public HostWeight getHostWeight(String addr, String workerGroup, String heartBeatInfo) {
HeartBeat heartBeat = HeartBeat.decodeHeartBeat(heartBeatInfo);
if (heartBeat == null) {
return null;
}
if (Constants.ABNORMAL_NODE_STATUS == heartBeat.getServerStatus()) {
logger.warn("worker {} current cpu load average {} is too high or available memory {}G is too low",
addr, heartBeat.getLoadAverage(), heartBeat.getAvailablePhysicalMemorySize());
return null;
}
if (Constants.BUSY_NODE_STATUE == heartBeat.getServerStatus()) {
logger.warn("worker {} is busy, current waiting task count {} is large than worker thread count {}",
addr, heartBeat.getWorkerWaitingTaskCount(), heartBeat.getWorkerExecThreadCount());
return null;
}
return new HostWeight(HostWorker.of(addr, heartBeat.getWorkerHostWeight(), workerGroup),
heartBeat.getCpuUsage(), heartBeat.getMemoryUsage(), heartBeat.getLoadAverage(), heartBeat.getStartupTime());
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryDataListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.registry;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.spi.register.DataChangeEvent;
import org.apache.dolphinscheduler.spi.register.SubscribeListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MasterRegistryDataListener implements SubscribeListener {
private static final Logger logger = LoggerFactory.getLogger(MasterRegistryDataListener.class);
private MasterRegistryClient masterRegistryClient;
public MasterRegistryDataListener() {
masterRegistryClient = SpringApplicationContext.getBean(MasterRegistryClient.class);
}
@Override
public void notify(String path, DataChangeEvent event) {
//monitor master
if (path.startsWith(REGISTRY_DOLPHINSCHEDULER_MASTERS + Constants.SINGLE_SLASH)) {
handleMasterEvent(event, path);
} else if (path.startsWith(REGISTRY_DOLPHINSCHEDULER_WORKERS + Constants.SINGLE_SLASH)) {
//monitor worker
handleWorkerEvent(event, path);
}
}
/**
* monitor master
*
* @param event event
* @param path path
*/
public void handleMasterEvent(DataChangeEvent event, String path) {
switch (event) {
case ADD:
logger.info("master node added : {}", path);
break;
case REMOVE:
masterRegistryClient.removeNodePath(path, NodeType.MASTER, true);
break;
default:
break;
}
}
/**
* monitor worker
*
* @param event event
* @param path path
*/
public void handleWorkerEvent(DataChangeEvent event, String path) {
switch (event) {
case ADD:
logger.info("worker node added : {}", path);
break;
case REMOVE:
logger.info("worker node deleted : {}", path);
masterRegistryClient.removeNodePath(path, NodeType.WORKER, true);
break;
default:
break;
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/ServerNodeManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.registry;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.service.queue.MasterPriorityQueue;
import org.apache.dolphinscheduler.service.registry.RegistryClient;
import org.apache.dolphinscheduler.spi.register.DataChangeEvent;
import org.apache.dolphinscheduler.spi.register.SubscribeListener;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.annotation.PreDestroy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* server node manager
*/
@Service
public class ServerNodeManager implements InitializingBean {
private final Logger logger = LoggerFactory.getLogger(ServerNodeManager.class);
/**
* master lock
*/
private final Lock masterLock = new ReentrantLock();
/**
* worker group lock
*/
private final Lock workerGroupLock = new ReentrantLock();
/**
* worker node info lock
*/
private final Lock workerNodeInfoLock = new ReentrantLock();
/**
* worker group nodes
*/
private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>();
/**
* master nodes
*/
private final Set<String> masterNodes = new HashSet<>();
/**
* worker node info
*/
private final Map<String, String> workerNodeInfo = new HashMap<>();
/**
* executor service
*/
private ScheduledExecutorService executorService;
/**
* zk client
*/
private RegistryClient registryClient = RegistryClient.getInstance();
/**
* eg : /node/worker/group/127.0.0.1:xxx
*/
private static final int WORKER_LISTENER_CHECK_LENGTH = 5;
/**
* worker group mapper
*/
@Autowired
private WorkerGroupMapper workerGroupMapper;
private MasterPriorityQueue masterPriorityQueue = new MasterPriorityQueue();
/**
* alert dao
*/
@Autowired
private AlertDao alertDao;
public static volatile List<Integer> SLOT_LIST = new ArrayList<>();
public static volatile Integer MASTER_SIZE = 0;
public static Integer getSlot() {
if (SLOT_LIST.size() > 0) {
return SLOT_LIST.get(0);
}
return 0;
}
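// Hedged illustration (an assumption, not part of the original class): masters can
// partition work by slot, e.g. this master would handle a command when
// commandId % MASTER_SIZE == getSlot().
public static boolean isResponsibleFor(int commandId) {
return MASTER_SIZE != 0 && commandId % MASTER_SIZE == getSlot();
}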
/**
* init listener
*
* @throws Exception if error throws Exception
*/
@Override
public void afterPropertiesSet() throws Exception {
/**
* load nodes from zookeeper
*/
load();
/**
* init executor service
*/
executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ServerNodeManagerExecutor"));
executorService.scheduleWithFixedDelay(new WorkerNodeInfoAndGroupDbSyncTask(), 0, 10, TimeUnit.SECONDS);
/**
* init MasterNodeListener listener
*/
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_MASTERS, new MasterDataListener());
/**
* init WorkerNodeListener listener
*/
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_WORKERS, new WorkerDataListener());
}
/**
* load nodes from zookeeper
*/
public void load() {
/**
* master nodes from zookeeper
*/
updateMasterNodes();
/**
* worker group nodes from zookeeper
*/
Set<String> workerGroups = registryClient.getWorkerGroupDirectly();
for (String workerGroup : workerGroups) {
syncWorkerGroupNodes(workerGroup, registryClient.getWorkerGroupNodesDirectly(workerGroup));
}
}
/**
* worker node info and worker group db sync task
*/
class WorkerNodeInfoAndGroupDbSyncTask implements Runnable {
@Override
public void run() {
// sync worker node info
Map<String, String> newWorkerNodeInfo = registryClient.getServerMaps(NodeType.WORKER, true);
syncWorkerNodeInfo(newWorkerNodeInfo);
// sync worker group nodes from database
List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup();
if (CollectionUtils.isNotEmpty(workerGroupList)) {
for (WorkerGroup wg : workerGroupList) {
String workerGroup = wg.getName();
Set<String> nodes = new HashSet<>();
String[] addrs = wg.getAddrList().split(Constants.COMMA);
for (String addr : addrs) {
if (newWorkerNodeInfo.containsKey(addr)) {
nodes.add(addr);
}
}
if (!nodes.isEmpty()) {
syncWorkerGroupNodes(workerGroup, nodes);
}
}
}
}
}
/**
* worker group node listener
*/
class WorkerDataListener implements SubscribeListener {
@Override
public void notify(String path, DataChangeEvent dataChangeEvent) {
if (registryClient.isWorkerPath(path)) {
try {
if (dataChangeEvent == DataChangeEvent.ADD) {
logger.info("worker group node : {} added.", path);
String group = parseGroup(path);
Set<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group);
logger.info("currentNodes : {}", currentNodes);
syncWorkerGroupNodes(group, currentNodes);
} else if (dataChangeEvent == DataChangeEvent.REMOVE) {
logger.info("worker group node : {} down.", path);
String group = parseGroup(path);
Set<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group);
syncWorkerGroupNodes(group, currentNodes);
alertDao.sendServerStopedAlert(1, path, "WORKER");
}
} catch (IllegalArgumentException ex) {
logger.warn(ex.getMessage());
} catch (Exception ex) {
logger.error("WorkerGroupListener capture data change and get data failed", ex);
}
}
}
private String parseGroup(String path) {
String[] parts = path.split("/");
if (parts.length < WORKER_LISTENER_CHECK_LENGTH) {
throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path));
}
return parts[parts.length - 2];
}
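// Example: "/node/worker/default/127.0.0.1:1234".split("/") yields 5 parts (matching
// WORKER_LISTENER_CHECK_LENGTH), so parts[parts.length - 2] resolves the worker
// group "default".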
}
/**
* master node listener
*/
class MasterDataListener implements SubscribeListener {
@Override
public void notify(String path, DataChangeEvent dataChangeEvent) {
if (registryClient.isMasterPath(path)) {
try {
if (dataChangeEvent.equals(DataChangeEvent.ADD)) {
logger.info("master node : {} added.", path);
updateMasterNodes();
}
if (dataChangeEvent.equals(DataChangeEvent.REMOVE)) {
logger.info("master node : {} down.", path);
updateMasterNodes();
alertDao.sendServerStopedAlert(1, path, "MASTER");
}
} catch (Exception ex) {
logger.error("MasterNodeListener capture data change and get data failed.", ex);
}
}
}
}
private void updateMasterNodes() {
SLOT_LIST.clear();
this.masterNodes.clear();
String nodeLock = registryClient.getMasterLockPath();
try {
registryClient.getLock(nodeLock);
Set<String> currentNodes = registryClient.getMasterNodesDirectly();
List<Server> masterNodes = registryClient.getServerList(NodeType.MASTER);
syncMasterNodes(currentNodes, masterNodes);
} catch (Exception e) {
logger.error("update master nodes error", e);
} finally {
registryClient.releaseLock(nodeLock);
}
}
/**
* get master nodes
*
* @return master nodes
*/
public Set<String> getMasterNodes() {
masterLock.lock();
try {
return Collections.unmodifiableSet(masterNodes);
} finally {
masterLock.unlock();
}
}
/**
* sync master nodes
*
* @param nodes master nodes
*/
private void syncMasterNodes(Set<String> nodes, List<Server> masterNodes) {
masterLock.lock();
try {
this.masterNodes.addAll(nodes);
this.masterPriorityQueue.clear();
this.masterPriorityQueue.putList(masterNodes);
int index = masterPriorityQueue.getIndex(NetUtils.getHost());
if (index >= 0) {
MASTER_SIZE = nodes.size();
SLOT_LIST.add(masterPriorityQueue.getIndex(NetUtils.getHost()));
}
logger.info("update master nodes, master size: {}, slot: {}",
MASTER_SIZE, SLOT_LIST.toString()
);
} finally {
masterLock.unlock();
}
}
/**
* sync worker group nodes
*
* @param workerGroup worker group
* @param nodes worker nodes
*/
private void syncWorkerGroupNodes(String workerGroup, Set<String> nodes) {
workerGroupLock.lock();
try {
workerGroup = workerGroup.toLowerCase();
Set<String> workerNodes = workerGroupNodes.getOrDefault(workerGroup, new HashSet<>());
workerNodes.clear();
workerNodes.addAll(nodes);
workerGroupNodes.put(workerGroup, workerNodes);
} finally {
workerGroupLock.unlock();
}
}
public Map<String, Set<String>> getWorkerGroupNodes() {
return Collections.unmodifiableMap(workerGroupNodes);
}
/**
* get worker group nodes
*
* @param workerGroup workerGroup
* @return worker nodes
*/
public Set<String> getWorkerGroupNodes(String workerGroup) {
workerGroupLock.lock();
try {
if (StringUtils.isEmpty(workerGroup)) {
workerGroup = Constants.DEFAULT_WORKER_GROUP;
}
workerGroup = workerGroup.toLowerCase();
Set<String> nodes = workerGroupNodes.get(workerGroup);
if (CollectionUtils.isNotEmpty(nodes)) {
return Collections.unmodifiableSet(nodes);
}
return nodes;
} finally {
workerGroupLock.unlock();
}
}
/**
* get worker node info
*
* @return worker node info
*/
public Map<String, String> getWorkerNodeInfo() {
return Collections.unmodifiableMap(workerNodeInfo);
}
/**
* get worker node info
*
* @param workerNode worker node
* @return worker node info
*/
public String getWorkerNodeInfo(String workerNode) {
workerNodeInfoLock.lock();
try {
return workerNodeInfo.getOrDefault(workerNode, null);
} finally {
workerNodeInfoLock.unlock();
}
}
/**
* sync worker node info
*
* @param newWorkerNodeInfo new worker node info
*/
private void syncWorkerNodeInfo(Map<String, String> newWorkerNodeInfo) {
workerNodeInfoLock.lock();
try {
workerNodeInfo.clear();
workerNodeInfo.putAll(newWorkerNodeInfo);
} finally {
workerNodeInfoLock.unlock();
}
}
/**
* destroy
*/
@PreDestroy
public void destroy() {
executorService.shutdownNow();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/register/ListenerManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.spi.register;
import java.util.HashMap;
/**
 * Manages listeners subscribed to registry node changes.
 */
public class ListenerManager {
/**
 * All subscriptions must be registered uniformly at startup.
 * A node path supports only one listener.
 */
private static final HashMap<String, SubscribeListener> listeners = new HashMap<>();
/**
* Check whether the key has been monitored
*/
public static boolean checkHasListeners(String path) {
return null != listeners.get(path);
}
/**
* add listener(A node can only be monitored by one listener)
*/
public static void addListener(String path, SubscribeListener listener) {
listeners.put(path, listener);
}
/**
* remove listener
*/
public static void removeListener(String path) {
listeners.remove(path);
}
/**
 * After the data changes, the event is dispatched to the corresponding listener for processing.
 */
public static void dataChange(String key, String path, DataChangeEvent dataChangeEvent) {
SubscribeListener notifyListener = listeners.get(key);
if (null == notifyListener) {
return;
}
notifyListener.notify(path, dataChangeEvent);
}
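// Hedged usage sketch: register a listener for a key, then dispatch a change event;
// the paths are assumptions for illustration.
//   ListenerManager.addListener("/node/worker", (path, event) -> System.out.println(path + " -> " + event));
//   ListenerManager.dataChange("/node/worker", "/node/worker/127.0.0.1:1234", DataChangeEvent.ADD);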
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,694 | [Feature][Master] Reduce the refresh resource interval of LowerWeight | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Now the refresh resource interval of LowerWeight is 5s, which means the Master syncs worker weights every 5s.
Since the Worker sends a heartbeat every second, the Master should pick up that information more promptly, which would make task execution smoother.
### Use case
Reduce the refresh resource interval of LowerWeight
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6694 | https://github.com/apache/dolphinscheduler/pull/6695 | 5855c936a4a3daafcc12b408a46b8e8470ec34c4 | 72daba58cd378a347e9657b994bb631c0704fc2a | "2021-11-04T10:36:25Z" | java | "2021-11-05T11:10:36Z" | dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/register/SubscribeListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.spi.register;
/**
* Registration center subscription. All listeners must implement this interface
*/
public interface SubscribeListener {
/**
* Processing logic when the subscription node changes
*/
void notify(String path, DataChangeEvent dataChangeEvent);
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,701 | [Bug] [API] Query worker group list ,paging error | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When more than 10 worker groups have been added and the page size is 10, paging is broken: there is no second page.
### What you expected to happen
With more than 10 worker groups, paging should work correctly and show a second page.
### How to reproduce
Add more than 10 worker groups, then query the worker group list with a page size of 10.
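Looking at `WorkerGroupServiceImpl#queryAllGroupPaging`, the likely root cause is that the page total is taken from the sublist of the current page instead of the full filtered list. A hedged sketch of the fix (variable names follow the existing method):

```java
// use the filtered list size as the total, not the size of the current page
int total = searchValDataList.size();
pageInfo.setTotal(total);
```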
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6701 | https://github.com/apache/dolphinscheduler/pull/6704 | 5e923971bfc49f6705b25ed4bbeb214904d3291f | d4d59b0627237e73f6ca1762882938beee596e34 | "2021-11-05T07:45:27Z" | java | "2021-11-05T12:23:20Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/WorkerGroupServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.WorkerGroupService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.RegistryCenterUtils;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.facebook.presto.jdbc.internal.guava.base.Strings;
/**
* worker group service impl
*/
@Service
public class WorkerGroupServiceImpl extends BaseServiceImpl implements WorkerGroupService {
private static final Logger logger = LoggerFactory.getLogger(WorkerGroupServiceImpl.class);
@Autowired
WorkerGroupMapper workerGroupMapper;
@Autowired
ProcessInstanceMapper processInstanceMapper;
/**
* create or update a worker group
*
* @param loginUser login user
* @param id worker group id
* @param name worker group name
* @param addrList addr list
* @return create or update result code
*/
@Override
public Map<String, Object> saveWorkerGroup(User loginUser, int id, String name, String addrList) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
if (StringUtils.isEmpty(name)) {
putMsg(result, Status.NAME_NULL);
return result;
}
Date now = new Date();
WorkerGroup workerGroup;
if (id != 0) {
workerGroup = workerGroupMapper.selectById(id);
// check exist
if (workerGroup == null) {
workerGroup = new WorkerGroup();
workerGroup.setCreateTime(now);
}
} else {
workerGroup = new WorkerGroup();
workerGroup.setCreateTime(now);
}
workerGroup.setName(name);
workerGroup.setAddrList(addrList);
workerGroup.setUpdateTime(now);
if (checkWorkerGroupNameExists(workerGroup)) {
putMsg(result, Status.NAME_EXIST, workerGroup.getName());
return result;
}
String invalidAddr = checkWorkerGroupAddrList(workerGroup);
if (invalidAddr != null) {
putMsg(result, Status.WORKER_ADDRESS_INVALID, invalidAddr);
return result;
}
if (workerGroup.getId() != 0) {
workerGroupMapper.updateById(workerGroup);
} else {
workerGroupMapper.insert(workerGroup);
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* check worker group name exists
*
* @param workerGroup worker group
* @return boolean
*/
private boolean checkWorkerGroupNameExists(WorkerGroup workerGroup) {
List<WorkerGroup> workerGroupList = workerGroupMapper.queryWorkerGroupByName(workerGroup.getName());
if (CollectionUtils.isNotEmpty(workerGroupList)) {
// new group has same name
if (workerGroup.getId() == 0) {
return true;
}
// check group id
for (WorkerGroup group : workerGroupList) {
if (group.getId() != workerGroup.getId()) {
return true;
}
}
}
// check zookeeper
String workerGroupPath = Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS + Constants.SINGLE_SLASH + workerGroup.getName();
return RegistryCenterUtils.isNodeExisted(workerGroupPath);
}
/**
* check worker group addr list
*
* @param workerGroup worker group
* @return the first invalid worker address, or null if all addresses are valid
*/
private String checkWorkerGroupAddrList(WorkerGroup workerGroup) {
Map<String, String> serverMaps = RegistryCenterUtils.getServerMaps(NodeType.WORKER, true);
if (Strings.isNullOrEmpty(workerGroup.getAddrList())) {
return null;
}
for (String addr : workerGroup.getAddrList().split(Constants.COMMA)) {
if (!serverMaps.containsKey(addr)) {
return addr;
}
}
return null;
}
/**
* query worker group paging
*
* @param loginUser login user
* @param pageNo page number
* @param searchVal search value
* @param pageSize page size
* @return worker group list page
*/
@Override
public Result queryAllGroupPaging(User loginUser, Integer pageNo, Integer pageSize, String searchVal) {
// list from index
int fromIndex = (pageNo - 1) * pageSize;
// list to index
int toIndex = (pageNo - 1) * pageSize + pageSize;
Result result = new Result();
if (!isAdmin(loginUser)) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
List<WorkerGroup> workerGroups = getWorkerGroups(true);
List<WorkerGroup> resultDataList = new ArrayList<>();
int total = 0;
if (CollectionUtils.isNotEmpty(workerGroups)) {
List<WorkerGroup> searchValDataList = new ArrayList<>();
if (!StringUtils.isEmpty(searchVal)) {
for (WorkerGroup workerGroup : workerGroups) {
if (workerGroup.getName().contains(searchVal)) {
searchValDataList.add(workerGroup);
}
}
} else {
searchValDataList = workerGroups;
}
// the total must reflect the whole filtered list, not the current page,
// otherwise pagination stops after the first page (issue #6701)
total = searchValDataList.size();
if (fromIndex < searchValDataList.size()) {
if (toIndex > searchValDataList.size()) {
toIndex = searchValDataList.size();
}
resultDataList = searchValDataList.subList(fromIndex, toIndex);
}
}
PageInfo<WorkerGroup> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotal(total);
pageInfo.setTotalList(resultDataList);
result.setData(pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query all worker group
*
* @return all worker group list
*/
@Override
public Map<String, Object> queryAllGroup() {
Map<String, Object> result = new HashMap<>();
List<WorkerGroup> workerGroups = getWorkerGroups(false);
List<String> availableWorkerGroupList = workerGroups.stream()
.map(WorkerGroup::getName)
.collect(Collectors.toList());
int index = availableWorkerGroupList.indexOf(Constants.DEFAULT_WORKER_GROUP);
if (index > -1) {
availableWorkerGroupList.remove(index);
availableWorkerGroupList.add(0, Constants.DEFAULT_WORKER_GROUP);
}
result.put(Constants.DATA_LIST, availableWorkerGroupList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* get worker groups
*
* @param isPaging whether paging
* @return WorkerGroup list
*/
private List<WorkerGroup> getWorkerGroups(boolean isPaging) {
// worker groups from database
List<WorkerGroup> workerGroups = workerGroupMapper.queryAllWorkerGroup();
// worker groups from zookeeper
String workerPath = Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS;
List<String> workerGroupList = null;
try {
workerGroupList = RegistryCenterUtils.getChildrenNodes(workerPath);
} catch (Exception e) {
logger.error("getWorkerGroups exception: {}, workerPath: {}, isPaging: {}", e.getMessage(), workerPath, isPaging);
}
if (CollectionUtils.isEmpty(workerGroupList)) {
if (CollectionUtils.isEmpty(workerGroups) && !isPaging) {
WorkerGroup wg = new WorkerGroup();
wg.setName(Constants.DEFAULT_WORKER_GROUP);
workerGroups.add(wg);
}
return workerGroups;
}
for (String workerGroup : workerGroupList) {
String workerGroupPath = workerPath + Constants.SINGLE_SLASH + workerGroup;
List<String> childrenNodes = null;
try {
childrenNodes = RegistryCenterUtils.getChildrenNodes(workerGroupPath);
} catch (Exception e) {
logger.error("getChildrenNodes exception: {}, workerGroupPath: {}", e.getMessage(), workerGroupPath);
}
if (childrenNodes == null || childrenNodes.isEmpty()) {
continue;
}
WorkerGroup wg = new WorkerGroup();
wg.setName(workerGroup);
if (isPaging) {
wg.setAddrList(String.join(Constants.COMMA, childrenNodes));
String registeredValue = RegistryCenterUtils.getNodeData(workerGroupPath + Constants.SINGLE_SLASH + childrenNodes.get(0));
wg.setCreateTime(DateUtils.stringToDate(registeredValue.split(Constants.COMMA)[6]));
wg.setUpdateTime(DateUtils.stringToDate(registeredValue.split(Constants.COMMA)[7]));
wg.setSystemDefault(true);
}
workerGroups.add(wg);
}
return workerGroups;
}
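// Note: indexes 6 and 7 of the registered node value correspond to startupTime and
// reportTime in HeartBeat#encodeHeartBeat, which is why they are surfaced above as
// the group's create/update times.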
/**
* delete worker group by id
*
* @param id worker group id
* @return delete result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> deleteWorkerGroupById(User loginUser, Integer id) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
WorkerGroup workerGroup = workerGroupMapper.selectById(id);
if (workerGroup == null) {
putMsg(result, Status.DELETE_WORKER_GROUP_NOT_EXIST);
return result;
}
List<ProcessInstance> processInstances = processInstanceMapper.queryByWorkerGroupNameAndStatus(workerGroup.getName(), Constants.NOT_TERMINATED_STATES);
if (CollectionUtils.isNotEmpty(processInstances)) {
putMsg(result, Status.DELETE_WORKER_GROUP_BY_ID_FAIL, processInstances.size());
return result;
}
workerGroupMapper.deleteById(id);
processInstanceMapper.updateProcessInstanceByWorkerGroupName(workerGroup.getName(), "");
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query all worker address list
*
* @return all worker address list
*/
@Override
public Map<String, Object> getWorkerAddressList() {
Map<String, Object> result = new HashMap<>();
List<String> serverNodeList = RegistryCenterUtils.getServerNodeList(NodeType.WORKER, true);
result.put(Constants.DATA_LIST, serverNodeList);
putMsg(result, Status.SUCCESS);
return result;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,649 | [Bug] [UI] When entering the project management page through different links, the front-end storage project is not switched | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
When I enter the project management page through different links (different projects), the project stored on the front end is not switched.
![image](https://user-images.githubusercontent.com/35388422/139579266-045c424c-b5ed-4939-8f16-fdc10a3f9ac6.png)
![image](https://user-images.githubusercontent.com/35388422/139579292-519fd6b1-5e6d-4c22-bc41-3762145007fc.png)
### What you expected to happen
The project should switch normally.
### How to reproduce
1. Open two browser windows
2. Enter different project links for each window
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6649 | https://github.com/apache/dolphinscheduler/pull/6650 | 1144360257cc901c70962971a0777930dcb39e3c | a728c85a92faff6539daa86defa5206f986d38a9 | "2021-10-31T10:59:38Z" | java | "2021-11-06T02:12:01Z" | dolphinscheduler-ui/src/js/conf/home/router/index.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import Vue from 'vue'
import store from '@/conf/home/store'
import localStore from '@/module/util/localStorage'
import i18n from '@/module/i18n/index.js'
import config from '~/external/config'
import Router from 'vue-router'
Vue.use(Router)
const router = new Router({
routes: [
{
path: '/',
name: 'index',
redirect: {
name: 'home'
}
},
{
path: '/home',
name: 'home',
component: resolve => require(['../pages/home/index'], resolve),
meta: {
title: `${i18n.$t('Home')} - DolphinScheduler`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects',
name: 'projects',
component: resolve => require(['../pages/projects/index'], resolve),
meta: {
title: `${i18n.$t('Project')}`
},
redirect: {
name: 'projects-list'
},
beforeEnter: (to, from, next) => {
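// when the target project route carries a projectId different from the cached one, load that project and refresh the local storage before entering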
const blacklist = ['projects', 'projects-list']
if (!blacklist.includes(to.name) && to.params.projectId && to.params.projectId !== localStore.getItem('projectId')) {
store.dispatch('projects/getProjectById', {
projectId: to.params.projectId
}).then(res => {
store.commit('dag/setProjectId', res.id)
store.commit('dag/setProjectCode', res.code)
store.commit('dag/setProjectName', res.name)
localStore.setItem('projectId', res.id)
localStore.setItem('projectCode', res.code)
localStore.setItem('projectName', res.name)
next()
}).catch(e => {
next({ name: 'projects-list' })
})
} else {
next()
}
},
children: [
{
path: '/projects/list',
name: 'projects-list',
component: resolve => require(['../pages/projects/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Project')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/index',
name: 'projects-index',
component: resolve => require(['../pages/projects/pages/index/index'], resolve),
meta: {
title: `${i18n.$t('Project Home')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/kinship',
name: 'projects-kinship',
component: resolve => require(['../pages/projects/pages/kinship/index'], resolve),
meta: {
title: `${i18n.$t('Kinship')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/definition',
name: 'definition',
component: resolve => require(['../pages/projects/pages/definition/index'], resolve),
meta: {
title: `${i18n.$t('Process definition')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
},
redirect: {
name: 'projects-definition-list'
},
children: [
{
path: '/projects/:projectCode/definition/list',
name: 'projects-definition-list',
component: resolve => require(['../pages/projects/pages/definition/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Process definition')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/definition/list/:code',
name: 'projects-definition-details',
component: resolve => require(['../pages/projects/pages/definition/pages/details/index'], resolve),
meta: {
title: `${i18n.$t('Process definition details')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/definition/create',
name: 'definition-create',
component: resolve => require(['../pages/projects/pages/definition/pages/create/index'], resolve),
meta: {
title: `${i18n.$t('Create process definition')}`
}
},
{
path: '/projects/:projectCode/definition/tree/:code',
name: 'definition-tree-view-index',
component: resolve => require(['../pages/projects/pages/definition/pages/tree/index'], resolve),
meta: {
title: `${i18n.$t('TreeView')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/definition/list/timing/:code',
name: 'definition-timing-details',
component: resolve => require(['../pages/projects/pages/definition/timing/index'], resolve),
meta: {
title: `${i18n.$t('Scheduled task list')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
}
]
},
{
path: '/projects/:projectCode/instance',
name: 'instance',
component: resolve => require(['../pages/projects/pages/instance/index'], resolve),
meta: {
title: `${i18n.$t('Process Instance')}`
},
redirect: {
name: 'projects-instance-list'
},
children: [
{
path: '/projects/:projectCode/instance/list',
name: 'projects-instance-list',
component: resolve => require(['../pages/projects/pages/instance/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Process Instance')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/instance/list/:id',
name: 'projects-instance-details',
component: resolve => require(['../pages/projects/pages/instance/pages/details/index'], resolve),
meta: {
title: `${i18n.$t('Process instance details')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/instance/gantt/:id',
name: 'instance-gantt-index',
component: resolve => require(['../pages/projects/pages/instance/pages/gantt/index'], resolve),
meta: {
title: `${i18n.$t('Gantt')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
}
]
},
{
path: '/projects/:projectCode/task-instance',
name: 'task-instance',
component: resolve => require(['../pages/projects/pages/taskInstance'], resolve),
meta: {
title: `${i18n.$t('Task Instance')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/task-record',
name: 'task-record',
component: resolve => require(['../pages/projects/pages/taskRecord'], resolve),
meta: {
title: `${i18n.$t('Task record')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/projects/:projectCode/history-task-record',
name: 'history-task-record',
component: resolve => require(['../pages/projects/pages/historyTaskRecord'], resolve),
meta: {
title: `${i18n.$t('History task record')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
}
]
},
{
path: '/resource',
name: 'resource',
component: resolve => require(['../pages/resource/index'], resolve),
redirect: {
name: 'file'
},
meta: {
title: `${i18n.$t('Resources')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
},
children: [
{
path: '/resource/file',
name: 'file',
component: resolve => require(['../pages/resource/pages/file/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('File Manage')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/file/create',
name: 'resource-file-create',
component: resolve => require(['../pages/resource/pages/file/pages/create/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/file/createFolder',
name: 'resource-file-createFolder',
component: resolve => require(['../pages/resource/pages/file/pages/createFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/file/subFileFolder/:id',
name: 'resource-file-subFileFolder',
component: resolve => require(['../pages/resource/pages/file/pages/subFileFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/file/subFile/:id',
name: 'resource-file-subFile',
component: resolve => require(['../pages/resource/pages/file/pages/subFile/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/file/list/:id',
name: 'resource-file-details',
component: resolve => require(['../pages/resource/pages/file/pages/details/index'], resolve),
meta: {
title: `${i18n.$t('File Details')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/file/subdirectory/:id',
name: 'resource-file-subdirectory',
component: resolve => require(['../pages/resource/pages/file/pages/subdirectory/index'], resolve),
meta: {
title: `${i18n.$t('File Manage')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/file/edit/:id',
name: 'resource-file-edit',
component: resolve => require(['../pages/resource/pages/file/pages/edit/index'], resolve),
meta: {
title: `${i18n.$t('File Details')}`
}
},
{
path: '/resource/udf',
name: 'udf',
component: resolve => require(['../pages/resource/pages/udf/index'], resolve),
meta: {
title: `${i18n.$t('UDF manage')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
},
children: [
{
path: '/resource/udf',
name: 'resource-udf',
component: resolve => require(['../pages/resource/pages/udf/pages/resource/index'], resolve),
meta: {
title: `${i18n.$t('UDF Resources')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/udf/subUdfDirectory/:id',
name: 'resource-udf-subUdfDirectory',
component: resolve => require(['../pages/resource/pages/udf/pages/subUdfDirectory/index'], resolve),
meta: {
title: `${i18n.$t('UDF Resources')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/resource/udf/createUdfFolder',
name: 'resource-udf-createUdfFolder',
component: resolve => require(['../pages/resource/pages/udf/pages/createUdfFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/udf/subCreateUdfFolder/:id',
name: 'resource-udf-subCreateUdfFolder',
component: resolve => require(['../pages/resource/pages/udf/pages/subUdfFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/func',
name: 'resource-func',
component: resolve => require(['../pages/resource/pages/udf/pages/function/index'], resolve),
meta: {
title: `${i18n.$t('UDF Function')}`
}
}
]
}
]
},
{
path: '/datasource',
name: 'datasource',
component: resolve => require(['../pages/datasource/index'], resolve),
meta: {
title: `${i18n.$t('Datasource')}`
},
redirect: {
name: 'datasource-list'
},
children: [
{
path: '/datasource/list',
name: 'datasource-list',
component: resolve => require(['../pages/datasource/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Datasource')}`
}
}
]
},
{
path: '/security',
name: 'security',
component: resolve => require(['../pages/security/index'], resolve),
meta: {
title: `${i18n.$t('Security')}`
},
redirect: {
name: 'tenement-manage'
},
children: [
{
path: '/security/tenant',
name: 'tenement-manage',
component: resolve => require(['../pages/security/pages/tenement/index'], resolve),
meta: {
title: `${i18n.$t('Tenant Manage')}`
}
},
{
path: '/security/users',
name: 'users-manage',
component: resolve => require(['../pages/security/pages/users/index'], resolve),
meta: {
title: `${i18n.$t('User Manage')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/security/warning-groups',
name: 'warning-groups-manage',
component: resolve => require(['../pages/security/pages/warningGroups/index'], resolve),
meta: {
title: `${i18n.$t('Warning group manage')}`
}
},
{
path: '/security/warning-instance',
name: 'warning-instance-manage',
component: resolve => require(['../pages/security/pages/warningInstance/index'], resolve),
meta: {
title: `${i18n.$t('Warning instance manage')}`
}
},
{
path: '/security/queue',
name: 'queue-manage',
component: resolve => require(['../pages/security/pages/queue/index'], resolve),
meta: {
title: `${i18n.$t('Queue manage')}`
}
},
{
path: '/security/worker-groups',
name: 'worker-groups-manage',
component: resolve => require(['../pages/security/pages/workerGroups/index'], resolve),
meta: {
title: `${i18n.$t('Worker group manage')}`
}
},
{
path: '/security/environments',
name: 'environment-manage',
component: resolve => require(['../pages/security/pages/environment/index'], resolve),
meta: {
title: `${i18n.$t('Environment manage')}`
}
},
{
path: '/security/token',
name: 'token-manage',
component: resolve => require(['../pages/security/pages/token/index'], resolve),
meta: {
title: `${i18n.$t('Token manage')}`
}
}
]
},
{
path: '/user',
name: 'user',
component: resolve => require(['../pages/user/index'], resolve),
meta: {
title: `${i18n.$t('User Center')}`
},
redirect: {
name: 'account'
},
children: [
{
path: '/user/account',
name: 'account',
component: resolve => require(['../pages/user/pages/account/index'], resolve),
meta: {
title: `${i18n.$t('User Information')}`
}
},
{
path: '/user/password',
name: 'password',
component: resolve => require(['../pages/user/pages/password/index'], resolve),
meta: {
title: `${i18n.$t('Edit password')}`
}
},
{
path: '/user/token',
name: 'token',
component: resolve => require(['../pages/user/pages/token/index'], resolve),
meta: {
title: `${i18n.$t('Token manage')}`
}
}
]
},
{
path: '/monitor',
name: 'monitor',
component: resolve => require(['../pages/monitor/index'], resolve),
meta: {
title: 'monitor'
},
redirect: {
name: 'servers-master'
},
children: [
{
path: '/monitor/servers/master',
name: 'servers-master',
component: resolve => require(['../pages/monitor/pages/servers/master'], resolve),
meta: {
title: `${i18n.$t('Service-Master')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/worker',
name: 'servers-worker',
component: resolve => require(['../pages/monitor/pages/servers/worker'], resolve),
meta: {
title: `${i18n.$t('Service-Worker')}`,
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/alert',
name: 'servers-alert',
component: resolve => require(['../pages/monitor/pages/servers/alert'], resolve),
meta: {
title: 'Alert',
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/rpcserver',
name: 'servers-rpcserver',
component: resolve => require(['../pages/monitor/pages/servers/rpcserver'], resolve),
meta: {
title: 'Rpcserver',
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/zookeeper',
name: 'servers-zookeeper',
component: resolve => require(['../pages/monitor/pages/servers/zookeeper'], resolve),
meta: {
title: 'Zookeeper',
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/apiserver',
name: 'servers-apiserver',
component: resolve => require(['../pages/monitor/pages/servers/apiserver'], resolve),
meta: {
title: 'Apiserver',
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/db',
name: 'servers-db',
component: resolve => require(['../pages/monitor/pages/servers/db'], resolve),
meta: {
title: 'DB',
refreshInSwitchedTab: config.refreshInSwitchedTab
}
},
{
path: '/monitor/servers/statistics',
name: 'statistics',
component: resolve => require(['../pages/monitor/pages/servers/statistics'], resolve),
meta: {
title: 'statistics',
refreshInSwitchedTab: config.refreshInSwitchedTab
}
}
]
}
]
})
const VueRouterPush = Router.prototype.push
Router.prototype.push = function push (to) {
return VueRouterPush.call(this, to).catch(err => err)
}
router.beforeEach((to, from, next) => {
const $body = $('body')
$body.find('.tooltip.fade.top.in').remove()
if (to.meta.title) {
document.title = `${to.meta.title} - DolphinScheduler`
}
next()
})
export default router
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,649 | [Bug] [UI] When entering the project management page through different links, the front-end storage project is not switched | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
branch: dev
When I enter the project management page through different links (different projects), the project stored on the front end is not switched.
![image](https://user-images.githubusercontent.com/35388422/139579266-045c424c-b5ed-4939-8f16-fdc10a3f9ac6.png)
![image](https://user-images.githubusercontent.com/35388422/139579292-519fd6b1-5e6d-4c22-bc41-3762145007fc.png)
### What you expected to happen
The project should switch normally.
### How to reproduce
1. Open two browser windows
2. Enter different project links for each window
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6649 | https://github.com/apache/dolphinscheduler/pull/6650 | 1144360257cc901c70962971a0777930dcb39e3c | a728c85a92faff6539daa86defa5206f986d38a9 | "2021-10-31T10:59:38Z" | java | "2021-11-06T02:12:01Z" | dolphinscheduler-ui/src/js/conf/home/store/projects/actions.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io from '@/module/io'
export default {
/**
* Get item list
*/
getProjectsList ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects', payload, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Get project by id
*/
getProjectById ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${payload}`, {}, res => {
resolve(res.data)
}).catch(e => {
reject(e)
})
})
},
/**
* Create project
*/
createProjects ({ state }, payload) {
return new Promise((resolve, reject) => {
io.post('projects', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Delete Project
*/
deleteProjects ({ state }, payload) {
return new Promise((resolve, reject) => {
io.delete(`projects/${payload.projectCode}`, {}, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* edit Project
*/
updateProjects ({ state }, payload) {
return new Promise((resolve, reject) => {
io.put(`projects/${payload.projectCode}`, payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Task status statistics
*/
getTaskStatusCount ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/analysis/task-state-count', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get command state count
*/
getCommandStateCount ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/analysis/command-state-count', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* get command state count
*/
getQueueCount ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/analysis/queue-count', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Process status statistics
*/
getProcessStateCount ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/analysis/process-state-count', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
},
/**
* Process definition statistics
*/
getDefineUserCount ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('projects/analysis/define-user-count', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,657 | [Bug] [Master] Failed to operate workflow node separately | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
![image](https://user-images.githubusercontent.com/42203474/139636648-2b175530-996c-48bb-82ff-46b8a6ea39ba.png)
Running a workflow node separately fails; no new task instance is generated.
version: dev
The 2.0-alpha-release version does not have this problem.
### What you expected to happen
Running a workflow node separately should generate a new task instance.
### How to reproduce
Run a workflow node separately.
### Anything else
```java
[ERROR] 2021-11-01 15:30:35.452 org.apache.dolphinscheduler.dao.utils.DagHelper:[112] - start node name [a] is not in task node list [[TaskNode{id='null', code=851434614521856, version=1, name='a', desc='', type='SHELL', runFlag='NORMAL', loc='null', maxRetryTimes=0, retryInterval=1, params='{"resourceList":[],"localParams":[],"rawScript":"echo \"a\"","waitStartTimeout":{},"switchResult":{}}', preTasks='[]', preTaskNodeList=null, extras='null', depList=[], dependence='{}', conditionResult='{"successNode":[""],"failedNode":[""]}', taskInstancePriority=MEDIUM, workerGroup='default', environmentCode=-1, timeout='{"enable":false,"strategy":null,"interval":0}', delayTime=0}]]
[ERROR] 2021-11-01 15:30:35.452 org.apache.dolphinscheduler.dao.utils.DagHelper:[112] - start node name [a] is not in task node list [[TaskNode{id='null', code=851434614521856, version=1, name='a', desc='', type='SHELL', runFlag='NORMAL', loc='null', maxRetryTimes=0, retryInterval=1, params='{"resourceList":[],"localParams":[],"rawScript":"echo \"a\"","waitStartTimeout":{},"switchResult":{}}', preTasks='[]', preTaskNodeList=null, extras='null', depList=[], dependence='{}', conditionResult='{"successNode":[""],"failedNode":[""]}', taskInstancePriority=MEDIUM, workerGroup='default', environmentCode=-1, timeout='{"enable":false,"strategy":null,"interval":0}', delayTime=0}]]
[ERROR] 2021-11-01 15:30:35.453 org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread:[512] - processDag is null
[ERROR] 2021-11-01 15:30:35.453 org.apache.dolphinscheduler.dao.utils.DagHelper:[112] - start node name [job_b1] is not in task node list [[TaskNode{id='null', code=851435467538432, version=1, name='job_b1', desc='', type='SHELL', runFlag='NORMAL', loc='null', maxRetryTimes=0, retryInterval=1, params='{"resourceList":[],"localParams":[],"rawScript":"echo \"b1\"","waitStartTimeout":{},"switchResult":{}}', preTasks='[851437597130752]', preTaskNodeList=null, extras='null', depList=[851437597130752], dependence='{}', conditionResult='{"successNode":[""],"failedNode":[""]}', taskInstancePriority=MEDIUM, workerGroup='default', environmentCode=-1, timeout='{"enable":false,"strategy":null,"interval":0}', delayTime=0}, TaskNode{id='null', code=851437597130752, version=2, name='depend_a', desc='', type='DEPENDENT', runFlag='NORMAL', loc='null', maxRetryTimes=0, retryInterval=1, params='{"waitStartTimeout":{"strategy":"FAILED","interval":null,"checkInterval":null,"enable":false},"switchResult":{}}', preTasks='[]', preTaskNodeList=null, extras='null', depList=[], dependence='{"relation":"AND","dependTaskList":[{"relation":"AND","dependItemList":[{"projectCode":851275541217280,"definitionCode":851435217125376,"depTasks":"a","cycle":"day","dateValue":"today"}]}]}', conditionResult='{"successNode":[""],"failedNode":[""]}', taskInstancePriority=MEDIUM, workerGroup='default', environmentCode=-1, timeout='{"enable":false,"strategy":null,"interval":0}', delayTime=0}, TaskNode{id='null', code=851436591874048, version=1, name='job_b2', desc='', type='SHELL', runFlag='NORMAL', loc='null', maxRetryTimes=0, retryInterval=1, params='{"resourceList":[],"localParams":[],"rawScript":"echo \"b2\"","waitStartTimeout":{},"switchResult":{}}', preTasks='[851437597130752]', preTaskNodeList=null, extras='null', depList=[851437597130752], dependence='{}', conditionResult='{"successNode":[""],"failedNode":[""]}', taskInstancePriority=MEDIUM, workerGroup='default', environmentCode=-1, timeout='{"enable":false,"strategy":null,"interval":0}', delayTime=0}]]
[ERROR] 2021-11-01 15:30:35.453 org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread:[512] - processDag is null
[ERROR] 2021-11-01 15:30:35.453 org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread:[512] - processDag is null
[ERROR] 2021-11-01 15:30:35.454 org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread:[195] - handler error:
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.dao.utils.DagHelper.parsePostNodes(DagHelper.java:301)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.submitPostNode(WorkflowExecuteThread.java:797)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.startProcess(WorkflowExecuteThread.java:464)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.run(WorkflowExecuteThread.java:192)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:829)
[ERROR] 2021-11-01 15:30:35.454 org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread:[195] - handler error:
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.dao.utils.DagHelper.parsePostNodes(DagHelper.java:301)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.submitPostNode(WorkflowExecuteThread.java:797)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.startProcess(WorkflowExecuteThread.java:464)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.run(WorkflowExecuteThread.java:192)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:829)
[ERROR] 2021-11-01 15:30:35.454 org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread:[195] - handler error:
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.dao.utils.DagHelper.parsePostNodes(DagHelper.java:301)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.submitPostNode(WorkflowExecuteThread.java:797)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.startProcess(WorkflowExecuteThread.java:464)
at org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread.run(WorkflowExecuteThread.java:192)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
```
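For reference, a minimal sketch of the mismatch behind the logs above (hypothetical class name and values, assuming TaskNode's usual setters): the start/recovery node list carries the task *name*, while `DagHelper.findNodeByCode` only matches the task *code* string, so the start node is never found, `generateFlowDag` returns null, and `parsePostNodes` later dereferences the null DAG.
```java
import java.util.Collections;
import java.util.List;

import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.dao.utils.DagHelper;

public class StartNodeLookupSketch {
    public static void main(String[] args) {
        // hypothetical task mirroring the log: code 851434614521856, name "a"
        TaskNode shellTask = new TaskNode();
        shellTask.setCode(851434614521856L);
        shellTask.setName("a");
        List<TaskNode> taskNodeList = Collections.singletonList(shellTask);

        // the start-node list passed down holds the task name ...
        TaskNode byName = DagHelper.findNodeByCode(taskNodeList, "a");
        // ... but findNodeByCode compares Long.toString(getCode()), so only the code matches
        TaskNode byCode = DagHelper.findNodeByCode(taskNodeList, "851434614521856");

        System.out.println(byName); // null -> "start node ... is not in task node list"
        System.out.println(byCode); // the shell task
    }
}
```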
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6657 | https://github.com/apache/dolphinscheduler/pull/6717 | a728c85a92faff6539daa86defa5206f986d38a9 | 69f2d3b21bc99006fd1fa4e0f0b970972d26ae2f | "2021-11-01T07:33:49Z" | java | "2021-11-06T11:27:00Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/DagHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.utils;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.task.switchtask.SwitchParameters;
import org.apache.dolphinscheduler.common.task.switchtask.SwitchResultVo;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* dag tools
*/
public class DagHelper {
private static final Logger logger = LoggerFactory.getLogger(DagHelper.class);
/**
* generate flow node relation list by task node list;
* Edges that are not in the task Node List will not be added to the result
*
* @param taskNodeList taskNodeList
* @return task node relation list
*/
public static List<TaskNodeRelation> generateRelationListByFlowNodes(List<TaskNode> taskNodeList) {
List<TaskNodeRelation> nodeRelationList = new ArrayList<>();
for (TaskNode taskNode : taskNodeList) {
String preTasks = taskNode.getPreTasks();
List<String> preTaskList = JSONUtils.toList(preTasks, String.class);
if (preTaskList != null) {
for (String depNodeCode : preTaskList) {
if (null != findNodeByCode(taskNodeList, depNodeCode)) {
nodeRelationList.add(new TaskNodeRelation(depNodeCode, Long.toString(taskNode.getCode())));
}
}
}
}
return nodeRelationList;
}
/**
* generate task nodes needed by dag
*
* @param taskNodeList taskNodeList
* @param startNodeNameList startNodeNameList
* @param recoveryNodeCodeList recoveryNodeCodeList
* @param taskDependType taskDependType
* @return task node list
*/
public static List<TaskNode> generateFlowNodeListByStartNode(List<TaskNode> taskNodeList, List<String> startNodeNameList,
List<String> recoveryNodeCodeList, TaskDependType taskDependType) {
List<TaskNode> destFlowNodeList = new ArrayList<>();
List<String> startNodeList = startNodeNameList;
if (taskDependType != TaskDependType.TASK_POST
&& CollectionUtils.isEmpty(startNodeList)) {
logger.error("start node list is empty! cannot continue run the process ");
return destFlowNodeList;
}
List<TaskNode> destTaskNodeList = new ArrayList<>();
List<TaskNode> tmpTaskNodeList = new ArrayList<>();
if (taskDependType == TaskDependType.TASK_POST
&& CollectionUtils.isNotEmpty(recoveryNodeCodeList)) {
startNodeList = recoveryNodeCodeList;
}
if (CollectionUtils.isEmpty(startNodeList)) {
// no special designation start nodes
tmpTaskNodeList = taskNodeList;
} else {
// specified start nodes or resume execution
for (String startNodeCode : startNodeList) {
TaskNode startNode = findNodeByCode(taskNodeList, startNodeCode);
List<TaskNode> childNodeList = new ArrayList<>();
if (startNode == null) {
logger.error("start node name [{}] is not in task node list [{}] ",
startNodeCode,
taskNodeList
);
continue;
} else if (TaskDependType.TASK_POST == taskDependType) {
List<String> visitedNodeCodeList = new ArrayList<>();
childNodeList = getFlowNodeListPost(startNode, taskNodeList, visitedNodeCodeList);
} else if (TaskDependType.TASK_PRE == taskDependType) {
List<String> visitedNodeCodeList = new ArrayList<>();
childNodeList = getFlowNodeListPre(startNode, recoveryNodeCodeList, taskNodeList, visitedNodeCodeList);
} else {
childNodeList.add(startNode);
}
tmpTaskNodeList.addAll(childNodeList);
}
}
for (TaskNode taskNode : tmpTaskNodeList) {
if (null == findNodeByCode(destTaskNodeList, Long.toString(taskNode.getCode()))) {
destTaskNodeList.add(taskNode);
}
}
return destTaskNodeList;
}
/**
* find all the nodes that depended on the start node
*
* @param startNode startNode
* @param taskNodeList taskNodeList
* @return task node list
*/
private static List<TaskNode> getFlowNodeListPost(TaskNode startNode, List<TaskNode> taskNodeList, List<String> visitedNodeCodeList) {
List<TaskNode> resultList = new ArrayList<>();
for (TaskNode taskNode : taskNodeList) {
List<String> depList = taskNode.getDepList();
if (null != depList && null != startNode && depList.contains(Long.toString(startNode.getCode())) && !visitedNodeCodeList.contains(Long.toString(taskNode.getCode()))) {
resultList.addAll(getFlowNodeListPost(taskNode, taskNodeList, visitedNodeCodeList));
}
}
// why add (startNode != null) condition? for SonarCloud Quality Gate passed
if (null != startNode) {
visitedNodeCodeList.add(Long.toString(startNode.getCode()));
}
resultList.add(startNode);
return resultList;
}
/**
* find all nodes that start nodes depend on.
*
* @param startNode startNode
* @param recoveryNodeCodeList recoveryNodeCodeList
* @param taskNodeList taskNodeList
* @return task node list
*/
private static List<TaskNode> getFlowNodeListPre(TaskNode startNode, List<String> recoveryNodeCodeList, List<TaskNode> taskNodeList, List<String> visitedNodeCodeList) {
List<TaskNode> resultList = new ArrayList<>();
List<String> depList = new ArrayList<>();
if (null != startNode) {
depList = startNode.getDepList();
resultList.add(startNode);
}
if (CollectionUtils.isEmpty(depList)) {
return resultList;
}
for (String depNodeCode : depList) {
TaskNode start = findNodeByCode(taskNodeList, depNodeCode);
if (recoveryNodeCodeList.contains(depNodeCode)) {
resultList.add(start);
} else if (!visitedNodeCodeList.contains(depNodeCode)) {
resultList.addAll(getFlowNodeListPre(start, recoveryNodeCodeList, taskNodeList, visitedNodeCodeList));
}
}
// why add (startNode != null) condition? for SonarCloud Quality Gate passed
if (null != startNode) {
visitedNodeCodeList.add(Long.toString(startNode.getCode()));
}
return resultList;
}
/**
* generate dag by start nodes and recovery nodes
*
* @param totalTaskNodeList totalTaskNodeList
* @param startNodeNameList startNodeNameList
* @param recoveryNodeCodeList recoveryNodeCodeList
* @param depNodeType depNodeType
* @return process dag
* @throws Exception if error throws Exception
*/
public static ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList,
List<String> startNodeNameList,
List<String> recoveryNodeCodeList,
TaskDependType depNodeType) throws Exception {
List<TaskNode> destTaskNodeList = generateFlowNodeListByStartNode(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType);
if (destTaskNodeList.isEmpty()) {
return null;
}
List<TaskNodeRelation> taskNodeRelations = generateRelationListByFlowNodes(destTaskNodeList);
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(destTaskNodeList);
return processDag;
}
/**
* find node by node name
*
* @param nodeDetails nodeDetails
* @param nodeName nodeName
* @return task node
*/
public static TaskNode findNodeByName(List<TaskNode> nodeDetails, String nodeName) {
for (TaskNode taskNode : nodeDetails) {
if (taskNode.getName().equals(nodeName)) {
return taskNode;
}
}
return null;
}
/**
* find node by node code
*
* @param nodeDetails nodeDetails
* @param nodeCode nodeCode
* @return task node
*/
public static TaskNode findNodeByCode(List<TaskNode> nodeDetails, String nodeCode) {
for (TaskNode taskNode : nodeDetails) {
if (Long.toString(taskNode.getCode()).equals(nodeCode)) {
return taskNode;
}
}
return null;
}
/**
* the task can be submit when all the depends nodes are forbidden or complete
*
* @param taskNode taskNode
* @param dag dag
* @param completeTaskList completeTaskList
* @return can submit
*/
public static boolean allDependsForbiddenOrEnd(TaskNode taskNode,
DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskNode> skipTaskNodeList,
Map<String, TaskInstance> completeTaskList) {
List<String> dependList = taskNode.getDepList();
if (dependList == null) {
return true;
}
for (String dependNodeCode : dependList) {
TaskNode dependNode = dag.getNode(dependNodeCode);
if (dependNode == null || completeTaskList.containsKey(dependNodeCode)
|| dependNode.isForbidden()
|| skipTaskNodeList.containsKey(dependNodeCode)) {
continue;
} else {
return false;
}
}
return true;
}
/**
* parse the successor nodes of previous node.
* this function parse the condition node to find the right branch.
* also check all the depends nodes forbidden or complete
*
* @return successor nodes
*/
public static Set<String> parsePostNodes(String preNodeCode,
Map<String, TaskNode> skipTaskNodeList,
DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList) {
Set<String> postNodeList = new HashSet<>();
Collection<String> startVertexes = new ArrayList<>();
if (preNodeCode == null) {
startVertexes = dag.getBeginNode();
} else if (dag.getNode(preNodeCode).isConditionsTask()) {
List<String> conditionTaskList = parseConditionTask(preNodeCode, skipTaskNodeList, dag, completeTaskList);
startVertexes.addAll(conditionTaskList);
} else if (dag.getNode(preNodeCode).isSwitchTask()) {
List<String> conditionTaskList = parseSwitchTask(preNodeCode, skipTaskNodeList, dag, completeTaskList);
startVertexes.addAll(conditionTaskList);
} else {
startVertexes = dag.getSubsequentNodes(preNodeCode);
}
for (String subsequent : startVertexes) {
TaskNode taskNode = dag.getNode(subsequent);
if (isTaskNodeNeedSkip(taskNode, skipTaskNodeList)) {
setTaskNodeSkip(subsequent, dag, completeTaskList, skipTaskNodeList);
continue;
}
if (!DagHelper.allDependsForbiddenOrEnd(taskNode, dag, skipTaskNodeList, completeTaskList)) {
continue;
}
if (taskNode.isForbidden() || completeTaskList.containsKey(subsequent)) {
postNodeList.addAll(parsePostNodes(subsequent, skipTaskNodeList, dag, completeTaskList));
continue;
}
postNodeList.add(subsequent);
}
return postNodeList;
}
/**
* if all of the task dependence are skipped, skip it too.
*/
private static boolean isTaskNodeNeedSkip(TaskNode taskNode,
Map<String, TaskNode> skipTaskNodeList
) {
if (CollectionUtils.isEmpty(taskNode.getDepList())) {
return false;
}
for (String depNode : taskNode.getDepList()) {
if (!skipTaskNodeList.containsKey(depNode)) {
return false;
}
}
return true;
}
/**
* parse condition task find the branch process
* set skip flag for another one.
*/
public static List<String> parseConditionTask(String nodeCode,
Map<String, TaskNode> skipTaskNodeList,
DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList) {
List<String> conditionTaskList = new ArrayList<>();
TaskNode taskNode = dag.getNode(nodeCode);
if (!taskNode.isConditionsTask()) {
return conditionTaskList;
}
if (!completeTaskList.containsKey(nodeCode)) {
return conditionTaskList;
}
TaskInstance taskInstance = completeTaskList.get(nodeCode);
ConditionsParameters conditionsParameters =
JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
List<String> skipNodeList = new ArrayList<>();
if (taskInstance.getState().typeIsSuccess()) {
conditionTaskList = conditionsParameters.getSuccessNode();
skipNodeList = conditionsParameters.getFailedNode();
} else if (taskInstance.getState().typeIsFailure()) {
conditionTaskList = conditionsParameters.getFailedNode();
skipNodeList = conditionsParameters.getSuccessNode();
} else {
conditionTaskList.add(nodeCode);
}
for (String failedNode : skipNodeList) {
setTaskNodeSkip(failedNode, dag, completeTaskList, skipTaskNodeList);
}
return conditionTaskList;
}
/**
* parse condition task find the branch process
* set skip flag for another one.
*
* @param nodeCode
* @return
*/
public static List<String> parseSwitchTask(String nodeCode,
Map<String, TaskNode> skipTaskNodeList,
DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList) {
List<String> conditionTaskList = new ArrayList<>();
TaskNode taskNode = dag.getNode(nodeCode);
if (!taskNode.isSwitchTask()) {
return conditionTaskList;
}
if (!completeTaskList.containsKey(nodeCode)) {
return conditionTaskList;
}
conditionTaskList = skipTaskNode4Switch(taskNode, skipTaskNodeList, completeTaskList, dag);
return conditionTaskList;
}
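/**
 * keep the branch chosen by the switch result; the first node of every other branch (and its posts) is marked skipped
 */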
private static List<String> skipTaskNode4Switch(TaskNode taskNode, Map<String, TaskNode> skipTaskNodeList,
Map<String, TaskInstance> completeTaskList,
DAG<String, TaskNode, TaskNodeRelation> dag) {
SwitchParameters switchParameters = completeTaskList.get(taskNode.getName()).getSwitchDependency();
int resultConditionLocation = switchParameters.getResultConditionLocation();
List<SwitchResultVo> conditionResultVoList = switchParameters.getDependTaskList();
List<String> switchTaskList = conditionResultVoList.get(resultConditionLocation).getNextNode();
if (CollectionUtils.isEmpty(switchTaskList)) {
switchTaskList = new ArrayList<>();
}
conditionResultVoList.remove(resultConditionLocation);
for (SwitchResultVo info : conditionResultVoList) {
if (CollectionUtils.isEmpty(info.getNextNode())) {
continue;
}
setTaskNodeSkip(info.getNextNode().get(0), dag, completeTaskList, skipTaskNodeList);
}
return switchTaskList;
}
/**
* set task node and the post nodes skip flag
*/
private static void setTaskNodeSkip(String skipNodeCode,
DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList,
Map<String, TaskNode> skipTaskNodeList) {
if (!dag.containsNode(skipNodeCode)) {
return;
}
skipTaskNodeList.putIfAbsent(skipNodeCode, dag.getNode(skipNodeCode));
Collection<String> postNodeList = dag.getSubsequentNodes(skipNodeCode);
for (String post : postNodeList) {
TaskNode postNode = dag.getNode(post);
if (isTaskNodeNeedSkip(postNode, skipTaskNodeList)) {
setTaskNodeSkip(post, dag, completeTaskList, skipTaskNodeList);
}
}
}
/***
* build dag graph
* @param processDag processDag
* @return dag
*/
public static DAG<String, TaskNode, TaskNodeRelation> buildDagGraph(ProcessDag processDag) {
DAG<String, TaskNode, TaskNodeRelation> dag = new DAG<>();
//add vertex
if (CollectionUtils.isNotEmpty(processDag.getNodes())) {
for (TaskNode node : processDag.getNodes()) {
dag.addNode(Long.toString(node.getCode()), node);
}
}
//add edge
if (CollectionUtils.isNotEmpty(processDag.getEdges())) {
for (TaskNodeRelation edge : processDag.getEdges()) {
dag.addEdge(edge.getStartNode(), edge.getEndNode());
}
}
return dag;
}
/**
* get process dag
*
* @param taskNodeList task node list
* @return Process dag
*/
public static ProcessDag getProcessDag(List<TaskNode> taskNodeList) {
List<TaskNodeRelation> taskNodeRelations = new ArrayList<>();
// Traverse node information and build relationships
for (TaskNode taskNode : taskNodeList) {
String preTasks = taskNode.getPreTasks();
List<String> preTasksList = JSONUtils.toList(preTasks, String.class);
// If the dependency is not empty
if (preTasksList != null) {
for (String depNode : preTasksList) {
taskNodeRelations.add(new TaskNodeRelation(depNode, Long.toString(taskNode.getCode())));
}
}
}
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(taskNodeList);
return processDag;
}
/**
* get process dag
*
* @param taskNodeList task node list
* @return Process dag
*/
public static ProcessDag getProcessDag(List<TaskNode> taskNodeList,
List<ProcessTaskRelation> processTaskRelations) {
Map<Long, TaskNode> taskNodeMap = new HashMap<>();
taskNodeList.forEach(taskNode -> {
taskNodeMap.putIfAbsent(taskNode.getCode(), taskNode);
});
List<TaskNodeRelation> taskNodeRelations = new ArrayList<>();
for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
long preTaskCode = processTaskRelation.getPreTaskCode();
long postTaskCode = processTaskRelation.getPostTaskCode();
if (processTaskRelation.getPreTaskCode() != 0
&& taskNodeMap.containsKey(preTaskCode) && taskNodeMap.containsKey(postTaskCode)) {
TaskNode preNode = taskNodeMap.get(preTaskCode);
TaskNode postNode = taskNodeMap.get(postTaskCode);
taskNodeRelations.add(new TaskNodeRelation(Long.toString(preNode.getCode()), Long.toString(postNode.getCode())));
}
}
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(taskNodeList);
return processDag;
}
/**
* is there have conditions after the parent node
*/
public static boolean haveConditionsAfterNode(String parentNodeCode,
DAG<String, TaskNode, TaskNodeRelation> dag
) {
boolean result = false;
Set<String> subsequentNodes = dag.getSubsequentNodes(parentNodeCode);
if (CollectionUtils.isEmpty(subsequentNodes)) {
return result;
}
for (String nodeCode : subsequentNodes) {
TaskNode taskNode = dag.getNode(nodeCode);
List<String> preTasksList = JSONUtils.toList(taskNode.getPreTasks(), String.class);
if (preTasksList.contains(parentNodeCode) && taskNode.isConditionsTask()) {
return true;
}
}
return result;
}
/**
* is there have conditions after the parent node
*/
public static boolean haveConditionsAfterNode(String parentNodeCode, List<TaskNode> taskNodes) {
if (CollectionUtils.isEmpty(taskNodes)) {
return false;
}
for (TaskNode taskNode : taskNodes) {
List<String> preTasksList = JSONUtils.toList(taskNode.getPreTasks(), String.class);
if (preTasksList.contains(parentNodeCode) && taskNode.isConditionsTask()) {
return true;
}
}
return false;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,641 | [Bug] [MasterServer] RefreshResourceTask error by NPE | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When the master refreshes the worker nodes, an NPE may occur if the heartbeat info is null.
```
[ERROR] 2021-10-29 20:21:45.977 org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager:[152] - [] RefreshResourceTask error
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.common.utils.HeartBeat.decodeHeartBeat(HeartBeat.java:228)
at org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager$RefreshResourceTask.getHostWeight(LowerWeightHostManager.java:157)
at org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager$RefreshResourceTask.run(LowerWeightHostManager.java:141)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
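For reference, a minimal guard sketch (a hypothetical helper, not the actual fix): `serverNodeManager.getWorkerNodeInfo(node)` can return null right after a worker restarts, and `HeartBeat.decodeHeartBeat` does not tolerate a null string, while `getHostWeight` already treats a null `HeartBeat` as "skip this worker", so decoding could simply be skipped for empty heartbeat info:
```java
import org.apache.dolphinscheduler.common.utils.HeartBeat;

public class HeartBeatGuardSketch {
    // hypothetical helper: decode only when the registry returned a non-empty string
    public static HeartBeat safeDecode(String heartBeatInfo) {
        if (heartBeatInfo == null || heartBeatInfo.isEmpty()) {
            return null; // callers already treat a null HeartBeat as "skip this worker"
        }
        return HeartBeat.decodeHeartBeat(heartBeatInfo);
    }
}
```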
### What you expected to happen
No NPE exception.
### How to reproduce
Restart the worker server.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6641 | https://github.com/apache/dolphinscheduler/pull/6656 | b226253eec219bc0152ed5a3c5939e13385bcedc | 73f20b7553565f3d8d1ac2640d8806191fc17666 | "2021-10-29T12:48:29Z" | java | "2021-11-07T03:40:54Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/dispatch/host/LowerWeightHostManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.dispatch.host;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.HeartBeat;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWeight;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWorker;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.LowerWeightRoundRobin;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* lower weight host manager
*/
public class LowerWeightHostManager extends CommonHostManager {
private final Logger logger = LoggerFactory.getLogger(LowerWeightHostManager.class);
/**
* selector
*/
private LowerWeightRoundRobin selector;
/**
* worker host weights
*/
private ConcurrentHashMap<String, Set<HostWeight>> workerHostWeightsMap;
/**
* worker group host lock
*/
private Lock lock;
/**
* executor service
*/
private ScheduledExecutorService executorService;
@PostConstruct
public void init() {
this.selector = new LowerWeightRoundRobin();
this.workerHostWeightsMap = new ConcurrentHashMap<>();
this.lock = new ReentrantLock();
this.executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("LowerWeightHostManagerExecutor"));
this.executorService.scheduleWithFixedDelay(new RefreshResourceTask(), 0, 1, TimeUnit.SECONDS);
}
@PreDestroy
public void close() {
this.executorService.shutdownNow();
}
/**
* select host
*
* @param context context
* @return host
*/
@Override
public Host select(ExecutionContext context) {
Set<HostWeight> workerHostWeights = getWorkerHostWeights(context.getWorkerGroup());
if (CollectionUtils.isNotEmpty(workerHostWeights)) {
return selector.select(workerHostWeights).getHost();
}
return new Host();
}
@Override
public HostWorker select(Collection<HostWorker> nodes) {
throw new UnsupportedOperationException("not support");
}
private void syncWorkerHostWeight(Map<String, Set<HostWeight>> workerHostWeights) {
lock.lock();
try {
workerHostWeightsMap.clear();
workerHostWeightsMap.putAll(workerHostWeights);
} finally {
lock.unlock();
}
}
private Set<HostWeight> getWorkerHostWeights(String workerGroup) {
lock.lock();
try {
return workerHostWeightsMap.get(workerGroup);
} finally {
lock.unlock();
}
}
class RefreshResourceTask implements Runnable {
@Override
public void run() {
try {
Map<String, Set<HostWeight>> workerHostWeights = new HashMap<>();
Map<String, Set<String>> workerGroupNodes = serverNodeManager.getWorkerGroupNodes();
for (Map.Entry<String, Set<String>> entry : workerGroupNodes.entrySet()) {
String workerGroup = entry.getKey();
Set<String> nodes = entry.getValue();
Set<HostWeight> hostWeights = new HashSet<>(nodes.size());
for (String node : nodes) {
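// heartbeat info may briefly be null, e.g. right after a worker restarts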
String heartbeat = serverNodeManager.getWorkerNodeInfo(node);
HostWeight hostWeight = getHostWeight(node, workerGroup, heartbeat);
if (hostWeight != null) {
hostWeights.add(hostWeight);
}
}
if (!hostWeights.isEmpty()) {
workerHostWeights.put(workerGroup, hostWeights);
}
}
syncWorkerHostWeight(workerHostWeights);
} catch (Throwable ex) {
logger.error("RefreshResourceTask error", ex);
}
}
public HostWeight getHostWeight(String addr, String workerGroup, String heartBeatInfo) {
HeartBeat heartBeat = HeartBeat.decodeHeartBeat(heartBeatInfo);
if (heartBeat == null) {
return null;
}
if (Constants.ABNORMAL_NODE_STATUS == heartBeat.getServerStatus()) {
logger.warn("worker {} current cpu load average {} is too high or available memory {}G is too low",
addr, heartBeat.getLoadAverage(), heartBeat.getAvailablePhysicalMemorySize());
return null;
}
if (Constants.BUSY_NODE_STATUE == heartBeat.getServerStatus()) {
logger.warn("worker {} is busy, current waiting task count {} is large than worker thread count {}",
addr, heartBeat.getWorkerWaitingTaskCount(), heartBeat.getWorkerExecThreadCount());
return null;
}
return new HostWeight(HostWorker.of(addr, heartBeat.getWorkerHostWeight(), workerGroup),
heartBeat.getCpuUsage(), heartBeat.getMemoryUsage(), heartBeat.getLoadAverage(), heartBeat.getStartupTime());
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,641 | [Bug] [MasterServer] RefreshResourceTask error by NPE | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When the master refreshes the worker nodes, an NPE can occur if a worker's heartbeat is null.
```
[ERROR] 2021-10-29 20:21:45.977 org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager:[152] - [] RefreshResourceTask error
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.common.utils.HeartBeat.decodeHeartBeat(HeartBeat.java:228)
at org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager$RefreshResourceTask.getHostWeight(LowerWeightHostManager.java:157)
at org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager$RefreshResourceTask.run(LowerWeightHostManager.java:141)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
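A minimal sketch of the kind of guard that would prevent this, based on `getHostWeight` in the `LowerWeightHostManager` source shown earlier in this document. The early return on a null or empty heartbeat string is an assumption about the fix, not the merged patch; the remaining checks mirror the existing method body unchanged:
```java
public HostWeight getHostWeight(String addr, String workerGroup, String heartBeatInfo) {
    // assumed guard: HeartBeat.decodeHeartBeat(null) is what raises the NPE in the trace above,
    // so bail out before decoding when the worker has not reported a heartbeat yet
    if (heartBeatInfo == null || heartBeatInfo.isEmpty()) {
        logger.warn("worker {} in worker group {} has not reported a heartbeat yet", addr, workerGroup);
        return null;
    }
    HeartBeat heartBeat = HeartBeat.decodeHeartBeat(heartBeatInfo);
    if (heartBeat == null) {
        return null;
    }
    if (Constants.ABNORMAL_NODE_STATUS == heartBeat.getServerStatus()) {
        logger.warn("worker {} current cpu load average {} is too high or available memory {}G is too low",
                addr, heartBeat.getLoadAverage(), heartBeat.getAvailablePhysicalMemorySize());
        return null;
    }
    if (Constants.BUSY_NODE_STATUE == heartBeat.getServerStatus()) {
        logger.warn("worker {} is busy, current waiting task count {} is large than worker thread count {}",
                addr, heartBeat.getWorkerWaitingTaskCount(), heartBeat.getWorkerExecThreadCount());
        return null;
    }
    return new HostWeight(HostWorker.of(addr, heartBeat.getWorkerHostWeight(), workerGroup),
            heartBeat.getCpuUsage(), heartBeat.getMemoryUsage(), heartBeat.getLoadAverage(), heartBeat.getStartupTime());
}
```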
### What you expected to happen
No NPE exception.
### How to reproduce
Restart the worker server.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6641 | https://github.com/apache/dolphinscheduler/pull/6656 | b226253eec219bc0152ed5a3c5939e13385bcedc | 73f20b7553565f3d8d1ac2640d8806191fc17666 | "2021-10-29T12:48:29Z" | java | "2021-11-07T03:40:54Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/dispatch/host/RefreshResourceTaskTest.java | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,641 | [Bug] [MasterServer] RefreshResourceTask error by NPE | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When the master refreshes the worker nodes, an NPE can occur if a worker's heartbeat is null.
```
[ERROR] 2021-10-29 20:21:45.977 org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager:[152] - [] RefreshResourceTask error
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.common.utils.HeartBeat.decodeHeartBeat(HeartBeat.java:228)
at org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager$RefreshResourceTask.getHostWeight(LowerWeightHostManager.java:157)
at org.apache.dolphinscheduler.server.master.dispatch.host.LowerWeightHostManager$RefreshResourceTask.run(LowerWeightHostManager.java:141)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
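This duplicate record of #6641 is paired with the root `pom.xml`, so the fix presumably also registers the new test class in the surefire includes there, e.g. `<include>**/server/master/dispatch/host/RefreshResourceTaskTest.java</include>`. A sketch of what that regression test could look like (class layout and assertion are assumptions, not the merged test; the package matches the `updated_file` path of the previous record):
```java
package org.apache.dolphinscheduler.server.master.dispatch.host;

import org.junit.Assert;
import org.junit.Test;

public class RefreshResourceTaskTest {

    @Test
    public void testGetHostWeightWithNullHeartbeat() {
        LowerWeightHostManager hostManager = new LowerWeightHostManager();
        LowerWeightHostManager.RefreshResourceTask task = hostManager.new RefreshResourceTask();
        // with a null guard in place, a missing heartbeat yields no HostWeight instead of an NPE
        Assert.assertNull(task.getHostWeight("192.168.1.1:1234", "default", null));
    }
}
```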
### What you expected to happen
No NPE exception.
### How to reproduce
Restart the worker server.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6641 | https://github.com/apache/dolphinscheduler/pull/6656 | b226253eec219bc0152ed5a3c5939e13385bcedc | 73f20b7553565f3d8d1ac2640d8806191fc17666 | "2021-10-29T12:48:29Z" | java | "2021-11-07T03:40:54Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>2.0.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<zookeeper.version>3.4.14</zookeeper.version>
<spring.version>5.1.19.RELEASE</spring.version>
<spring.boot.version>2.1.18.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.2.4</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>8.0.16</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.lang.version>2.6</commons.lang.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>4.1.2</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.2.5</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.9.1</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.1.2</checkstyle.version>
<curator.test>2.12.0</curator.test>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
        <rpm-maven-plugin.version>2.2.0</rpm-maven-plugin.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
<java-websocket.version>1.5.1</java-websocket.version>
<py4j.version>0.10.9</py4j.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.java-websocket</groupId>
<artifactId>Java-WebSocket</artifactId>
<version>${java-websocket.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-standalone-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-registry-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-python</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
                        <groupId>org.apache.logging.log4j</groupId>
                        <artifactId>log4j-1.2-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.test}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jacoco</groupId>
<artifactId>org.jacoco.agent</artifactId>
<version>${jacoco.version}</version>
<classifier>runtime</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>${commons.lang.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi-ooxml</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
                        <groupId>com.sun.jersey</groupId>
                        <artifactId>jersey-json</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
<dependency>
<groupId>net.sf.py4j</groupId>
<artifactId>py4j</artifactId>
<version>${py4j.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
                    <version>${rpm-maven-plugin.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<systemPropertyVariables>
<jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile>
</systemPropertyVariables>
<includes>
<!--registry plugin -->
<include>**/plugin/registry/zookeeper/ZookeeperRegistryTest.java</include>
<!-- API -->
<include>**/api/controller/ProjectControllerTest.java</include>
<include>**/api/controller/QueueControllerTest.java</include>
<include>**/api/configuration/TrafficConfigurationTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/controller/SchedulerControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
                        <include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/interceptor/RateLimitInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessTaskRelationServiceImplTest.java</include>
<include>**/api/service/TaskDefinitionServiceImplTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/service/EnvironmentServiceTest.java</include>
<include>**/api/service/EnvironmentWorkerGroupRelationServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/controller/EnvironmentControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SparkParametersTest.java</include>
<include>**/common/task/SqlParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/datasource/clickhouse/ClickHouseDatasourceProcessorTest.java</include>
<include>**/common/datasource/db2/Db2DatasourceProcessorTest.java</include>
<include>**/common/datasource/hive/HiveDatasourceProcessorTest.java</include>
<include>**/common/datasource/mysql/MysqlDatasourceProcessorTest.java</include>
<include>**/common/datasource/oracle/OracleDatasourceProcessorTest.java</include>
<include>**/common/datasource/postgresql/PostgreSqlDatasourceProcessorTest.java</include>
<include>**/common/datasource/presto/PrestoDatasourceProcessorTest.java</include>
<include>**/common/datasource/spark/SparkDatasourceProcessorTest.java</include>
<include>**/common/datasource/sqlserver/SqlServerDatasourceProcessorTest.java</include>
<include>**/common/datasource/DatasourceUtilTest.java</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/command/log/GetLogBytesRequestCommandTest.java</include>
<include>**/remote/command/log/GetLogBytesResponseCommandTest.java</include>
<include>**/remote/command/log/ViewLogRequestCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/cache/impl/TaskInstanceCacheManagerImplTest.java</include>
<include>**/server/master/config/MasterConfigTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/registry/MasterRegistryClientTest.java</include>
<include>**/server/master/registry/ServerNodeManagerTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/SwitchTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/master/zk/ZKMasterClientTest.java</include>
<include>**/server/registry/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/datax/DataxTaskTest.java</include>
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/processdure/ProcedureTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/PythonCommandExecutorTest.java</include>
<include>**/server/worker/task/TaskParamsTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/task/sql/SqlTaskTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/server/master/cache/impl/ProcessInstanceExecCacheManagerImplTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/registry/RegistryClientTest.java</include>
<include>**/service/registry/RegistryPluginTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/service/alert/ProcessAlertManagerTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
                        <!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/plugin/alert/slack/SlackAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/slack/SlackAlertPluginTest.java</include>
<include>**/plugin/alert/slack/SlackSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/spi/plugin/DolphinSchedulerPluginLoaderTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
<include>**/plugin/task/pigeon/PigeonTaskTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<dataFile>${project.build.directory}/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>default-instrument</id>
<goals>
<goal>instrument</goal>
</goals>
</execution>
<execution>
<id>default-restore-instrumented-classes</id>
<goals>
<goal>restore-instrumented-classes</goal>
</goals>
</execution>
<execution>
<id>default-report</id>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.45</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-registry-plugin</module>
<module>dolphinscheduler-task-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-microbench</module>
<module>dolphinscheduler-standalone-server</module>
<module>dolphinscheduler-python</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 6,727 | [Bug] [master-server] ProcessInstanceMapMapper extra column | ### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
There is an extra column in the ProcessInstanceMap mapper.
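Judging from the `baseSql` fragment in the mapper file attached to this record, the extra column is presumably `next_process_instance_id`; after deleting it the fragment would read `id, parent_process_instance_id, parent_task_instance_id, process_instance_id`.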
### What you expected to happen
The extra column should be deleted.
### How to reproduce
Inspect the `baseSql` fragment in `ProcessInstanceMapMapper.xml`.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
| https://github.com/apache/dolphinscheduler/issues/6727 | https://github.com/apache/dolphinscheduler/pull/6728 | 5400abc74fdb16717d22c15f964cad1a6dfd899e | d27c6ea8f0077ec52da2f1bd3411c866643d5afb | "2021-11-07T09:03:09Z" | java | "2021-11-07T10:01:40Z" | dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapMapper.xml | <?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper">
<sql id="baseSql">
id, parent_process_instance_id, parent_task_instance_id, process_instance_id,next_process_instance_id
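        <!-- next_process_instance_id is presumably the extra column this issue reports; the linked fix would drop it -->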
</sql>
<delete id="deleteByParentProcessId">
delete
from t_ds_relation_process_instance
where parent_process_instance_id=#{parentProcessId}
</delete>
<select id="queryByParentId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap">
select
<include refid="baseSql"/>
from t_ds_relation_process_instance
where parent_process_instance_id = #{parentProcessId}
and parent_task_instance_id = #{parentTaskId}
</select>
<select id="queryBySubProcessId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap">
select
<include refid="baseSql"/>
from t_ds_relation_process_instance
where process_instance_id = #{subProcessId}
</select>
<select id="querySubIdListByParentId" resultType="java.lang.Integer">
select process_instance_id
from t_ds_relation_process_instance
where parent_process_instance_id = #{parentInstanceId}
</select>
</mapper>
|