/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.CycleDependency;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.cronutils.model.Cron;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* process related dao: aggregates the mappers used to operate on process and task data.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
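// instance states regarded as in-flight; used to find process/task instances that need
// failover handling (see queryNeedFailoverProcessInstances / queryNeedFailoverTaskInstances)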
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
/**
* handle Command (construct ProcessInstance from Command), wrapped in a transaction
*
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = Exception.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
if (!checkThreadNum(command, validThreadNum)) {
logger.info("there is not enough thread for this command: {}", command);
return setWaitingThreadProcess(command, processInstance);
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
delCommandById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
@Transactional(rollbackFor = Exception.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
delCommandById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITTING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITTING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionId());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
*
* @return command
*/
public Command findOneCommand() {
return commandMapper.getOneToRun();
}
/**
* check the input command exists in queue list
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
CommandType commandType = command.getCommandType();
if (cmdTypeMap.containsKey(commandType)) {
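// the command param is expected to carry the recovered process instance id under
// CMD_PARAM_RECOVER_PROCESS_ID_STRING, e.g. {"ProcessInstanceId": 28} (illustrative id)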
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskNode> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.info("process define not exists");
return null;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
return new ArrayList<>();
}
return processData.getTasks();
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
LogClientService logClient = null;
try {
logClient = new LogClientService();
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
// compatible old version
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip, port, taskLogPath);
}
} finally {
if (logClient != null) {
logClient.close();
}
}
}
/**
* calculate sub process number in the process define.
*
* @param processDefinitionId processDefinitionId
* @return process thread num count
*/
private Integer workProcessThreadNumCount(Integer processDefinitionId) {
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinitionId, ids);
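// each sub process occupies one thread, plus one for the process itself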
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
ProcessDefinition processDefinition = processDefineMapper.selectById(parentId);
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskNode taskNode : taskNodeList) {
String parameter = taskNode.getParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
/**
* create recovery waiting thread command when thread pool is not enough for the process instance.
* a sub work process instance does not need to create a recovery command.
* create recovery waiting thread command and delete origin command at the same time.
* if the recovery command already exists, only update the update_time field
*
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
// sub process does not need to create a wait command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// the process instance quit in the "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITTING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinitionId(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getProcessInstancePriority()
);
saveCommand(command);
return;
}
// update the command time if the current command is a recover-from-waiting-thread command
if (originCommand.getCommandType() == CommandType.RECOVER_WAITTING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITTING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if (scheduleTime != null) {
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
processInstance.setConnects(processDefinition.getConnects());
// reset global params while there are start parameters
setGlobalParamIfCommanded(processDefinition, cmdParam);
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
//copy process define json to process instance
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
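/**
* override global params in the process definition with the start params
* carried in the command (CMD_PARAM_START_PARAMS), if any.
* only keys already present in the definition's global param map are overridden.
*/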
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = null;
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
// set start param into global params
if (startParamMap != null && startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
/**
* get process tenant
* if there is a tenant id in the definition, use the tenant of the definition.
* if there is no tenant id in the definition or the tenant does not exist,
* use the definition creator's tenant.
*
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* check whether the command parameters are valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host) {
ProcessInstance processInstance = null;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = null;
if (command.getProcessDefinitionId() != 0) {
processDefinition = processDefineMapper.selectById(command.getProcessDefinitionId());
if (processDefinition == null) {
logger.error("cannot find the work process define! define id : {}", command.getProcessDefinitionId());
return null;
}
}
if (cmdParam != null) {
Integer processInstanceId = 0;
// recover from failure or pause tasks
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
// sub process map
String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
// waiting thread command
String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
// reset global params while repeat running is needed by cmdParam
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
}
processDefinition = processDefineMapper.selectById(processInstance.getProcessDefinitionId());
processInstance.setProcessDefinition(processDefinition);
//reset command parameter
if (processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
} else {
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITTING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* return complement data if the process start with complement data
*
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE),
YYYY_MM_DD_HH_MM_SS);
if (Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(startComplementTime);
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters.
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters
*
* @param subProcessInstance subProcessInstance
* @return process instance
*/
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user defined params to sub process.
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return subProcessInstance;
}
/**
* join parent global params into sub process.
* only keys that are not already in the sub process global params are joined.
*
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
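*
* illustrative example (property json simplified): parent params
* [{"prop":"a","value":"1"},{"prop":"b","value":"2"}] joined with sub params
* [{"prop":"b","value":"9"}] yield [{"prop":"b","value":"9"},{"prop":"a","value":"1"}];
* the sub value of "b" wins.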
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} state:{} complete, instance id:{} state: {} ",
taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
* note: repeat running does not generate a new sub process instance
* set map {parent instance id, task instance id, 0(child instance id)}
*
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
// first check whether the sub work flow instance has already been created
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
// fault tolerance recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionId());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
* complement data needs to transform parent parameters to the child.
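* when the parent is a complement-data run, the CMDPARAM_COMPLEMENT_DATA_START_DATE and
* CMDPARAM_COMPLEMENT_DATA_END_DATE entries are copied from the parent command param.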
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if (parentProcessInstance.isComplementData()) {
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
TaskNode taskNode = JSONUtils.parseObject(task.getTaskJson(), TaskNode.class);
Map<String, String> subProcessParam = JSONUtils.toMap(taskNode.getParams());
Integer childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance);
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
childDefineId,
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
parentProcessInstance.getProcessInstancePriority()
);
}
/**
* initialize sub work flow state
* the child instance state is initialized when recovering from pause/stop/failure
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
/**
* get sub work flow command type
* if the child instance exists: child command = father command
* if the child instance does not exist: child command = the first command in the father's history
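* illustrative example: a history of "START_PROCESS,START_FAILURE_TASK_PROCESS" yields START_PROCESS.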
*/
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionId childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, int childDefinitionId) {
ProcessDefinition fatherDefinition = this.findProcessDefineById(parentProcessInstance.getProcessDefinitionId());
ProcessDefinition childDefinition = this.findProcessDefineById(childDefinitionId);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
* submit task instance to the database
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
// mark the failed task invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
// create a new task instance
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
/**
* get submit task instance state by the work process state
* cannot modify the task state when it is running/killed/submitted successfully, because this
* task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
* if all of above are not satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
// return pause/stop if the process instance state is ready pause/stop,
// or return submit success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE) {
return false;
}
}
return true;
}
/**
* create a new process instance
*
* @param processInstance processInstance
*/
public void createProcessInstance(ProcessInstance processInstance) {
if (processInstance != null) {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update work process instance to the database
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
createProcessInstance(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* delete a command by id
*
* @param id id
*/
public void delCommandById(int id) {
commandMapper.deleteById(id);
}
/**
* find task instance by id
*
* @param taskId task id
* @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
* package task instance, associate processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return taskInstance;
}
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefineById(taskInstance.getProcessDefinitionId());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
return taskInstance;
}
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
int count = 0;
if (processInstanceMap != null) {
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
*
* @param taskInstance taskInstance
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
/**
* update the process instance
*
* @param processInstanceId processInstanceId
* @param processJson processJson
* @param globalParams globalParams
* @param scheduleTime scheduleTime
* @param flag flag
* @param locations locations
* @param connects connects
* @return update process instance result
*/
public int updateProcessInstance(Integer processInstanceId, String processJson,
String globalParams, Date scheduleTime, Flag flag,
String locations, String connects) {
ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
if (processInstance != null) {
processInstance.setProcessInstanceJson(processJson);
processInstance.setGlobalParams(globalParams);
processInstance.setScheduleTime(scheduleTime);
processInstance.setLocations(locations);
processInstance.setConnects(connects);
return processInstanceMapper.updateById(processInstance);
}
return 0;
}
/**
* change task state
*
* @param taskInstance taskInstance
* @param state state
* @param endTime endTime
* @param processId processId
* @param appIds appIds
* @param taskInstId taskInstId
* @param varPool varPool
* @param result result
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool,
String result) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(result, taskInstance);
saveTaskInstance(taskInstance);
}
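/**
* write the value of each OUT-direct local param produced by a task back into the
* task json and into the matching process instance global param.
* a minimal sketch of the expected worker result, assuming an OUT param named "total":
* result = "[{\"total\":\"10\"}]"; only the first row of the array is used.
*/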
public void changeOutParam(String result, TaskInstance taskInstance) {
if (StringUtils.isEmpty(result)) {
return;
}
List<Map<String, String>> workerResultParam = getListMapByString(result);
if (CollectionUtils.isEmpty(workerResultParam)) {
return;
}
// if the result has more than one line, just take the first one.
Map<String, String> row = workerResultParam.get(0);
if (row == null || row.size() == 0) {
return;
}
TaskNode taskNode = JSONUtils.parseObject(taskInstance.getTaskJson(), TaskNode.class);
Map<String, Object> taskParams = JSONUtils.toMap(taskNode.getParams(), String.class, Object.class);
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
ProcessInstance processInstance = this.processInstanceMapper.queryDetailById(taskInstance.getProcessInstanceId());
List<Property> params4Property = JSONUtils.toList(processInstance.getGlobalParams(), Property.class);
Map<String, Property> allParamMap = params4Property.stream().collect(Collectors.toMap(Property::getProp, property -> property));
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
Property property = allParamMap.get(paramName);
if (property == null) {
continue;
}
String value = row.get(paramName);
if (StringUtils.isNotEmpty(value)) {
property.setValue(value);
info.setValue(value);
}
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
// task instance node json
taskInstance.setTaskJson(JSONUtils.toJsonString(taskNode));
String params4ProcessString = JSONUtils.toJsonString(params4Property);
int updateCount = this.processInstanceMapper.updateGlobalParamsById(params4ProcessString, processInstance.getId());
logger.info("updateCount:{}, params4Process:{}, processInstanceId:{}", updateCount, params4ProcessString, processInstance.getId());
}
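/**
* parse a json array string into a list of string-to-string maps.
* e.g. "[{\"a\":\"1\"},{\"b\":\"2\"}]" becomes two single-entry maps (illustrative values).
*/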
public List<Map<String, String>> getListMapByString(String json) {
List<Map<String, String>> allParams = new ArrayList<>();
ArrayNode paramsByJson = JSONUtils.parseArray(json);
Iterator<JsonNode> listIterator = paramsByJson.iterator();
while (listIterator.hasNext()) {
Map<String, String> param = JSONUtils.toMap(listIterator.next().toString(), String.class, String.class);
allParams.add(param);
}
return allParams;
}
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionId
*
* @param processDefinitionId processDefinitionId
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionId(int processDefinitionId) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
//1 update processInstance host is null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
//2 insert into recover command
Command cmd = new Command();
cmd.setProcessDefinitionId(processInstance.getProcessDefinitionId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
// prepend "/" to the resource name so the tenant code can still be resolved for resources created by older versions
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
return resourceMapper.queryTenantCodeByResourceName(fullName, resourceType.ordinal());
}
/**
* find schedule list by process define id.
*
* @param ids ids
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineId(int[] ids) {
return scheduleMapper.selectAllByProcessDefineArray(
ids);
}
/**
* get dependency cycle by work process define id and scheduler fire time
*
* @param masterId masterId
* @param processDefinitionId processDefinitionId
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency
* @throws Exception if error throws Exception
*/
public CycleDependency getCycleDependency(int masterId, int processDefinitionId, Date scheduledFireTime) throws Exception {
List<CycleDependency> list = getCycleDependencies(masterId, new int[]{processDefinitionId}, scheduledFireTime);
return !list.isEmpty() ? list.get(0) : null;
}
/**
* get dependency cycle list by work process define id list and scheduler fire time
*
* @param masterId masterId
* @param ids ids
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency list
* @throws Exception if error throws Exception
*/
public List<CycleDependency> getCycleDependencies(int masterId, int[] ids, Date scheduledFireTime) throws Exception {
List<CycleDependency> cycleDependencyList = new ArrayList<>();
if (null == ids || ids.length == 0) {
logger.warn("ids[] is empty!is invalid!");
return cycleDependencyList;
}
if (scheduledFireTime == null) {
logger.warn("scheduledFireTime is null!is invalid!");
return cycleDependencyList;
}
String strCrontab = "";
CronExpression depCronExpression;
Cron depCron;
List<Date> list;
List<Schedule> schedules = this.selectAllByProcessDefineId(ids);
// for all scheduling information
for (Schedule depSchedule : schedules) {
strCrontab = depSchedule.getCrontab();
depCronExpression = CronUtils.parse2CronExpression(strCrontab);
depCron = CronUtils.parse2Cron(strCrontab);
CycleEnum cycleEnum = CronUtils.getMiniCycle(depCron);
if (cycleEnum == null) {
logger.error("{} is not valid", strCrontab);
continue;
}
Calendar calendar = Calendar.getInstance();
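// look back one full cycle plus a safety margin: 25 hours for hourly crontabs,
// 32 days for daily/weekly and 13 months for monthly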
switch (cycleEnum) {
case HOUR:
calendar.add(Calendar.HOUR, -25);
break;
case DAY:
case WEEK:
calendar.add(Calendar.DATE, -32);
break;
case MONTH:
calendar.add(Calendar.MONTH, -13);
break;
default:
String cycleName = cycleEnum.name();
logger.warn("Dependent process definition's cycleEnum is {},not support!!", cycleName);
continue;
}
Date start = calendar.getTime();
if (depSchedule.getProcessDefinitionId() == masterId) {
list = CronUtils.getSelfFireDateList(start, scheduledFireTime, depCronExpression);
} else {
list = CronUtils.getFireDateList(start, scheduledFireTime, depCronExpression);
}
if (!list.isEmpty()) {
start = list.get(list.size() - 1);
CycleDependency dependency = new CycleDependency(depSchedule.getProcessDefinitionId(), start, CronUtils.getExpirationTime(start, cycleEnum), cycleEnum);
cycleDependencyList.add(dependency);
}
}
return cycleDependencyList;
}
/**
* find last scheduler process instance in the date interval
*
* @param definitionId definitionId
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionId process definition id
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionId process definition id
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(int definitionId, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionId,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* get have perm project ids
*
* @param userId userId
* @return project ids
*/
public List<Integer> getProjectIdListHavePerm(int userId) {
List<Integer> projectIdList = new ArrayList<>();
for (Project project : getProjectListHavePerm(userId)) {
projectIdList.add(project.getId());
}
return projectIdList;
}
/**
* list unauthorized entities (resource files, datasources or udf functions)
*
* @param userId user id
* @param needChecks the ids or full names to check, depending on the authorization type
* @return the subset of needChecks that the user is not authorized for
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
Set<Integer> authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
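* e.g. returns "1_10_100" for process definition 1, process instance 10, task instance 100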
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessDefinition definition = this.findProcessDefineById(taskInstance.getProcessDefinitionId());
ProcessInstance processInstanceById = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if (definition == null || processInstanceById == null) {
return "";
}
return String.format("%s_%s_%s",
definition.getId(),
processInstanceById.getId(),
taskInstance.getId());
}
/**
* solve the branch rename bug: when tasks are renamed, patch the conditionResult
* of conditions tasks so their branches still point to the renamed nodes
*
* @param processData the process data saved this time
* @param oldJson the previous process definition json
* @return the updated process definition json
*/
public String changeJson(ProcessData processData, String oldJson) {
ProcessData oldProcessData = JSONUtils.parseObject(oldJson, ProcessData.class);
HashMap<String, String> oldNameTaskId = new HashMap<>();
List<TaskNode> oldTasks = oldProcessData.getTasks();
for (int i = 0; i < oldTasks.size(); i++) {
TaskNode taskNode = oldTasks.get(i);
String oldName = taskNode.getName();
String oldId = taskNode.getId();
oldNameTaskId.put(oldName, oldId);
}
// from the process definition json saved this time, record each task id -> task name mapping
HashMap<String, String> newNameTaskId = new HashMap<>();
List<TaskNode> newTasks = processData.getTasks();
for (int i = 0; i < newTasks.size(); i++) {
TaskNode taskNode = newTasks.get(i);
String newId = taskNode.getId();
String newName = taskNode.getName();
newNameTaskId.put(newId, newName);
}
// replace the previous conditionResult with a new one built from the renamed nodes
List<TaskNode> tasks = processData.getTasks();
for (int i = 0; i < tasks.size(); i++) {
TaskNode taskNode = newTasks.get(i);
String type = taskNode.getType();
if (TaskType.CONDITIONS.getDescp().equalsIgnoreCase(type)) {
ConditionsParameters conditionsParameters = JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
String oldSuccessNodeName = conditionsParameters.getSuccessNode().get(0);
String oldFailedNodeName = conditionsParameters.getFailedNode().get(0);
String newSuccessNodeName = newNameTaskId.get(oldNameTaskId.get(oldSuccessNodeName));
String newFailedNodeName = newNameTaskId.get(oldNameTaskId.get(oldFailedNodeName));
if (newSuccessNodeName != null) {
ArrayList<String> successNode = new ArrayList<>();
successNode.add(newSuccessNodeName);
conditionsParameters.setSuccessNode(successNode);
}
if (newFailedNodeName != null) {
ArrayList<String> failedNode = new ArrayList<>();
failedNode.add(newFailedNodeName);
conditionsParameters.setFailedNode(failedNode);
}
String conditionResultStr = conditionsParameters.getConditionResult();
taskNode.setConditionResult(conditionResultStr);
tasks.set(i, taskNode);
}
}
return JSONUtils.toJsonString(processData);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,006 | [Feature][Worker] Add a configuration item to set whether the tenant is automatically created on Worker | **Describe the feature**
In the dev branch, the tenant executing a task will not be automatically created in worker. This will cause issue [#4995](https://github.com/apache/incubator-dolphinscheduler/issues/4995)
In the following two scenarios, it will cause inconvenience to users
- Product or operations people want users to be created automatically instead of having to create tenants manually
- In the docker container environment, the tenants cannot be created in advance. Once the container is pulled up again, all tenants will disappear
**Which version of Dolphin Scheduler:**
- [dev]
**Is your feature request related to a problem? Please describe.**
In the docker container environment, it's a very painful thing to create new tenants on every worker every time.
**Describe the solution you'd like**
Add a configuration item to set whether the tenant is automatically created on Worker
The default value of this configuration item is `false`, but it needs to be set to `true` in the container
The name of this configuration item is `worker.tenant.auto.create`
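A minimal sketch of how the flag could be consumed (names and wiring are assumptions for illustration; the real implementation may differ):

```java
import java.util.List;
import org.apache.dolphinscheduler.common.utils.OSUtils;

public class TenantAutoCreateSketch {
    // hypothetical flag, e.g. read from worker.properties: worker.tenant.auto.create=false
    private static final boolean TENANT_AUTO_CREATE =
            Boolean.parseBoolean(System.getProperty("worker.tenant.auto.create", "false"));

    /** Ensure the tenant's OS user exists before task execution (sketch only). */
    public static boolean ensureTenantExists(String tenantCode) {
        List<String> osUsers = OSUtils.getUserList();
        if (osUsers.contains(tenantCode)) {
            return true;
        }
        // only auto-create when explicitly enabled, e.g. in container environments
        return TENANT_AUTO_CREATE && OSUtils.createUser(tenantCode);
    }
}
```

Keeping the default at `false` preserves the current behavior for bare-metal deployments, while containers can opt in.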
**Additional context**
Previous dev email discussion: https://lists.apache.org/thread.html/ra44b2e69759fcc980e4ed04c1811037bf0e743e47827fc2dcd1049d6%40%3Cdev.dolphinscheduler.apache.org%3E | https://github.com/apache/dolphinscheduler/issues/5006 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-08T15:28:05Z" | java | "2021-03-18T10:34:42Z" | docker/build/conf/dolphinscheduler/worker.properties.tpl | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# worker execute thread num
worker.exec.threads=${WORKER_EXEC_THREADS}
# worker heartbeat interval
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
# only less than cpu avg load, worker server can work. default value -1: the number of cpu cores * 2
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
worker.reserved.memory=${WORKER_RESERVED_MEMORY}
# worker listener port
worker.listen.port=${WORKER_LISTEN_PORT}
# default worker groups
worker.groups=${WORKER_GROUPS}
# default worker host weight
worker.host.weight=${WORKER_HOST_WEIGHT}
# alert server listener host
alert.listen.host=${ALERT_LISTEN_HOST}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,006 | [Feature][Worker] Add a configuration item to set whether the tenant is automatically created on Worker | **Describe the feature**
In the dev branch, the tenant executing a task will not be automatically created in worker. This will cause issue [#4995](https://github.com/apache/incubator-dolphinscheduler/issues/4995)
In the following two scenarios, it will cause inconvenience to users
- Product or operations people want users to be created automatically instead of having to create tenants manually
- In the docker container environment, the tenants cannot be created in advance. Once the container is pulled up again, all tenants will disappear
**Which version of Dolphin Scheduler:**
- [dev]
**Is your feature request related to a problem? Please describe.**
In the docker container environment, it's a very painful thing to create new tenants on every worker every time.
**Describe the solution you'd like**
Add a configuration item to set whether the tenant is automatically created on Worker
The default value of this configuration item is `false`, but it needs to be set to `true` in the container
The name of this configuration item is `worker.tenant.auto.create`
**Additional context**
Previous dev email discussion: https://lists.apache.org/thread.html/ra44b2e69759fcc980e4ed04c1811037bf0e743e47827fc2dcd1049d6%40%3Cdev.dolphinscheduler.apache.org%3E | https://github.com/apache/dolphinscheduler/issues/5006 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-08T15:28:05Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.shell.ShellExecutor;
import org.apache.commons.configuration.Configuration;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.math.RoundingMode;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.StringTokenizer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
/**
* os utils
*/
public class OSUtils {
private static final Logger logger = LoggerFactory.getLogger(OSUtils.class);
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private static final SystemInfo SI = new SystemInfo();
public static final String TWO_DECIMAL = "0.00";
/**
* return -1 when the function can not get hardware env info
* e.g {@link OSUtils#loadAverage()} {@link OSUtils#cpuUsage()}
*/
public static final double NEGATIVE_ONE = -1;
private static HardwareAbstractionLayer hal = SI.getHardware();
private OSUtils() {
throw new UnsupportedOperationException("Construct OSUtils");
}
/**
* Initialization regularization, solve the problem of pre-compilation performance,
* avoid the thread safety problem of multi-thread operation
*/
private static final Pattern PATTERN = Pattern.compile("\\s+");
/**
* get memory usage
* Keep 2 decimal
*
* @return percent %
*/
public static double memoryUsage() {
GlobalMemory memory = hal.getMemory();
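// multiplying by 0.1 first forces floating-point arithmetic; the trailing * 10 restores the scale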
double memoryUsage = (memory.getTotal() - memory.getAvailable() - memory.getSwapUsed()) * 0.1 / memory.getTotal() * 10;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(memoryUsage));
}
/**
* get available physical memory size
* <p>
* Keep 2 decimal
*
* @return available Physical Memory Size, unit: G
*/
public static double availablePhysicalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = (memory.getAvailable() + memory.getSwapUsed()) / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* get total physical memory size
* <p>
* Keep 2 decimal
*
* @return available Physical Memory Size, unit: G
*/
public static double totalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = memory.getTotal() / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* load average
*
* @return load average
*/
public static double loadAverage() {
double loadAverage;
try {
OperatingSystemMXBean osBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
loadAverage = osBean.getSystemLoadAverage();
} catch (Exception e) {
logger.error("get operation system load average exception, try another method ", e);
loadAverage = hal.getProcessor().getSystemLoadAverage();
if (Double.isNaN(loadAverage)) {
return NEGATIVE_ONE;
}
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(loadAverage));
}
/**
* get cpu usage
*
* @return cpu usage
*/
public static double cpuUsage() {
CentralProcessor processor = hal.getProcessor();
double cpuUsage = processor.getSystemCpuLoad();
if (Double.isNaN(cpuUsage)) {
return NEGATIVE_ONE;
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(cpuUsage));
}
public static List<String> getUserList() {
try {
if (isMacOS()) {
return getUserListFromMac();
} else if (isWindows()) {
return getUserListFromWindows();
} else {
return getUserListFromLinux();
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return Collections.emptyList();
}
/**
* get user list from linux
*
* @return user list
*/
private static List<String> getUserListFromLinux() throws IOException {
List<String> userList = new ArrayList<>();
try (BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(new FileInputStream("/etc/passwd")))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
if (line.contains(":")) {
String[] userInfo = line.split(":");
userList.add(userInfo[0]);
}
}
}
return userList;
}
/**
* get user list from mac
*
* @return user list
*/
private static List<String> getUserListFromMac() throws IOException {
String result = exeCmd("dscl . list /users");
if (StringUtils.isNotEmpty(result)) {
return Arrays.asList(result.split("\n"));
}
return Collections.emptyList();
}
/**
* get user list from windows
*
* @return user list
*/
private static List<String> getUserListFromWindows() throws IOException {
String result = exeCmd("net user");
String[] lines = result.split("\n");
int startPos = 0;
int endPos = lines.length - 2;
for (int i = 0; i < lines.length; i++) {
if (lines[i].isEmpty()) {
continue;
}
int count = 0;
// count the '-' characters in a separator line such as "-------"
if (lines[i].charAt(0) == '-') {
for (int j = 0; j < lines[i].length(); j++) {
if (lines[i].charAt(j) == '-') {
count++;
}
}
}
if (count == lines[i].length()) {
startPos = i + 1;
break;
}
}
List<String> users = new ArrayList<>();
while (startPos <= endPos) {
users.addAll(Arrays.asList(PATTERN.split(lines[startPos])));
startPos++;
}
return users;
}
/**
* create user
*
* @param userName user name
* @return true if creation was successful, otherwise false
*/
public static boolean createUser(String userName) {
try {
String userGroup = OSUtils.getGroup();
if (StringUtils.isEmpty(userGroup)) {
String errorLog = String.format("%s group does not exist for this operating system.", userGroup);
LoggerUtils.logError(Optional.ofNullable(logger), errorLog);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), errorLog);
return false;
}
if (isMacOS()) {
createMacUser(userName, userGroup);
} else if (isWindows()) {
createWindowsUser(userName, userGroup);
} else {
createLinuxUser(userName, userGroup);
}
return true;
} catch (Exception e) {
LoggerUtils.logError(Optional.ofNullable(logger), e);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), e);
}
return false;
}
/**
* create linux user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createLinuxUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create linux os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String cmd = String.format("sudo useradd -g %s %s", userGroup, userName);
String infoLog2 = String.format("execute cmd : %s", cmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
OSUtils.exeCmd(cmd);
}
/**
* create mac user (Supports Mac OSX 10.10+)
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createMacUser(String userName, String userGroup) throws IOException {
Optional<Logger> optionalLogger = Optional.ofNullable(logger);
Optional<Logger> optionalTaskLogger = Optional.ofNullable(taskLoggerThreadLocal.get());
String infoLog1 = String.format("create mac os user : %s", userName);
LoggerUtils.logInfo(optionalLogger, infoLog1);
LoggerUtils.logInfo(optionalTaskLogger, infoLog1);
String createUserCmd = String.format("sudo sysadminctl -addUser %s -password %s", userName, userName);
String infoLog2 = String.format("create user command : %s", createUserCmd);
LoggerUtils.logInfo(optionalLogger, infoLog2);
LoggerUtils.logInfo(optionalTaskLogger, infoLog2);
OSUtils.exeCmd(createUserCmd);
String appendGroupCmd = String.format("sudo dseditgroup -o edit -a %s -t user %s", userName, userGroup);
String infoLog3 = String.format("append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(optionalLogger, infoLog3);
LoggerUtils.logInfo(optionalTaskLogger, infoLog3);
OSUtils.exeCmd(appendGroupCmd);
}
/**
* create windows user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createWindowsUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create windows os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String userCreateCmd = String.format("net user \"%s\" /add", userName);
String infoLog2 = String.format("execute create user command : %s", userCreateCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
OSUtils.exeCmd(userCreateCmd);
String appendGroupCmd = String.format("net localgroup \"%s\" \"%s\" /add", userGroup, userName);
String infoLog3 = String.format("execute append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog3);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog3);
OSUtils.exeCmd(appendGroupCmd);
}
/**
* get system group information
*
* @return system group info
* @throws IOException errors
*/
public static String getGroup() throws IOException {
if (isWindows()) {
String currentProcUserName = System.getProperty("user.name");
String result = exeCmd(String.format("net user \"%s\"", currentProcUserName));
String line = result.split("\n")[22];
String group = PATTERN.split(line)[1];
if (group.charAt(0) == '*') {
return group.substring(1);
} else {
return group;
}
} else {
String result = exeCmd("groups");
if (StringUtils.isNotEmpty(result)) {
String[] groupInfo = result.split(" ");
return groupInfo[0];
}
}
return null;
}
/**
* get sudo command
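* e.g. getSudoCmd("hadoop", "kill -9 1234") returns "sudo -u hadoop kill -9 1234"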
* @param tenantCode tenantCode
* @param command command
* @return result of sudo execute command
*/
public static String getSudoCmd(String tenantCode, String command) {
return StringUtils.isEmpty(tenantCode) ? command : "sudo -u " + tenantCode + " " + command;
}
/**
* Execute the corresponding command of Linux or Windows
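* e.g. exeCmd("echo hello") tokenizes the string on whitespace and runs {"echo", "hello"}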
*
* @param command command
* @return result of execute command
* @throws IOException errors
*/
public static String exeCmd(String command) throws IOException {
StringTokenizer st = new StringTokenizer(command);
String[] cmdArray = new String[st.countTokens()];
for (int i = 0; st.hasMoreTokens(); i++) {
cmdArray[i] = st.nextToken();
}
return exeShell(cmdArray);
}
/**
* Execute the shell
*
* @param command command
* @return result of execute the shell
* @throws IOException errors
*/
public static String exeShell(String[] command) throws IOException {
return ShellExecutor.execCommand(command);
}
/**
* get process id
*
* @return process id
*/
public static int getProcessID() {
RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
return Integer.parseInt(runtimeMXBean.getName().split("@")[0]);
}
/**
* whether is macOS
*
* @return true if mac
*/
public static boolean isMacOS() {
return getOSName().startsWith("Mac");
}
/**
* whether is windows
*
* @return true if windows
*/
public static boolean isWindows() {
return getOSName().startsWith("Windows");
}
/**
* get current OS name
*
* @return current OS name
*/
public static String getOSName() {
return System.getProperty("os.name");
}
/**
* check memory and cpu usage
*
* @param systemCpuLoad systemCpuLoad
* @param systemReservedMemory systemReservedMemory
* @return check memory and cpu usage
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory) {
// system load average
double loadAverage = OSUtils.loadAverage();
// system available physical memory
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
if (loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory) {
logger.warn("load is too high or availablePhysicalMemorySize(G) is too low, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize, loadAverage);
return false;
} else {
return true;
}
}
/**
* check memory and cpu usage
*
* @param conf conf
* @param isMaster is master
* @return check memory and cpu usage
*/
public static Boolean checkResource(Configuration conf, Boolean isMaster) {
double systemCpuLoad;
double systemReservedMemory;
if (Boolean.TRUE.equals(isMaster)) {
systemCpuLoad = conf.getDouble(Constants.MASTER_MAX_CPULOAD_AVG, Constants.DEFAULT_MASTER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.MASTER_RESERVED_MEMORY, Constants.DEFAULT_MASTER_RESERVED_MEMORY);
} else {
systemCpuLoad = conf.getDouble(Constants.WORKER_MAX_CPULOAD_AVG, Constants.DEFAULT_WORKER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.WORKER_RESERVED_MEMORY, Constants.DEFAULT_WORKER_RESERVED_MEMORY);
}
return checkResource(systemCpuLoad, systemReservedMemory);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,006 | [Feature][Worker] Add a configuration item to set whether the tenant is automatically created on Worker | **Describe the feature**
In the dev branch, the tenant executing a task will not be automatically created in worker. This will cause issue [#4995](https://github.com/apache/incubator-dolphinscheduler/issues/4995)
In the following two scenarios, it will cause inconvenience to users
- Product or operations people want users to be created automatically instead of having to create tenants manually
- In the docker container environment, the tenants cannot be created in advance. Once the container is pulled up again, all tenants will disappear
**Which version of Dolphin Scheduler:**
- [dev]
**Is your feature request related to a problem? Please describe.**
In the docker container environment, it's a very painful thing to create new tenants on every worker every time.
**Describe the solution you'd like**
Add a configuration item to set whether the tenant is automatically created on Worker
The default value of this configuration item is `false`, but it needs to be set to `true` in the container
The name of this configuration item is `worker.tenant.auto.create`
**Additional context**
Previous dev email discussion: https://lists.apache.org/thread.html/ra44b2e69759fcc980e4ed04c1811037bf0e743e47827fc2dcd1049d6%40%3Cdev.dolphinscheduler.apache.org%3E | https://github.com/apache/dolphinscheduler/issues/5006 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-08T15:28:05Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.dolphinscheduler.common.Constants;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
public class OSUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(OSUtilsTest.class);
@Test
public void getUserList() {
List<String> userList = OSUtils.getUserList();
Assert.assertNotEquals("System user list should not be empty", userList.size(), 0);
logger.info("OS user list : {}", userList.toString());
}
@Test
public void testOSMetric(){
if (!OSUtils.isWindows()) {
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
Assert.assertTrue(availablePhysicalMemorySize >= 0.0d);
double totalMemorySize = OSUtils.totalMemorySize();
Assert.assertTrue(totalMemorySize >= 0.0d);
double loadAverage = OSUtils.loadAverage();
logger.info("loadAverage {}", loadAverage);
double memoryUsage = OSUtils.memoryUsage();
Assert.assertTrue(memoryUsage >= 0.0d);
double cpuUsage = OSUtils.cpuUsage();
Assert.assertTrue(cpuUsage >= 0.0d || cpuUsage == -1.0d);
} else {
// TODO window ut
}
}
@Test
public void getGroup() {
try {
String group = OSUtils.getGroup();
Assert.assertNotNull(group);
} catch (IOException e) {
Assert.fail("get group failed " + e.getMessage());
}
}
@Test
public void createUser() {
boolean result = OSUtils.createUser("test123");
if (result) {
Assert.assertTrue("create user test123 success", true);
} else {
Assert.assertTrue("create user test123 fail", true);
}
}
@Test
public void testGetSudoCmd() {
String cmd = "kill -9 1234";
String sudoCmd = OSUtils.getSudoCmd("test123", cmd);
Assert.assertEquals("sudo -u test123 " + cmd, sudoCmd);
}
@Test
public void exeCmd() {
if (OSUtils.isMacOS() || !OSUtils.isWindows()) {
try {
String result = OSUtils.exeCmd("echo helloWorld");
Assert.assertEquals("helloWorld\n",result);
} catch (IOException e) {
Assert.fail("exeCmd " + e.getMessage());
}
}
}
@Test
public void getProcessID(){
int processId = OSUtils.getProcessID();
Assert.assertNotEquals(0, processId);
}
@Test
public void checkResource(){
boolean resource = OSUtils.checkResource(100,0);
Assert.assertTrue(resource);
resource = OSUtils.checkResource(0,Double.MAX_VALUE);
Assert.assertFalse(resource);
Configuration configuration = new PropertiesConfiguration();
configuration.setProperty(Constants.MASTER_MAX_CPULOAD_AVG,100);
configuration.setProperty(Constants.MASTER_RESERVED_MEMORY,0);
resource = OSUtils.checkResource(configuration,true);
Assert.assertTrue(resource);
configuration.setProperty(Constants.MASTER_MAX_CPULOAD_AVG,0);
configuration.setProperty(Constants.MASTER_RESERVED_MEMORY,Double.MAX_VALUE);
resource = OSUtils.checkResource(configuration,true);
Assert.assertFalse(resource);
configuration.setProperty(Constants.WORKER_MAX_CPULOAD_AVG,100);
configuration.setProperty(Constants.WORKER_RESERVED_MEMORY,0);
resource = OSUtils.checkResource(configuration,false);
Assert.assertTrue(resource);
configuration.setProperty(Constants.WORKER_MAX_CPULOAD_AVG,0);
configuration.setProperty(Constants.WORKER_RESERVED_MEMORY,Double.MAX_VALUE);
resource = OSUtils.checkResource(configuration,false);
Assert.assertFalse(resource);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,006 | [Feature][Worker] Add a configuration item to set whether the tenant is automatically created on Worker | **Describe the feature**
In the dev branch, the tenant executing a task will not be automatically created in worker. This will cause issue [#4995](https://github.com/apache/incubator-dolphinscheduler/issues/4995)
In the following two scenarios, it will cause inconvenience to users
- Product or operations people want users to be created automatically instead of having to create tenants manually
- In the docker container environment, the tenants cannot be created in advance. Once the container is pulled up again, all tenants will disappear
**Which version of Dolphin Scheduler:**
- [dev]
**Is your feature request related to a problem? Please describe.**
In the docker container environment, it's a very painful thing to create new tenants on every worker every time.
**Describe the solution you'd like**
Add a configuration item to set whether the tenant is automatically created on Worker
The default value of this configuration item is `false`, but it needs to be set to `true` in the container
The name of this configuration item is `worker.tenant.auto.create`
**Additional context**
Previous dev email discussion: https://lists.apache.org/thread.html/ra44b2e69759fcc980e4ed04c1811037bf0e743e47827fc2dcd1049d6%40%3Cdev.dolphinscheduler.apache.org%3E | https://github.com/apache/dolphinscheduler/issues/5006 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-08T15:28:05Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/config/WorkerConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.config;
import org.apache.dolphinscheduler.common.Constants;
import java.util.Set;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.PropertySource;
import org.springframework.stereotype.Component;
@Component
@PropertySource(value = "worker.properties")
public class WorkerConfig {
@Value("${worker.exec.threads:100}")
private int workerExecThreads;
@Value("${worker.heartbeat.interval:10}")
private int workerHeartbeatInterval;
@Value("${worker.fetch.task.num:3}")
private int workerFetchTaskNum;
@Value("${worker.max.cpuload.avg:-1}")
private int workerMaxCpuloadAvg;
@Value("${worker.reserved.memory:0.3}")
private double workerReservedMemory;
@Value("#{'${worker.groups:default}'.split(',')}")
private Set<String> workerGroups;
@Value("${worker.listen.port:1234}")
private int listenPort;
@Value("${worker.host.weight:100}")
private int hostWeight;
@Value("${alert.listen.host:localhost}")
private String alertListenHost;
public int getListenPort() {
return listenPort;
}
public void setListenPort(int listenPort) {
this.listenPort = listenPort;
}
public Set<String> getWorkerGroups() {
return workerGroups;
}
public void setWorkerGroups(Set<String> workerGroups) {
this.workerGroups = workerGroups;
}
public int getWorkerExecThreads() {
return workerExecThreads;
}
public void setWorkerExecThreads(int workerExecThreads) {
this.workerExecThreads = workerExecThreads;
}
public int getWorkerHeartbeatInterval() {
return workerHeartbeatInterval;
}
public void setWorkerHeartbeatInterval(int workerHeartbeatInterval) {
this.workerHeartbeatInterval = workerHeartbeatInterval;
}
public int getWorkerFetchTaskNum() {
return workerFetchTaskNum;
}
public void setWorkerFetchTaskNum(int workerFetchTaskNum) {
this.workerFetchTaskNum = workerFetchTaskNum;
}
public double getWorkerReservedMemory() {
return workerReservedMemory;
}
public void setWorkerReservedMemory(double workerReservedMemory) {
this.workerReservedMemory = workerReservedMemory;
}
public int getWorkerMaxCpuloadAvg() {
if (workerMaxCpuloadAvg == -1) {
return Constants.DEFAULT_WORKER_CPU_LOAD;
}
return workerMaxCpuloadAvg;
}
public void setWorkerMaxCpuloadAvg(int workerMaxCpuloadAvg) {
this.workerMaxCpuloadAvg = workerMaxCpuloadAvg;
}
public int getHostWeight() {
return hostWeight;
}
public void setHostWeight(int hostWeight) {
this.hostWeight = hostWeight;
}
public String getAlertListenHost() {
return alertListenHost;
}
public void setAlertListenHost(String alertListenHost) {
this.alertListenHost = alertListenHost;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,006 | [Feature][Worker] Add a configuration item to set whether the tenant is automatically created on Worker | **Describe the feature**
In the dev branch, the tenant executing a task will not be automatically created in worker. This will cause issue [#4995](https://github.com/apache/incubator-dolphinscheduler/issues/4995)
In the following two scenarios, it will cause inconvenience to users
- Product or operations people want users to be created automatically instead of having to create tenants manually
- In the docker container environment, the tenants cannot be created in advance. Once the container is pulled up again, all tenants will disappear
**Which version of Dolphin Scheduler:**
- [dev]
**Is your feature request related to a problem? Please describe.**
In the docker container environment, it's a very painful thing to create new tenants on every worker every time.
**Describe the solution you'd like**
Add a configuration item to set whether the tenant is automatically created on Worker
The default value of this configuration item is `false`, but it needs to be set to `true` in the container
The name of this configuration item is `worker.tenant.auto.create`
**Additional context**
Previous dev email discussion: https://lists.apache.org/thread.html/ra44b2e69759fcc980e4ed04c1811037bf0e743e47827fc2dcd1049d6%40%3Cdev.dolphinscheduler.apache.org%3E | https://github.com/apache/dolphinscheduler/issues/5006 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-08T15:28:05Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.enums.Event;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.Preconditions;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.LogUtils;
import org.apache.dolphinscheduler.server.worker.cache.ResponceCache;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.util.Date;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.Channel;
/**
* worker request processor
*/
public class TaskExecuteProcessor implements NettyRequestProcessor {
private static final Logger logger = LoggerFactory.getLogger(TaskExecuteProcessor.class);
/**
* worker config
*/
private final WorkerConfig workerConfig;
/**
* task callback service
*/
private final TaskCallbackService taskCallbackService;
/**
* alert client service
*/
private AlertClientService alertClientService;
/**
* taskExecutionContextCacheManager
*/
private final TaskExecutionContextCacheManager taskExecutionContextCacheManager;
/**
* task execute manager
*/
private final WorkerManagerThread workerManager;
public TaskExecuteProcessor() {
this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class);
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
this.workerManager = SpringApplicationContext.getBean(WorkerManagerThread.class);
}
/**
* Pre-cache the task so that a kill request arriving before execution starts can still find it in the cache
*
* @param taskExecutionContext task
*/
private void setTaskCache(TaskExecutionContext taskExecutionContext) {
TaskExecutionContext preTaskCache = new TaskExecutionContext();
preTaskCache.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
taskExecutionContextCacheManager.cacheTaskExecutionContext(preTaskCache);
}
public TaskExecuteProcessor(AlertClientService alertClientService) {
this();
this.alertClientService = alertClientService;
}
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.TASK_EXECUTE_REQUEST == command.getType(),
String.format("invalid command type : %s", command.getType()));
TaskExecuteRequestCommand taskRequestCommand = JSONUtils.parseObject(
command.getBody(), TaskExecuteRequestCommand.class);
logger.info("received command : {}", taskRequestCommand);
if (taskRequestCommand == null) {
logger.error("task execute request command is null");
return;
}
String contextJson = taskRequestCommand.getTaskExecutionContext();
TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(contextJson, TaskExecutionContext.class);
if (taskExecutionContext == null) {
logger.error("task execution context is null");
return;
}
setTaskCache(taskExecutionContext);
// custom logger
Logger taskLogger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort()));
taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
// local execute path
String execLocalPath = getExecLocalPath(taskExecutionContext);
logger.info("task instance local execute path : {} ", execLocalPath);
taskExecutionContext.setExecutePath(execLocalPath);
FileUtils.taskLoggerThreadLocal.set(taskLogger);
try {
FileUtils.createWorkDirIfAbsent(execLocalPath);
} catch (Throwable ex) {
String errorLog = String.format("create execLocalPath : %s", execLocalPath);
LoggerUtils.logError(Optional.of(logger), errorLog, ex);
LoggerUtils.logError(Optional.ofNullable(taskLogger), errorLog, ex);
taskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
}
FileUtils.taskLoggerThreadLocal.remove();
taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(),
new NettyRemoteChannel(channel, command.getOpaque()));
// delay task process
long remainTime = DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(), taskExecutionContext.getDelayTime() * 60L);
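// a positive remainTime means the configured delay (delayTime, in minutes) has not elapsed yet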
if (remainTime > 0) {
logger.info("delay the execution of task instance {}, delay time: {} s", taskExecutionContext.getTaskInstanceId(), remainTime);
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.DELAY_EXECUTION);
taskExecutionContext.setStartTime(null);
} else {
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
taskExecutionContext.setStartTime(new Date());
}
this.doAck(taskExecutionContext);
// submit task to manager
if (!workerManager.offer(new TaskExecuteThread(taskExecutionContext, taskCallbackService, taskLogger, alertClientService))) {
logger.info("submit task to manager error, queue is full, queue size is {}", workerManager.getQueueSize());
}
}
private void doAck(TaskExecutionContext taskExecutionContext) {
// tell master that task is in executing
TaskExecuteAckCommand ackCommand = buildAckCommand(taskExecutionContext);
ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command(), Event.ACK);
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command());
}
/**
* build ack command
*
* @param taskExecutionContext taskExecutionContext
* @return TaskExecuteAckCommand
*/
private TaskExecuteAckCommand buildAckCommand(TaskExecutionContext taskExecutionContext) {
TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode());
ackCommand.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
ackCommand.setHost(taskExecutionContext.getHost());
ackCommand.setStartTime(taskExecutionContext.getStartTime());
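// SQL and PROCEDURE tasks do not use a local execute path, so it is omitted from the ack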
if (taskExecutionContext.getTaskType().equals(TaskType.SQL.name()) || taskExecutionContext.getTaskType().equals(TaskType.PROCEDURE.name())) {
ackCommand.setExecutePath(null);
} else {
ackCommand.setExecutePath(taskExecutionContext.getExecutePath());
}
taskExecutionContext.setLogPath(ackCommand.getLogPath());
return ackCommand;
}
/**
* get execute local path
*
* @param taskExecutionContext taskExecutionContext
* @return execute local path
*/
private String getExecLocalPath(TaskExecutionContext taskExecutionContext) {
return FileUtils.getProcessExecDir(taskExecutionContext.getProjectId(),
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,006 | [Feature][Worker] Add a configuration item to set whether the tenant is automatically created on Worker | **Describe the feature**
In the dev branch, the tenant executing a task will not be automatically created in worker. This will cause issue [#4995](https://github.com/apache/incubator-dolphinscheduler/issues/4995)
In the following two scenarios, it will cause inconvenience to users
- Product or operations people want users to be created automatically instead of having to create tenants manually
- In the docker container environment, the tenants cannot be created in advance. Once the container is pulled up again, all tenants will disappear
**Which version of Dolphin Scheduler:**
- [dev]
**Is your feature request related to a problem? Please describe.**
In the docker container environment, it's a very painful thing to create new tenants on every worker every time.
**Describe the solution you'd like**
Add a configuration item to set whether the tenant is automatically created on Worker
The default value of this configuration item is `false`, but it needs to be set to `true` in the container
The name of this configuration item is `worker.tenant.auto.create`
**Additional context**
Previous dev email discussion: https://lists.apache.org/thread.html/ra44b2e69759fcc980e4ed04c1811037bf0e743e47827fc2dcd1049d6%40%3Cdev.dolphinscheduler.apache.org%3E | https://github.com/apache/dolphinscheduler/issues/5006 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-08T15:28:05Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-server/src/main/resources/worker.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# worker execute thread num
#worker.exec.threads=100
# worker heartbeat interval
#worker.heartbeat.interval=10
# the worker server can work only when the system load average is less than this value. default value -1: the number of cpu cores * 2
#worker.max.cpuload.avg=-1
# the worker server can work only when the available memory is larger than this reserved value. default value: physical memory * 1/6, unit is G.
#worker.reserved.memory=0.3
# worker listener port
#worker.listen.port=1234
# default worker groups
#worker.groups=default
# default worker host weight
#worker.host.weight=100
# alert server listener host
alert.listen.host=localhost
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,995 | [Bug][api] select an existing tenant, the workflow fails to run, indicating that the tenant does not exist |
![image](https://user-images.githubusercontent.com/55787491/110239702-40d3d580-7f83-11eb-8cb9-14f0110ac478.png)
![image](https://user-images.githubusercontent.com/55787491/110239708-47fae380-7f83-11eb-8c43-5435143e5c7b.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/4995 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-07T12:25:55Z" | java | "2021-03-18T10:34:42Z" | docker/build/conf/dolphinscheduler/worker.properties.tpl | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# worker execute thread num
worker.exec.threads=${WORKER_EXEC_THREADS}
# worker heartbeat interval
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
# the worker server can work only when the system load average is less than this value. default value -1: the number of cpu cores * 2
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
# the worker server can work only when the available memory is larger than this reserved value. default value: physical memory * 1/6, unit is G.
worker.reserved.memory=${WORKER_RESERVED_MEMORY}
# worker listener port
worker.listen.port=${WORKER_LISTEN_PORT}
# default worker groups
worker.groups=${WORKER_GROUPS}
# default worker host weight
worker.host.weight=${WORKER_HOST_WEIGHT}
# alert server listener host
alert.listen.host=${ALERT_LISTEN_HOST}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,995 | [Bug][api] select an existing tenant, the workflow fails to run, indicating that the tenant does not exist |
![image](https://user-images.githubusercontent.com/55787491/110239702-40d3d580-7f83-11eb-8cb9-14f0110ac478.png)
![image](https://user-images.githubusercontent.com/55787491/110239708-47fae380-7f83-11eb-8c43-5435143e5c7b.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/4995 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-07T12:25:55Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.shell.ShellExecutor;
import org.apache.commons.configuration.Configuration;
import java.lang.management.OperatingSystemMXBean;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.math.RoundingMode;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.StringTokenizer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
/**
* os utils
*/
public class OSUtils {
private static final Logger logger = LoggerFactory.getLogger(OSUtils.class);
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private static final SystemInfo SI = new SystemInfo();
public static final String TWO_DECIMAL = "0.00";
/**
* return -1 when the function can not get hardware env info
* e.g {@link OSUtils#loadAverage()} {@link OSUtils#cpuUsage()}
*/
public static final double NEGATIVE_ONE = -1;
private static HardwareAbstractionLayer hal = SI.getHardware();
private OSUtils() {
throw new UnsupportedOperationException("Construct OSUtils");
}
/**
* Initialization regularization, solve the problem of pre-compilation performance,
* avoid the thread safety problem of multi-thread operation
*/
private static final Pattern PATTERN = Pattern.compile("\\s+");
/**
* get memory usage
* Keep 2 decimal
*
* @return percent %
*/
public static double memoryUsage() {
GlobalMemory memory = hal.getMemory();
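// used memory = total - available - swap in use; the "* 0.1 ... * 10" pair promotes the long arithmetic to double so the division is floating-point rather than integer division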
double memoryUsage = (memory.getTotal() - memory.getAvailable() - memory.getSwapUsed()) * 0.1 / memory.getTotal() * 10;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(memoryUsage));
}
/**
* get available physical memory size
* <p>
* Keep 2 decimal
*
* @return available Physical Memory Size, unit: G
*/
public static double availablePhysicalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = (memory.getAvailable() + memory.getSwapUsed()) / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* get total physical memory size
* <p>
* Keep 2 decimal
*
* @return available Physical Memory Size, unit: G
*/
public static double totalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = memory.getTotal() / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* load average
*
* @return load average
*/
public static double loadAverage() {
double loadAverage;
try {
OperatingSystemMXBean osBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
loadAverage = osBean.getSystemLoadAverage();
} catch (Exception e) {
logger.error("get operation system load average exception, try another method ", e);
loadAverage = hal.getProcessor().getSystemLoadAverage();
if (Double.isNaN(loadAverage)) {
return NEGATIVE_ONE;
}
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(loadAverage));
}
/**
* get cpu usage
*
* @return cpu usage
*/
public static double cpuUsage() {
CentralProcessor processor = hal.getProcessor();
double cpuUsage = processor.getSystemCpuLoad();
if (Double.isNaN(cpuUsage)) {
return NEGATIVE_ONE;
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(cpuUsage));
}
public static List<String> getUserList() {
try {
if (isMacOS()) {
return getUserListFromMac();
} else if (isWindows()) {
return getUserListFromWindows();
} else {
return getUserListFromLinux();
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return Collections.emptyList();
}
/**
* get user list from linux
*
* @return user list
*/
private static List<String> getUserListFromLinux() throws IOException {
List<String> userList = new ArrayList<>();
try (BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(new FileInputStream("/etc/passwd")))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
if (line.contains(":")) {
String[] userInfo = line.split(":");
userList.add(userInfo[0]);
}
}
}
return userList;
}
/**
* get user list from mac
*
* @return user list
*/
private static List<String> getUserListFromMac() throws IOException {
String result = exeCmd("dscl . list /users");
if (StringUtils.isNotEmpty(result)) {
return Arrays.asList(result.split("\n"));
}
return Collections.emptyList();
}
/**
* get user list from windows
*
* @return user list
*/
private static List<String> getUserListFromWindows() throws IOException {
String result = exeCmd("net user");
String[] lines = result.split("\n");
int startPos = 0;
int endPos = lines.length - 2;
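// scan for the dashed separator line that `net user` prints before the user names; startPos becomes the first line after it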
for (int i = 0; i < lines.length; i++) {
if (lines[i].isEmpty()) {
continue;
}
int count = 0;
if (lines[i].charAt(0) == '-') {
for (int j = 0; j < lines[i].length(); j++) {
if (lines[i].charAt(j) == '-') {
count++;
}
}
}
if (count == lines[i].length()) {
startPos = i + 1;
break;
}
}
List<String> users = new ArrayList<>();
while (startPos <= endPos) {
users.addAll(Arrays.asList(PATTERN.split(lines[startPos])));
startPos++;
}
return users;
}
/**
* create user
*
* @param userName user name
* @return true if creation was successful, otherwise false
*/
public static boolean createUser(String userName) {
try {
String userGroup = OSUtils.getGroup();
if (StringUtils.isEmpty(userGroup)) {
String errorLog = "the default user group does not exist for this operating system.";
LoggerUtils.logError(Optional.ofNullable(logger), errorLog);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), errorLog);
return false;
}
if (isMacOS()) {
createMacUser(userName, userGroup);
} else if (isWindows()) {
createWindowsUser(userName, userGroup);
} else {
createLinuxUser(userName, userGroup);
}
return true;
} catch (Exception e) {
LoggerUtils.logError(Optional.ofNullable(logger), e);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), e);
}
return false;
}
/**
* create linux user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createLinuxUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create linux os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String cmd = String.format("sudo useradd -g %s %s", userGroup, userName);
String infoLog2 = String.format("execute cmd : %s", cmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
OSUtils.exeCmd(cmd);
}
/**
* create mac user (Supports Mac OSX 10.10+)
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createMacUser(String userName, String userGroup) throws IOException {
Optional<Logger> optionalLogger = Optional.ofNullable(logger);
Optional<Logger> optionalTaskLogger = Optional.ofNullable(taskLoggerThreadLocal.get());
String infoLog1 = String.format("create mac os user : %s", userName);
LoggerUtils.logInfo(optionalLogger, infoLog1);
LoggerUtils.logInfo(optionalTaskLogger, infoLog1);
String createUserCmd = String.format("sudo sysadminctl -addUser %s -password %s", userName, userName);
String infoLog2 = String.format("create user command : %s", createUserCmd);
LoggerUtils.logInfo(optionalLogger, infoLog2);
LoggerUtils.logInfo(optionalTaskLogger, infoLog2);
OSUtils.exeCmd(createUserCmd);
String appendGroupCmd = String.format("sudo dseditgroup -o edit -a %s -t user %s", userName, userGroup);
String infoLog3 = String.format("append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(optionalLogger, infoLog3);
LoggerUtils.logInfo(optionalTaskLogger, infoLog3);
OSUtils.exeCmd(appendGroupCmd);
}
/**
* create windows user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createWindowsUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create windows os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String userCreateCmd = String.format("net user \"%s\" /add", userName);
String infoLog2 = String.format("execute create user command : %s", userCreateCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
OSUtils.exeCmd(userCreateCmd);
String appendGroupCmd = String.format("net localgroup \"%s\" \"%s\" /add", userGroup, userName);
String infoLog3 = String.format("execute append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog3);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog3);
OSUtils.exeCmd(appendGroupCmd);
}
/**
* get system group information
*
* @return system group info
* @throws IOException errors
*/
public static String getGroup() throws IOException {
if (isWindows()) {
String currentProcUserName = System.getProperty("user.name");
String result = exeCmd(String.format("net user \"%s\"", currentProcUserName));
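// position-based parsing: the group name is expected on the 23rd line (index 22) of the `net user <name>` output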
String line = result.split("\n")[22];
String group = PATTERN.split(line)[1];
if (group.charAt(0) == '*') {
return group.substring(1);
} else {
return group;
}
} else {
String result = exeCmd("groups");
if (StringUtils.isNotEmpty(result)) {
String[] groupInfo = result.split(" ");
return groupInfo[0];
}
}
return null;
}
/**
* get sudo command
* @param tenantCode tenantCode
* @param command command
* @return result of sudo execute command
*/
public static String getSudoCmd(String tenantCode, String command) {
return StringUtils.isEmpty(tenantCode) ? command : "sudo -u " + tenantCode + " " + command;
}
/**
* Execute the corresponding command of Linux or Windows
*
* @param command command
* @return result of execute command
* @throws IOException errors
*/
public static String exeCmd(String command) throws IOException {
StringTokenizer st = new StringTokenizer(command);
String[] cmdArray = new String[st.countTokens()];
for (int i = 0; st.hasMoreTokens(); i++) {
cmdArray[i] = st.nextToken();
}
return exeShell(cmdArray);
}
/**
* Execute the shell
*
* @param command command
* @return result of execute the shell
* @throws IOException errors
*/
public static String exeShell(String[] command) throws IOException {
return ShellExecutor.execCommand(command);
}
/**
* get process id
*
* @return process id
*/
public static int getProcessID() {
RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
return Integer.parseInt(runtimeMXBean.getName().split("@")[0]);
}
/**
* whether is macOS
*
* @return true if mac
*/
public static boolean isMacOS() {
return getOSName().startsWith("Mac");
}
/**
* whether is windows
*
* @return true if windows
*/
public static boolean isWindows() {
return getOSName().startsWith("Windows");
}
/**
* get current OS name
*
* @return current OS name
*/
public static String getOSName() {
return System.getProperty("os.name");
}
/**
* check memory and cpu usage
*
* @param systemCpuLoad systemCpuLoad
* @param systemReservedMemory systemReservedMemory
* @return check memory and cpu usage
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory) {
// system load average
double loadAverage = OSUtils.loadAverage();
// system available physical memory
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
if (loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory) {
logger.warn("load is too high or availablePhysicalMemorySize(G) is too low, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize, loadAverage);
return false;
} else {
return true;
}
}
/**
* check memory and cpu usage
*
* @param conf conf
* @param isMaster is master
* @return check memory and cpu usage
*/
public static Boolean checkResource(Configuration conf, Boolean isMaster) {
double systemCpuLoad;
double systemReservedMemory;
if (Boolean.TRUE.equals(isMaster)) {
systemCpuLoad = conf.getDouble(Constants.MASTER_MAX_CPULOAD_AVG, Constants.DEFAULT_MASTER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.MASTER_RESERVED_MEMORY, Constants.DEFAULT_MASTER_RESERVED_MEMORY);
} else {
systemCpuLoad = conf.getDouble(Constants.WORKER_MAX_CPULOAD_AVG, Constants.DEFAULT_WORKER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.WORKER_RESERVED_MEMORY, Constants.DEFAULT_WORKER_RESERVED_MEMORY);
}
return checkResource(systemCpuLoad, systemReservedMemory);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,995 | [Bug][api] select an existing tenant, the workflow fails to run, indicating that the tenant does not exist |
![image](https://user-images.githubusercontent.com/55787491/110239702-40d3d580-7f83-11eb-8cb9-14f0110ac478.png)
![image](https://user-images.githubusercontent.com/55787491/110239708-47fae380-7f83-11eb-8c43-5435143e5c7b.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/4995 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-07T12:25:55Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.dolphinscheduler.common.Constants;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
public class OSUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(OSUtilsTest.class);
@Test
public void getUserList() {
List<String> userList = OSUtils.getUserList();
Assert.assertNotEquals("System user list should not be empty", userList.size(), 0);
logger.info("OS user list : {}", userList.toString());
}
@Test
public void testOSMetric(){
if (!OSUtils.isWindows()) {
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
Assert.assertTrue(availablePhysicalMemorySize >= 0.0d);
double totalMemorySize = OSUtils.totalMemorySize();
Assert.assertTrue(totalMemorySize >= 0.0d);
double loadAverage = OSUtils.loadAverage();
logger.info("loadAverage {}", loadAverage);
double memoryUsage = OSUtils.memoryUsage();
Assert.assertTrue(memoryUsage >= 0.0d);
double cpuUsage = OSUtils.cpuUsage();
Assert.assertTrue(cpuUsage >= 0.0d || cpuUsage == -1.0d);
} else {
// TODO window ut
}
}
@Test
public void getGroup() {
try {
String group = OSUtils.getGroup();
Assert.assertNotNull(group);
} catch (IOException e) {
Assert.fail("get group failed " + e.getMessage());
}
}
@Test
public void createUser() {
boolean result = OSUtils.createUser("test123");
if (result) {
Assert.assertTrue("create user test123 success", true);
} else {
Assert.assertTrue("create user test123 fail", true);
}
}
@Test
public void testGetSudoCmd() {
String cmd = "kill -9 1234";
String sudoCmd = OSUtils.getSudoCmd("test123", cmd);
Assert.assertEquals("sudo -u test123 " + cmd, sudoCmd);
}
@Test
public void exeCmd() {
if(OSUtils.isMacOS() || !OSUtils.isWindows()){
try {
String result = OSUtils.exeCmd("echo helloWorld");
Assert.assertEquals("helloWorld\n",result);
} catch (IOException e) {
Assert.fail("exeCmd " + e.getMessage());
}
}
}
@Test
public void getProcessID(){
int processId = OSUtils.getProcessID();
Assert.assertNotEquals(0, processId);
}
@Test
public void checkResource(){
boolean resource = OSUtils.checkResource(100,0);
Assert.assertTrue(resource);
resource = OSUtils.checkResource(0,Double.MAX_VALUE);
Assert.assertFalse(resource);
Configuration configuration = new PropertiesConfiguration();
configuration.setProperty(Constants.MASTER_MAX_CPULOAD_AVG,100);
configuration.setProperty(Constants.MASTER_RESERVED_MEMORY,0);
resource = OSUtils.checkResource(configuration,true);
Assert.assertTrue(resource);
configuration.setProperty(Constants.MASTER_MAX_CPULOAD_AVG,0);
configuration.setProperty(Constants.MASTER_RESERVED_MEMORY,Double.MAX_VALUE);
resource = OSUtils.checkResource(configuration,true);
Assert.assertFalse(resource);
configuration.setProperty(Constants.WORKER_MAX_CPULOAD_AVG,100);
configuration.setProperty(Constants.WORKER_RESERVED_MEMORY,0);
resource = OSUtils.checkResource(configuration,false);
Assert.assertTrue(resource);
configuration.setProperty(Constants.WORKER_MAX_CPULOAD_AVG,0);
configuration.setProperty(Constants.WORKER_RESERVED_MEMORY,Double.MAX_VALUE);
resource = OSUtils.checkResource(configuration,false);
Assert.assertFalse(resource);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,995 | [Bug][api] select an existing tenant, the workflow fails to run, indicating that the tenant does not exist |
![image](https://user-images.githubusercontent.com/55787491/110239702-40d3d580-7f83-11eb-8cb9-14f0110ac478.png)
![image](https://user-images.githubusercontent.com/55787491/110239708-47fae380-7f83-11eb-8c43-5435143e5c7b.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/4995 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-07T12:25:55Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/config/WorkerConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.config;
import org.apache.dolphinscheduler.common.Constants;
import java.util.Set;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.PropertySource;
import org.springframework.stereotype.Component;
@Component
@PropertySource(value = "worker.properties")
public class WorkerConfig {
@Value("${worker.exec.threads:100}")
private int workerExecThreads;
@Value("${worker.heartbeat.interval:10}")
private int workerHeartbeatInterval;
@Value("${worker.fetch.task.num:3}")
private int workerFetchTaskNum;
@Value("${worker.max.cpuload.avg:-1}")
private int workerMaxCpuloadAvg;
@Value("${worker.reserved.memory:0.3}")
private double workerReservedMemory;
@Value("#{'${worker.groups:default}'.split(',')}")
private Set<String> workerGroups;
@Value("${worker.listen.port:1234}")
private int listenPort;
@Value("${worker.host.weight:100}")
private int hostWeight;
@Value("${alert.listen.host:localhost}")
private String alertListenHost;
public int getListenPort() {
return listenPort;
}
public void setListenPort(int listenPort) {
this.listenPort = listenPort;
}
public Set<String> getWorkerGroups() {
return workerGroups;
}
public void setWorkerGroups(Set<String> workerGroups) {
this.workerGroups = workerGroups;
}
public int getWorkerExecThreads() {
return workerExecThreads;
}
public void setWorkerExecThreads(int workerExecThreads) {
this.workerExecThreads = workerExecThreads;
}
public int getWorkerHeartbeatInterval() {
return workerHeartbeatInterval;
}
public void setWorkerHeartbeatInterval(int workerHeartbeatInterval) {
this.workerHeartbeatInterval = workerHeartbeatInterval;
}
public int getWorkerFetchTaskNum() {
return workerFetchTaskNum;
}
public void setWorkerFetchTaskNum(int workerFetchTaskNum) {
this.workerFetchTaskNum = workerFetchTaskNum;
}
public double getWorkerReservedMemory() {
return workerReservedMemory;
}
public void setWorkerReservedMemory(double workerReservedMemory) {
this.workerReservedMemory = workerReservedMemory;
}
public int getWorkerMaxCpuloadAvg() {
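// -1 means not configured; fall back to the default (the number of cpu cores * 2, as documented in worker.properties)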
if (workerMaxCpuloadAvg == -1) {
return Constants.DEFAULT_WORKER_CPU_LOAD;
}
return workerMaxCpuloadAvg;
}
public void setWorkerMaxCpuloadAvg(int workerMaxCpuloadAvg) {
this.workerMaxCpuloadAvg = workerMaxCpuloadAvg;
}
public int getHostWeight() {
return hostWeight;
}
public void setHostWeight(int hostWeight) {
this.hostWeight = hostWeight;
}
public String getAlertListenHost() {
return alertListenHost;
}
public void setAlertListenHost(String alertListenHost) {
this.alertListenHost = alertListenHost;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,995 | [Bug][api] select an existing tenant, the workflow fails to run, indicating that the tenant does not exist |
![image](https://user-images.githubusercontent.com/55787491/110239702-40d3d580-7f83-11eb-8cb9-14f0110ac478.png)
![image](https://user-images.githubusercontent.com/55787491/110239708-47fae380-7f83-11eb-8c43-5435143e5c7b.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/4995 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-07T12:25:55Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.enums.Event;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.Preconditions;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.LogUtils;
import org.apache.dolphinscheduler.server.worker.cache.ResponceCache;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.util.Date;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.Channel;
/**
* worker request processor
*/
public class TaskExecuteProcessor implements NettyRequestProcessor {
private static final Logger logger = LoggerFactory.getLogger(TaskExecuteProcessor.class);
/**
* worker config
*/
private final WorkerConfig workerConfig;
/**
* task callback service
*/
private final TaskCallbackService taskCallbackService;
/**
* alert client service
*/
private AlertClientService alertClientService;
/**
* taskExecutionContextCacheManager
*/
private final TaskExecutionContextCacheManager taskExecutionContextCacheManager;
/*
* task execute manager
*/
private final WorkerManagerThread workerManager;
public TaskExecuteProcessor() {
this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class);
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
this.workerManager = SpringApplicationContext.getBean(WorkerManagerThread.class);
}
/**
* Pre-cache the task so that, in the extreme case where a kill request arrives first, the task is not missing from the cache
*
* @param taskExecutionContext task
*/
private void setTaskCache(TaskExecutionContext taskExecutionContext) {
TaskExecutionContext preTaskCache = new TaskExecutionContext();
preTaskCache.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
taskExecutionContextCacheManager.cacheTaskExecutionContext(preTaskCache);
}
public TaskExecuteProcessor(AlertClientService alertClientService) {
this();
this.alertClientService = alertClientService;
}
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.TASK_EXECUTE_REQUEST == command.getType(),
String.format("invalid command type : %s", command.getType()));
TaskExecuteRequestCommand taskRequestCommand = JSONUtils.parseObject(
command.getBody(), TaskExecuteRequestCommand.class);
logger.info("received command : {}", taskRequestCommand);
if (taskRequestCommand == null) {
logger.error("task execute request command is null");
return;
}
String contextJson = taskRequestCommand.getTaskExecutionContext();
TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(contextJson, TaskExecutionContext.class);
if (taskExecutionContext == null) {
logger.error("task execution context is null");
return;
}
setTaskCache(taskExecutionContext);
// custom logger
Logger taskLogger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort()));
taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
// local execute path
String execLocalPath = getExecLocalPath(taskExecutionContext);
logger.info("task instance local execute path : {} ", execLocalPath);
taskExecutionContext.setExecutePath(execLocalPath);
FileUtils.taskLoggerThreadLocal.set(taskLogger);
try {
FileUtils.createWorkDirIfAbsent(execLocalPath);
} catch (Throwable ex) {
String errorLog = String.format("create execLocalPath : %s", execLocalPath);
LoggerUtils.logError(Optional.of(logger), errorLog, ex);
LoggerUtils.logError(Optional.ofNullable(taskLogger), errorLog, ex);
taskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
}
FileUtils.taskLoggerThreadLocal.remove();
taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(),
new NettyRemoteChannel(channel, command.getOpaque()));
// delay task process
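// the configured delay time is in minutes; multiply by 60 to get seconds for getRemainTime (remainTime below is logged in seconds)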
long remainTime = DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(), taskExecutionContext.getDelayTime() * 60L);
if (remainTime > 0) {
logger.info("delay the execution of task instance {}, delay time: {} s", taskExecutionContext.getTaskInstanceId(), remainTime);
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.DELAY_EXECUTION);
taskExecutionContext.setStartTime(null);
} else {
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
taskExecutionContext.setStartTime(new Date());
}
this.doAck(taskExecutionContext);
// submit task to manager
if (!workerManager.offer(new TaskExecuteThread(taskExecutionContext, taskCallbackService, taskLogger, alertClientService))) {
logger.info("submit task to manager error, queue is full, queue size is {}", workerManager.getQueueSize());
}
}
private void doAck(TaskExecutionContext taskExecutionContext) {
// tell master that task is in executing
TaskExecuteAckCommand ackCommand = buildAckCommand(taskExecutionContext);
ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command(), Event.ACK);
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command());
}
/**
* build ack command
*
* @param taskExecutionContext taskExecutionContext
* @return TaskExecuteAckCommand
*/
private TaskExecuteAckCommand buildAckCommand(TaskExecutionContext taskExecutionContext) {
TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode());
ackCommand.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
ackCommand.setHost(taskExecutionContext.getHost());
ackCommand.setStartTime(taskExecutionContext.getStartTime());
if (taskExecutionContext.getTaskType().equals(TaskType.SQL.name()) || taskExecutionContext.getTaskType().equals(TaskType.PROCEDURE.name())) {
ackCommand.setExecutePath(null);
} else {
ackCommand.setExecutePath(taskExecutionContext.getExecutePath());
}
taskExecutionContext.setLogPath(ackCommand.getLogPath());
return ackCommand;
}
/**
* get execute local path
*
* @param taskExecutionContext taskExecutionContext
* @return execute local path
*/
private String getExecLocalPath(TaskExecutionContext taskExecutionContext) {
return FileUtils.getProcessExecDir(taskExecutionContext.getProjectId(),
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,995 | [Bug][api] select an existing tenant, the workflow fails to run, indicating that the tenant does not exist |
![image](https://user-images.githubusercontent.com/55787491/110239702-40d3d580-7f83-11eb-8cb9-14f0110ac478.png)
![image](https://user-images.githubusercontent.com/55787491/110239708-47fae380-7f83-11eb-8c43-5435143e5c7b.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/4995 | https://github.com/apache/dolphinscheduler/pull/5007 | 29d42fd92d6720a8a0641e37923c6e6f38a5ae85 | f94cfc620dfd0c51010a49134a073e3848c0bd7e | "2021-03-07T12:25:55Z" | java | "2021-03-18T10:34:42Z" | dolphinscheduler-server/src/main/resources/worker.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# worker execute thread num
#worker.exec.threads=100
# worker heartbeat interval
#worker.heartbeat.interval=10
# the worker server can work only when the system load average is less than this value. default value -1: the number of cpu cores * 2
#worker.max.cpuload.avg=-1
# the worker server can work only when the available memory is larger than this reserved value. default value: physical memory * 1/6, unit is G.
#worker.reserved.memory=0.3
# worker listener port
#worker.listen.port=1234
# default worker groups
#worker.groups=default
# default worker host weight
#worker.host.weight=100
# alert server listener host
alert.listen.host=localhost
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.dag-model {
background: url("../img/dag_bg.png");
height: calc(100vh - 100px);
::selection {
background:transparent;
}
::-moz-selection {
background:transparent;
}
::-webkit-selection {
background:transparent;
}
.jsplumb-connector {
z-index: 1;
}
.endpoint-tasks {
margin-top:22px;
}
.draggable {
> span {
text-align: center;
display: block;
margin-top: -4px;
padding: 0 4px;
width: 200px;
margin-left: -81px;
position: absolute;
left: 0;
bottom: -12px;
}
.fa {
display: inline-block;
position: absolute;
right: -8px;
top: -8px;
z-index: 2;
cursor: pointer;
}
.icos {
display: inline-block;
cursor: pointer;
}
&.active-tasks {
span {
color: #0296DF;
}
}
}
.icos {
width: 32px;
height: 32px;
margin: 2px;
border-radius: 3px;
position: relative;
z-index: 9;
}
.icos-SHELL {
background: url("../img/toolbar_SHELL.png") no-repeat 50% 50%;
}
.icos-WATERDROP {
background: url("../img/toolbar_WATERDROP.png") no-repeat 50% 50%;
}
.icos-SUB_PROCESS {
background: url("../img/toolbar_SUB_PROCESS.png") no-repeat 50% 50%;
}
.icos-PROCEDURE {
background: url("../img/toolbar_PROCEDURE.png") no-repeat 50% 50%;
}
.icos-SQL {
background: url("../img/toolbar_SQL.png") no-repeat 50% 50%;
}
.icos-SPARK {
background: url("../img/toolbar_SPARK.png") no-repeat 50% 50%;
}
.icos-FLINK {
background: url("../img/toolbar_FLINK.png") no-repeat 50% 50%;
}
.icos-MR {
background: url("../img/toolbar_MR.png") no-repeat 50% 50%;
}
.icos-PYTHON {
background: url("../img/toolbar_PYTHON.png") no-repeat 50% 50%;
}
.icos-DEPENDENT {
background: url("../img/toolbar_DEPENDENT.png") no-repeat 50% 50%;
}
.icos-HTTP {
background: url("../img/toolbar_HTTP.png") no-repeat 50% 50%;
}
.icos-DATAX {
background: url("../img/toolbar_DATAX.png") no-repeat 50% 50%;
}
.icos-SQOOP {
background: url("../img/toolbar_SQOOP.png") no-repeat 50% 50%;
}
.icos-CONDITIONS {
background: url("../img/toolbar_CONDITIONS.png") no-repeat 50% 50%;
}
.toolbar {
width: 60px;
height: 100%;
background: #F2F3F7;
float: left;
border-radius: 0 0 0 3px;
.title {
height: 40px;
line-height: 40px;
background: #40434C;
text-align: center;
border-radius: 3px 0 0 0;
span {
font-size: 14px;
color: #fff;
font-weight: bold;
}
}
.toolbar-btn {
overflow: hidden;
padding: 8px 11px 0 11px;
.bar-box {
width: 36px;
height: 36px;
float: left;
margin-bottom: 3px;
border-radius: 3px;
.disabled {
.icos {
opacity: .6;
-webkit-filter: grayscale(100%);
-moz-filter: grayscale(100%);
-ms-filter: grayscale(100%);
-o-filter: grayscale(100%);
filter: grayscale(100%);
filter: gray;
}
}
&:nth-child(odd) {
margin-right: 6px;
}
&.active {
background: #e1e2e3;
}
}
}
}
.dag-contect {
float: left;
width: calc(100% - 60px);
height: 100%;
.dag-toolbar {
height: 40px;
background: #F2F3F7;
position: relative;
border-radius: 0 3px 0 0;
.ans-btn-text {
color: #337ab7;
.ans-icon {
font-size: 16px;
}
}
.assist-btn {
position: absolute;
left: 10px;
top: 7px;
>.name {
padding-left: 6px;
vertical-align: middle;
}
>.copy-name {
cursor: pointer;
padding-left: 4px;
position: relative;
top: -2px;
&:hover {
i {
color: #47c3ff;
}
}
i {
color: #333;
font-size: 18px;
vertical-align: middle;
}
}
}
.save-btn {
position: absolute;
right: 8px;
top: 6px;
.operation {
overflow: hidden;
display: inline-block;
a {
float: left;
width: 28px;
height: 28px;
text-align: center;
line-height: 28px;
margin-left: 6px;
border-radius: 3px;
vertical-align: middle;
i {
color: #333;
}
&.active {
// background: #e1e2e3;
i {
color: #2d8cf0;
}
}
&.disable {
i {
color: #bbb;
}
}
}
}
}
}
.dag-container {
height: calc(100% - 40px);
overflow-x: auto;
&::-webkit-scrollbar{
width: 9px;
}
}
}
.tools-model {
height: 60px;
background: #F4F5F4;
border-radius: 3px 3px 0px 0px;
}
}
#screen {
margin-right: 5px;
}
.v-modal-custom-log {
z-index: 101;
}
svg path:hover {
cursor: pointer;
}
#chart-container .ui-selecting {
span {
color: #0296DF;
}
}
#chart-container .ui-selected {
span {
color: #0296DF;
}
}
.contextmenu {
position: fixed;
width: 90px;
background: #fff;
border-radius: 3px;
box-shadow: 0 2px 4px 1px rgba(0, 0, 0, 0.1);
padding: 4px 4px;
visibility:hidden;
z-index: 10000;
a {
height: 30px;
line-height: 28px;
display: block;
i {
font-size: 16px;
vertical-align: middle;
margin-left: 10px;
}
span {
vertical-align: middle;
font-size: 12px;
color: #666;
padding-left: 2px;
}
&:hover {
background: #f6faff;
}
&#startRunning {
i {
color: #35cd75;
}
}
&#editNodes {
i {
color: #0097e0;
}
}
&#removeNodes {
i {
color: #f04d4e;
}
}
&#copyNodes {
i {
color: #FABC05;
}
}
&.disbled {
i,span {
color: #aaa !important;
}
}
}
}
.jtk-demo {
//min-width: calc(100% - 220px);
width: 8000px;
height: 5000px;
svg:not(:root){
z-index: 11;
}
}
.jtk-demo-canvas {
position: relative;
height: 100%;
display: flex;
}
.jtk-bootstrap {
min-height: 100vh;
display: flex;
flex-direction: column;
}
.jtk-bootstrap .jtk-page-container {
display: flex;
width: 100vw;
justify-content: center;
flex: 1;
}
.jtk-bootstrap .jtk-container {
width: 60%;
max-width: 800px;
}
.jtk-bootstrap-wide .jtk-container {
width: 80%;
max-width: 1187px;
}
.jtk-demo-main {
position: relative;
margin-top: 98px;
}
.jtk-demo-main .description {
font-size: 13px;
margin-top: 25px;
padding: 13px;
margin-bottom: 22px;
background-color: #f4f5ef;
}
.jtk-demo-main .description li {
list-style-type: disc !important;
}
.canvas-wide {
padding-top: 10px;
margin-left: 0;
-ms-transition: all .1s ease-out;
-moz-transition: all .1s ease-out;
-webkit-transition: all .1s ease-out;
-o-transition: all .1s ease-out;
}
.jtk-demo-dataset {
text-align: left;
max-height: 600px;
overflow: auto;
}
.demo-title {
float: left;
font-size: 18px;
}
.controls {
top: 25px;
color: #FFF;
margin-right: 10px;
position: absolute;
left: 25px;
z-index: 1;
}
.controls i {
background-color: #3E7E9C;
border-radius: 4px;
cursor: pointer;
margin-right: 0;
padding: 4px;
}
.w {
position: absolute;
z-index: 4;
font-size: 11px;
-webkit-transition: background-color 0.25s ease-in;
-moz-transition: background-color 0.25s ease-in;
transition: background-color 0.25s ease-in;
border: 7px solid transparent;
border-bottom: 30px solid transparent;
.icos {
width: 50px;
height: 50px;
box-shadow: 2px 2px 19px #e0e0e0;
-o-box-shadow: 2px 2px 19px #e0e0e0;
-webkit-box-shadow: 2px 2px 19px #e0e0e0;
-moz-box-shadow: 2px 2px 19px #e0e0e0;
-moz-border-radius: 8px;
border-radius: 8px;
opacity: 0.8;
cursor: move;
background-color: #fff;
}
.name-p {
position: absolute;
left: 50%;
top: 58px;
width: 200px;
text-align: center;
margin-left: -100px;
word-break:break-all;
}
.ban-p {
position: absolute;
left: -4px;
top: 36px;
z-index: 21;
i {
font-size: 18px;
color: #ff0000;
cursor: pointer;
}
}
.state-p {
width: 20px;
height: 20px;
position: absolute;
top: -20px;
left: 18px;
text-align: center;
cursor: pointer;
b {
font-weight: normal;
}
}
}
.aLabel {
-webkit-transition: background-color 0.25s ease-in;
-moz-transition: background-color 0.25s ease-in;
transition: background-color 0.25s ease-in;
background-color: white;
opacity: 0.8;
padding: 0.3em;
border-radius: 0.5em;
border: 1px solid #346789;
cursor: pointer;
}
.aLabel.jtk-hover,
.jtk-source-hover,
.jtk-target-hover {
.icos {
background-color: #333;
color: #333;
-ms-transition: all 0.6s ease-out;
-moz-transition: all 0.6s ease-out;
-webkit-transition: all 0.6s ease-out;
-o-transition: all 0.6s ease-out;
}
}
.jtk-tasks-active {
.icos {
background-color: #2db7f5;
color: #0097e0;
-ms-transition: all 0.6s ease-out;
-moz-transition: all 0.6s ease-out;
-webkit-transition: all 0.6s ease-out;
-o-transition: all 0.6s ease-out;
}
}
.jtk-ep {
.ep {
display: block;
}
}
.ep {
position: absolute;
top: -4%;
right: -1px;
width: 1em;
height: 1em;
z-index: 12;
background-color: orange;
cursor: pointer;
box-shadow: 0 0 2px black;
-webkit-transition: -webkit-box-shadow 0.25s ease-in;
-moz-transition: -moz-box-shadow 0.25s ease-in;
transition: box-shadow 0.25s ease-in;
border-radius:100%;
display: none;
}
.ep:hover {
box-shadow: 0 0 6px black;
}
.statemachine-demo .jtk-endpoint {
z-index: 3;
}
#canvas {
.dot-style {
opacity: 0;
}
}
.from-mirror {
width: 100%;
position: relative;
z-index: 0;
.CodeMirror {
height:auto;
min-height: 72px;
}
.CodeMirror-scroll {
height:auto;
min-height: 72px;
overflow-y: hidden;
overflow-x: auto;
}
}
.ans-modal-box.ans-drawer.ans-drawer-right.dagMask.mask {
width: 628px;
left: auto;
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/jsonBox.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="script-model">
<m-list-box>
<div slot="content">
<div class="from-mirror1">
<textarea
id="code-shell-mirror1"
name="code-shell-mirror1"
style="opacity: 0">
</textarea>
</div>
</div>
</m-list-box>
</div>
</template>
<script>
import _ from 'lodash'
import mListBox from './listBox'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
export default {
name: 'shell',
data () {
return {
// script
rawScript: ''
}
},
mixins: [disabledState],
props: {
jsonItem: String
},
methods: {
/**
* Processing code highlighting
*/
_handlerEditor () {
// editor
let self = this
editor = codemirror('code-shell-mirror1', {
mode: 'shell',
readOnly: this.isDetails
})
editor.on('change', function () {
self.$emit('getJsonBoxValue', editor.getValue())
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.setValue(this.rawScript)
return editor
}
},
watch: {},
created () {
let o = this.jsonItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.rawScript = o
}
},
mounted () {
setTimeout(() => {
this._handlerEditor()
}, 200)
},
destroyed () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-shell-mirror1'), 'keypress', this.keypress)
}
},
components: { mListBox }
}
</script>
<style lang="scss" rel="stylesheet/scss" scope>
.script-model {
width:100%;
}
.from-mirror1 {
.CodeMirror {
min-height: 600px;
max-height: 700px;
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/scriptBox.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="script-model">
<m-list-box>
<div slot="content">
<div class="from-mirror1">
<textarea
id="code-shell-mirror1"
name="code-shell-mirror1"
style="opacity: 0">
</textarea>
</div>
</div>
</m-list-box>
</div>
</template>
<script>
import _ from 'lodash'
import mListBox from './listBox'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
export default {
name: 'shell',
data () {
return {
// script
rawScript: ''
}
},
mixins: [disabledState],
props: {
item: String
},
methods: {
/**
* Processing code highlighting
*/
_handlerEditor () {
// editor
let self = this
editor = codemirror('code-shell-mirror1', {
mode: 'shell',
readOnly: this.isDetails
})
editor.on('change', function () {
self.$emit('getSriptBoxValue', editor.getValue())
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.setValue(this.rawScript)
return editor
}
},
watch: {},
created () {
let o = this.item
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.rawScript = o
}
},
mounted () {
setTimeout(() => {
this._handlerEditor()
}, 200)
},
destroyed () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-shell-mirror1'), 'keypress', this.keypress)
}
},
components: { mListBox }
}
</script>
<style lang="scss" rel="stylesheet/scss" scope>
.script-model {
width:100%;
}
.from-mirror1 {
.CodeMirror {
height: calc(70vh - 90px);
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/datax.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="datax-model">
<m-list-box>
<div slot="text">{{$t('Custom template')}}</div>
<div slot="content">
<label class="label-box">
<div style="padding-top: 5px;">
<el-switch v-model="enable" @change="_onSwitch" :disabled="isDetails"></el-switch>
</div>
</label>
</div>
</m-list-box>
<template v-if="!enable">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refDs"
@on-dsData="_onDsData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dsType,datasource:datasource }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-sql-mirror"
name="code-sql-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}</div>
<div slot="content">
<m-datasource
ref="refDt"
@on-dsData="_onDtData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dtType,datasource:datatarget }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetTable')}}</div>
<div slot="content">
<el-input
type="input"
size="small"
v-model="targetTable"
:placeholder="$t('Please enter the table of target')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Pre Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPreStatements"
@on-statement-list="_onPreStatements"
:statement-list="preStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Post Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPostStatements"
@on-statement-list="_onPostStatements"
:statement-list="postStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedByte')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedByte" :list="[0,1,10,50,100,512]">
</m-select-input>
<span>({{$t('0 means unlimited by byte')}})</span>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedRecord')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedRecord" :list="[0,500,1000,1500,2000,2500,3000]">
</m-select-input>
<span>({{$t('0 means unlimited by count')}})</span>
</div>
</m-list-box>
</template>
<template v-else>
<m-list-box>
<div slot="text">json</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-json-mirror"
name="code-json-mirror"
style="opacity: 0;">
</textarea>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</template>
<m-list-box>
<div slot="text">{{$t('Running Memory')}}</div>
<div slot="content">
<span >{{$t('Min Memory')}}</span>
<m-select-input v-model="xms" :list="[1,2,3,4]">
</m-select-input>
<span> G </span>
<span >{{$t('Max Memory')}}</span>
<m-select-input v-model="xmx" :list="[1,2,3,4]">
</m-select-input>
<span> G</span>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
      :append-to-body="true"
width="80%">
      <m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import mStatementList from './_source/statementList'
import disabledState from '@/module/mixin/disabledState'
import mSelectInput from '../_source/selectInput'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
let jsonEditor
export default {
name: 'datax',
data () {
return {
// Data Custom template
enable: false,
// Data source type
dsType: '',
// data source
datasource: '',
// Data source type
dtType: '',
// data source
datatarget: '',
// Return to the selected data source
rtDatasource: '',
// Return to the selected data target
rtDatatarget: '',
// Sql statement
sql: '',
json: '',
// target table
targetTable: '',
// Pre statements
preStatements: [],
// Post statements
postStatements: [],
// speed byte
jobSpeedByte: 0,
// speed record
jobSpeedRecord: 1000,
// Custom parameter
localParams: [],
customConfig: 0,
// jvm memory xms
xms: 1,
// jvm memory xms
xmx: 1,
scriptBoxDialog: false,
item: ''
}
},
mixins: [disabledState],
props: {
backfillItem: Object,
createNodeId: Number
},
methods: {
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
_onSwitch (is) {
if (is) {
this.customConfig = 1
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
this.customConfig = 0
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
/**
* return data source
*/
_onDsData (o) {
this.dsType = o.type
this.rtDatasource = o.datasource
},
/**
* return data target
*/
_onDtData (o) {
this.dtType = o.type
this.rtDatatarget = o.datasource
},
/**
* return pre statements
*/
_onPreStatements (a) {
this.preStatements = a
},
/**
* return post statements
*/
_onPostStatements (a) {
this.postStatements = a
},
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* verification
*/
_verification () {
if (this.customConfig) {
if (!jsonEditor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a JSON Statement(required)')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
json: jsonEditor.getValue(),
localParams: this.localParams,
xms: +this.xms,
xmx: +this.xmx
})
return true
} else {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDs._verifDatasource()) {
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDt._verifDatasource()) {
return false
}
if (!this.targetTable) {
this.$message.warning(`${i18n.$t('Please enter a Target Table(required)')}`)
return false
}
// preStatements Subcomponent verification
if (!this.$refs.refPreStatements._verifProp()) {
return false
}
// postStatements Subcomponent verification
if (!this.$refs.refPostStatements._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor.getValue(),
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements,
xms: +this.xms,
xmx: +this.xmx
})
return true
}
},
/**
* Processing code highlighting
*/
_handlerEditor () {
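      // Tear down any previous instance first: toggling the Custom template
      // switch re-renders the <textarea>, and a stale CodeMirror would
      // otherwise keep its old DOM alive (cf. issue #5089 in the sqoop form).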
this._destroyEditor()
// editor
editor = codemirror('code-sql-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
      this.changes = () => {
        this._cacheParams()
      }
      editor.on('changes', this.changes)
editor.setValue(this.sql)
return editor
},
_handlerJsonEditor () {
this._destroyJsonEditor()
// jsonEditor
jsonEditor = codemirror('code-json-mirror', {
mode: 'json',
readOnly: this.isDetails
})
this.keypress = () => {
if (!jsonEditor.getOption('readOnly')) {
jsonEditor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
jsonEditor.on('keypress', this.keypress)
jsonEditor.on('changes', () => {
// this._cacheParams()
})
jsonEditor.setValue(this.json)
return jsonEditor
},
_cacheParams () {
this.$emit('on-cache-params', {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor ? editor.getValue() : '',
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements,
xms: +this.xms,
xmx: +this.xmx
})
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
        editor.off('keypress', this.keypress)
        editor.off('changes', this.changes)
}
},
_destroyJsonEditor () {
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
        jsonEditor.off('keypress', this.keypress)
        jsonEditor.off('changes', this.changes)
}
}
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
// set jvm memory
this.xms = o.params.xms || 1
this.xmx = o.params.xmx || 1
// backfill
if (o.params.customConfig === 0) {
this.customConfig = 0
this.enable = false
this.dsType = o.params.dsType || ''
this.datasource = o.params.dataSource || ''
this.dtType = o.params.dtType || ''
this.datatarget = o.params.dataTarget || ''
this.sql = o.params.sql || ''
this.targetTable = o.params.targetTable || ''
this.jobSpeedByte = o.params.jobSpeedByte / 1024 || 0
this.jobSpeedRecord = o.params.jobSpeedRecord || 0
this.preStatements = o.params.preStatements || []
this.postStatements = o.params.postStatements || []
} else {
this.customConfig = 1
this.enable = true
        this.json = o.params.json || ''
        this.localParams = o.params.localParams || []
}
}
},
mounted () {
if (this.customConfig) {
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
destroyed () {
/**
* Destroy the editor instance
*/
if (editor) {
editor.toTextArea() // Uninstall
      editor.off('keypress', this.keypress)
}
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
      jsonEditor.off('keypress', this.keypress)
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
}
},
computed: {
cacheParams () {
return {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements
}
}
},
components: { mListBox, mDatasource, mLocalParams, mStatementList, mSelectInput, mScriptBox }
}
</script>
|
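`datax.vue` calls `_destroyEditor()` before every `_handlerEditor()`, so toggling its Custom template switch never stacks editor instances. The guard in isolation, as a sketch: `rebuildEditor` is an illustrative name, not a function in this codebase, and this is presumably the same pattern PR #5090 brings to the sqoop form further below.

```js
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'

let editor

// Re-create the editor against whichever <textarea> the template currently
// renders. Without the teardown, each toggle leaves the previous CodeMirror's
// DOM behind, which is the symptom reported in issue #5089.
function rebuildEditor (id, options, value) {
  if (editor) {
    editor.toTextArea() // detach the old instance from its textarea
  }
  editor = codemirror(id, options)
  editor.setValue(value)
  return editor
}
```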
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="python-model">
<m-list-box>
<div slot="text">{{$t('Script')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea id="code-python-mirror" name="code-python-mirror" style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<treeselect v-model="resourceList" :multiple="true" maxHeight="200" :options="resourceOptions" :normalizer="normalizer" :value-consists-of="valueConsistsOf" :disabled="isDetails" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
      :append-to-body="true"
width="80%">
      <m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mLocalParams from './_source/localParams'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
export default {
name: 'python',
data () {
return {
valueConsistsOf: 'LEAF_PRIORITY',
// script
rawScript: '',
// Custom parameter
localParams: [],
// resource(list)
resourceList: [],
// Cache ResourceList
cacheResourceList: [],
resourceOptions: [],
normalizer (node) {
return {
label: node.name
}
},
allNoResources: [],
noRes: [],
item: '',
scriptBoxDialog: false
}
},
mixins: [disabledState],
props: {
backfillItem: Object
},
methods: {
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
/**
* return resourceList
*/
// _onResourcesData (a) {
// this.resourceList = a
// },
/**
* cache resourceList
*/
_onCacheResourcesData (a) {
this.cacheResourceList = a
},
/**
* verification
*/
_verification () {
      // rawScript validation
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter script(required)')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// noRes
if (this.noRes.length > 0) {
this.$message.warning(`${i18n.$t('Please delete all non-existent resources')}`)
return false
}
// storage
this.$emit('on-params', {
resourceList: _.map(this.resourceList, v => {
return { id: v }
}),
localParams: this.localParams,
rawScript: editor.getValue()
})
return true
},
/**
* Processing code highlighting
*/
_handlerEditor () {
// editor
editor = codemirror('code-python-mirror', {
mode: 'python',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.setValue(this.rawScript)
return editor
},
    diGuiTree (item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0
? this.operationTree(item) : this.diGuiTree(item.children)
})
},
operationTree (item) {
if (item.dirctory) {
item.isDisabled = true
}
delete item.children
},
searchTree (element, id) {
      // Find the node matching the given id
if (element.id === id) {
return element
} else if (element.children !== null) {
let i
let result = null
for (i = 0; result === null && i < element.children.length; i++) {
result = this.searchTree(element.children[i], id)
}
return result
}
return null
},
dataProcess (backResource) {
let isResourceId = []
let resourceIdArr = []
if (this.resourceList.length > 0) {
this.resourceList.forEach(v => {
this.resourceOptions.forEach(v1 => {
if (this.searchTree(v1, v)) {
isResourceId.push(this.searchTree(v1, v))
}
})
})
resourceIdArr = isResourceId.map(item => {
return item.id
})
Array.prototype.diff = function (a) {
return this.filter(function (i) { return a.indexOf(i) < 0 })
}
let diffSet = this.resourceList.diff(resourceIdArr)
let optionsCmp = []
if (diffSet.length > 0) {
diffSet.forEach(item => {
backResource.forEach(item1 => {
if (item === item1.id || item === item1.res) {
optionsCmp.push(item1)
}
})
})
}
let noResources = [{
id: -1,
          name: i18n.$t('Unauthorized or deleted resources'),
          fullName: '/' + i18n.$t('Unauthorized or deleted resources'),
children: []
}]
if (optionsCmp.length > 0) {
this.allNoResources = optionsCmp
optionsCmp = optionsCmp.map(item => {
return { id: item.id, name: item.name, fullName: item.res }
})
optionsCmp.forEach(item => {
item.isNew = true
})
noResources[0].children = optionsCmp
this.resourceOptions = this.resourceOptions.concat(noResources)
}
}
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this.$emit('on-cache-params', val)
},
resourceIdArr (arr) {
let result = []
arr.forEach(item => {
this.allNoResources.forEach(item1 => {
if (item.id === item1.id) {
// resultBool = true
result.push(item1)
}
})
})
this.noRes = result
}
},
computed: {
resourceIdArr () {
let isResourceId = []
let resourceIdArr = []
if (this.resourceList.length > 0) {
this.resourceList.forEach(v => {
this.resourceOptions.forEach(v1 => {
if (this.searchTree(v1, v)) {
isResourceId.push(this.searchTree(v1, v))
}
})
})
resourceIdArr = isResourceId.map(item => {
return { id: item.id, name: item.name, res: item.fullName }
})
}
return resourceIdArr
},
cacheParams () {
return {
resourceList: this.resourceIdArr,
localParams: this.localParams
}
}
},
created () {
let item = this.store.state.dag.resourcesListS
this.diGuiTree(item)
this.resourceOptions = item
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.rawScript = o.params.rawScript || ''
// backfill resourceList
let backResource = o.params.resourceList || []
let resourceList = o.params.resourceList || []
if (resourceList.length) {
_.map(resourceList, v => {
if (!v.id) {
this.store.dispatch('dag/getResourceId', {
type: 'FILE',
fullName: '/' + v.res
}).then(res => {
this.resourceList.push(res.id)
this.dataProcess(backResource)
}).catch(e => {
this.resourceList.push(v.res)
this.dataProcess(backResource)
})
} else {
this.resourceList.push(v.id)
this.dataProcess(backResource)
}
})
this.cacheResourceList = resourceList
}
// backfill localParams
let localParams = o.params.localParams || []
if (localParams.length) {
this.localParams = localParams
}
}
},
mounted () {
setTimeout(() => {
this._handlerEditor()
}, 200)
},
  destroyed () {
    if (editor) {
      editor.toTextArea() // Uninstall
      editor.off('keypress', this.keypress)
    }
  },
components: { mLocalParams, mListBox, Treeselect, mScriptBox }
}
</script>
|
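`dataProcess` in `python.vue` (and again in `shell.vue` below) patches `Array.prototype.diff` every time it runs. A side-effect-free helper computes the same set difference without mutating a global prototype; this is a suggested sketch, not the project's code, and `resourceList`/`resourceIdArr` in the usage line stand in for the component's own variables:

```js
// items of `selected` that are absent from `known`, equivalent to the
// Array.prototype.diff patch inside dataProcess
const diff = (selected, known) => selected.filter(item => !known.includes(item))

const missing = diff(resourceList, resourceIdArr) // e.g. [12, 'old.sh']
```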
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="shell-model">
<m-list-box>
<div slot="text">{{$t('Script')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-shell-mirror"
name="code-shell-mirror"
style="opacity: 0">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<treeselect v-model="resourceList" :multiple="true" maxHeight="200" :options="options" :normalizer="normalizer" :disabled="isDetails" :value-consists-of="valueConsistsOf" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="true">
</m-local-params>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
      :append-to-body="true"
width="80%">
<m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue" @closeAble="closeAble"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mLocalParams from './_source/localParams'
import disabledState from '@/module/mixin/disabledState'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
export default {
name: 'shell',
data () {
return {
valueConsistsOf: 'LEAF_PRIORITY',
// script
rawScript: '',
// Custom parameter
localParams: [],
// resource(list)
resourceList: [],
// Cache ResourceList
cacheResourceList: [],
// define options
options: [],
normalizer (node) {
return {
label: node.name
}
},
allNoResources: [],
noRes: [],
item: '',
scriptBoxDialog: false
}
},
mixins: [disabledState],
props: {
backfillItem: Object
},
methods: {
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
// this.scriptBoxDialog = false
},
closeAble () {
// this.scriptBoxDialog = false
},
/**
* return resourceList
*
*/
_onResourcesData (a) {
this.resourceList = a
},
/**
* cache resourceList
*/
_onCacheResourcesData (a) {
this.cacheResourceList = a
},
/**
* verification
*/
_verification () {
// rawScript verification
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter script(required)')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// noRes
if (this.noRes.length > 0) {
this.$message.warning(`${i18n.$t('Please delete all non-existent resources')}`)
return false
}
// Process resourcelist
let dataProcessing = _.map(this.resourceList, v => {
return {
id: v
}
})
// storage
this.$emit('on-params', {
resourceList: dataProcessing,
localParams: this.localParams,
rawScript: editor.getValue()
})
return true
},
/**
* Processing code highlighting
*/
_handlerEditor () {
// editor
editor = codemirror('code-shell-mirror', {
mode: 'shell',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.setValue(this.rawScript)
return editor
},
    diGuiTree (item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0
? this.operationTree(item) : this.diGuiTree(item.children)
})
},
operationTree (item) {
if (item.dirctory) {
item.isDisabled = true
}
delete item.children
},
searchTree (element, id) {
      // Find the node matching the given id
if (element.id === id) {
return element
} else if (element.children !== null) {
let i
let result = null
for (i = 0; result === null && i < element.children.length; i++) {
result = this.searchTree(element.children[i], id)
}
return result
}
return null
},
dataProcess (backResource) {
let isResourceId = []
let resourceIdArr = []
if (this.resourceList.length > 0) {
this.resourceList.forEach(v => {
this.options.forEach(v1 => {
if (this.searchTree(v1, v)) {
isResourceId.push(this.searchTree(v1, v))
}
})
})
resourceIdArr = isResourceId.map(item => {
return item.id
})
Array.prototype.diff = function (a) {
return this.filter(function (i) { return a.indexOf(i) < 0 })
}
let diffSet = this.resourceList.diff(resourceIdArr)
let optionsCmp = []
if (diffSet.length > 0) {
diffSet.forEach(item => {
backResource.forEach(item1 => {
if (item === item1.id || item === item1.res) {
optionsCmp.push(item1)
}
})
})
}
let noResources = [{
id: -1,
          name: i18n.$t('Unauthorized or deleted resources'),
          fullName: '/' + i18n.$t('Unauthorized or deleted resources'),
children: []
}]
if (optionsCmp.length > 0) {
this.allNoResources = optionsCmp
optionsCmp = optionsCmp.map(item => {
return { id: item.id, name: item.name, fullName: item.res }
})
optionsCmp.forEach(item => {
item.isNew = true
})
noResources[0].children = optionsCmp
this.options = this.options.concat(noResources)
}
}
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this.$emit('on-cache-params', val)
},
resourceIdArr (arr) {
let result = []
arr.forEach(item => {
this.allNoResources.forEach(item1 => {
if (item.id === item1.id) {
// resultBool = true
result.push(item1)
}
})
})
this.noRes = result
}
},
computed: {
resourceIdArr () {
let isResourceId = []
let resourceIdArr = []
if (this.resourceList.length > 0) {
this.resourceList.forEach(v => {
this.options.forEach(v1 => {
if (this.searchTree(v1, v)) {
isResourceId.push(this.searchTree(v1, v))
}
})
})
resourceIdArr = isResourceId.map(item => {
return { id: item.id, name: item.name, res: item.fullName }
})
}
return resourceIdArr
},
cacheParams () {
return {
resourceList: this.resourceIdArr,
localParams: this.localParams
}
}
},
created () {
let item = this.store.state.dag.resourcesListS
this.diGuiTree(item)
this.options = item
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.rawScript = o.params.rawScript || ''
// backfill resourceList
let backResource = o.params.resourceList || []
let resourceList = o.params.resourceList || []
if (resourceList.length) {
_.map(resourceList, v => {
if (!v.id) {
this.store.dispatch('dag/getResourceId', {
type: 'FILE',
fullName: '/' + v.res
}).then(res => {
this.resourceList.push(res.id)
this.dataProcess(backResource)
}).catch(e => {
this.resourceList.push(v.res)
this.dataProcess(backResource)
})
} else {
this.resourceList.push(v.id)
this.dataProcess(backResource)
}
})
this.cacheResourceList = resourceList
}
// backfill localParams
let localParams = o.params.localParams || []
if (localParams.length) {
this.localParams = localParams
}
}
},
mounted () {
setTimeout(() => {
this._handlerEditor()
}, 200)
},
destroyed () {
if (editor) {
editor.toTextArea() // Uninstall
      editor.off('keypress', this.keypress)
}
},
components: { mLocalParams, mListBox, mScriptBox, Treeselect }
}
</script>
|
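`searchTree` tests `element.children !== null`, but `operationTree` deletes `children` from leaf nodes, leaving it `undefined`, a value that test lets through into the `for` loop. A defensive variant, written as a self-contained sketch under that assumption rather than as the shipped code:

```js
// depth-first lookup by id; tolerates nodes whose `children`
// property was removed (operationTree deletes it on leaves)
function searchTree (element, id) {
  if (element.id === id) {
    return element
  }
  if (Array.isArray(element.children)) {
    for (const child of element.children) {
      const result = searchTree(child, id)
      if (result) {
        return result
      }
    }
  }
  return null
}
```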
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="sql-model">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refDs"
@on-dsData="_onDsData"
:data="{ type:type,datasource:datasource }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Type')}}</div>
<div slot="content">
<div style="display: inline-block;">
<m-sql-type @on-sqlType="_onSqlType" :sql-type="sqlType"></m-sql-type>
</div>
</div>
</m-list-box>
<template v-if="sqlType === 0">
<m-list-box>
<div slot="text"><strong class='requiredIcon'>*</strong>{{$t('Title')}}</div>
<div slot="content">
<el-input
type="input"
size="small"
v-model="title"
:disabled="isDetails"
:placeholder="$t('Please enter the title of email')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text"><strong class='requiredIcon'>*</strong>{{$t('Alarm group')}}</div>
<div slot="content">
<m-warning-groups v-model="groupId"></m-warning-groups>
</div>
</m-list-box>
</template>
<m-list-box v-if="type === 'HIVE'">
<div slot="text">{{$t('SQL Parameter')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="input"
size="small"
v-model="connParams"
:placeholder="$t('Please enter format') + ' key1=value1;key2=value2...'">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-sql-mirror"
name="code-sql-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box v-if="type === 'HIVE'">
<div slot="text">{{$t('UDF Function')}}</div>
<div slot="content">
<m-udfs
ref="refUdfs"
@on-udfsData="_onUdfsData"
:udfs="udfs"
:type="type">
</m-udfs>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-udpData="_onUdpData"
:udp-list="localParams">
</m-local-params>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Pre Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPreStatements"
@on-statement-list="_onPreStatements"
:statement-list="preStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Post Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPostStatements"
@on-statement-list="_onPostStatements"
:statement-list="postStatements">
</m-statement-list>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
      :append-to-body="true"
width="80%">
      <m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mUdfs from './_source/udfs'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mSqlType from './_source/sqlType'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import mStatementList from './_source/statementList'
import mWarningGroups from './_source/warningGroups'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
export default {
name: 'sql',
data () {
return {
// Data source type
type: '',
// data source
datasource: '',
// Return to the selected data source
rtDatasource: '',
// Sql statement
sql: '',
// Custom parameter
localParams: [],
// UDF function
udfs: '',
// Sql type
sqlType: '0',
// Email title
title: '',
// Sql parameter
connParams: '',
// Pre statements
preStatements: [],
// Post statements
postStatements: [],
item: '',
scriptBoxDialog: false,
groupId: null
}
},
mixins: [disabledState],
props: {
backfillItem: Object,
createNodeId: Number
},
methods: {
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
/**
* return sqlType
*/
_onSqlType (a) {
this.sqlType = a
},
/**
* return udfs
*/
_onUdfsData (a) {
this.udfs = a
},
/**
* return Custom parameter
*/
_onUdpData (a) {
this.localParams = a
},
/**
* return data source
*/
_onDsData (o) {
this.type = o.type
this.rtDatasource = o.datasource
},
/**
* return pre statements
*/
_onPreStatements (a) {
this.preStatements = a
},
/**
* return post statements
*/
_onPostStatements (a) {
this.postStatements = a
},
/**
* verification
*/
_verification () {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDs._verifDatasource()) {
return false
}
if (this.sqlType === '0' && !this.title) {
this.$message.warning(`${i18n.$t('Mail subject required')}`)
return false
}
if (this.sqlType === '0' && (this.groupId === '' || this.groupId === null)) {
this.$message.warning(`${i18n.$t('Alarm group required')}`)
return false
}
// udfs Subcomponent verification Verification only if the data type is HIVE
if (this.type === 'HIVE') {
if (!this.$refs.refUdfs._verifUdfs()) {
return false
}
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// preStatements Subcomponent verification
if (!this.$refs.refPreStatements._verifProp()) {
return false
}
// postStatements Subcomponent verification
if (!this.$refs.refPostStatements._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
type: this.type,
datasource: this.rtDatasource,
sql: editor.getValue(),
udfs: this.udfs,
sqlType: this.sqlType,
title: this.title,
groupId: this.groupId,
localParams: this.localParams,
connParams: this.connParams,
preStatements: this.preStatements,
postStatements: this.postStatements
})
return true
},
/**
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-sql-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
this.changes = () => {
this._cacheParams()
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', this.changes)
editor.setValue(this.sql)
return editor
},
_cacheParams () {
this.$emit('on-cache-params', {
type: this.type,
datasource: this.rtDatasource,
sql: editor ? editor.getValue() : '',
udfs: this.udfs,
sqlType: this.sqlType,
title: this.title,
groupId: this.groupId,
localParams: this.localParams,
connParams: this.connParams,
preStatements: this.preStatements,
postStatements: this.postStatements
})
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
        editor.off('keypress', this.keypress)
        editor.off('changes', this.changes)
}
}
},
watch: {
// Listening to sqlType
sqlType (val) {
      if (val !== '0') {
this.title = ''
this.groupId = null
}
},
// Listening data source
type (val) {
if (val !== 'HIVE') {
this.connParams = ''
}
},
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
}
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
// backfill
this.type = o.params.type || ''
this.datasource = o.params.datasource || ''
this.sql = o.params.sql || ''
this.udfs = o.params.udfs || ''
this.sqlType = o.params.sqlType
this.connParams = o.params.connParams || ''
this.localParams = o.params.localParams || []
this.preStatements = o.params.preStatements || []
this.postStatements = o.params.postStatements || []
this.title = o.params.title || ''
this.groupId = o.params.groupId
}
},
mounted () {
setTimeout(() => {
this._handlerEditor()
}, 200)
},
destroyed () {
/**
* Destroy the editor instance
*/
if (editor) {
editor.toTextArea() // Uninstall
      editor.off('keypress', this.keypress)
      editor.off('changes', this.changes)
}
},
computed: {
cacheParams () {
return {
type: this.type,
datasource: this.rtDatasource,
udfs: this.udfs,
sqlType: this.sqlType,
title: this.title,
groupId: this.groupId,
localParams: this.localParams,
connParams: this.connParams,
preStatements: this.preStatements,
postStatements: this.postStatements
}
}
},
components: { mListBox, mDatasource, mLocalParams, mUdfs, mSqlType, mStatementList, mScriptBox, mWarningGroups }
}
</script>
|
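`sql.vue` keeps both handlers on `this` (`this.keypress`, `this.changes`) so teardown can hand the same function references back to `editor.off`; a CodeMirror instance only detaches a listener when the reference matches what was registered, and instance `on`/`off` take `(type, handler)`. The register/unregister symmetry in isolation, as a fragment that assumes the component's module-level `editor`:

```js
// register with references kept on `this`...
this.keypress = () => { /* trigger completion hints */ }
this.changes = () => { this._cacheParams() }
editor.on('keypress', this.keypress)
editor.on('changes', this.changes)

// ...so teardown removes exactly what was registered
editor.off('keypress', this.keypress)
editor.off('changes', this.changes)
```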
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sqoop.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="sqoop-model">
<m-list-box>
<div slot="text">{{$t('Custom Job')}}</div>
<div slot="content">
<el-switch size="small" v-model="isCustomTask" @change="_onSwitch" :disabled="isDetails"></el-switch>
</div>
</m-list-box>
<m-list-box v-show="isCustomTask">
<div slot="text">{{$t('Custom Script')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea id="code-shell-mirror" name="code-shell-mirror" style="opacity: 0;"></textarea>
</div>
</div>
</m-list-box>
<template v-if="!isCustomTask">
<m-list-box>
<div slot="text">{{$t('Sqoop Job Name')}}</div>
<div slot="content">
<el-input :disabled="isDetails" size="small" type="text" v-model="jobName" :placeholder="$t('Please enter Job Name(required)')"></el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Direct')}}</div>
<div slot="content">
<el-select
style="width: 130px;"
size="small"
v-model="modelType"
:disabled="isDetails"
@change="_handleModelTypeChange">
<el-option
v-for="city in modelTypeList"
:key="city.code"
:value="city.code"
:label="city.code">
</el-option>
</el-select>
</div>
</m-list-box>
<m-list-box>
<div slot="text" style="width: 110px;">{{$t('Hadoop Custom Params')}}</div>
<div slot="content">
<m-local-params
ref="refMapColumnHadoopParams"
@on-local-params="_onHadoopCustomParams"
:udp-list="hadoopCustomParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
<m-list-box>
<div slot="text" style="width: 100px;">{{$t('Sqoop Advanced Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refMapColumnAdvancedParams"
@on-local-params="_onSqoopAdvancedParams"
:udp-list="sqoopAdvancedParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
<m-list-box>
<div slot="text" style="font-weight:bold">{{$t('Data Source')}}</div>
</m-list-box>
<hr style="margin-left: 60px;">
<m-list-box>
<div slot="text">{{$t('Type')}}</div>
<div slot="content">
<el-select
style="width: 130px;"
size="small"
v-model="sourceType"
:disabled="isDetails"
@change="_handleSourceTypeChange">
<el-option
v-for="city in sourceTypeList"
:key="city.code"
:value="city.code"
:label="city.code">
</el-option>
</el-select>
</div>
</m-list-box>
<template v-if="sourceType === 'MYSQL'">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refSourceDs"
@on-dsData="_onSourceDsData"
:data="{type:sourceMysqlParams.srcType,
typeList: [{id: 0, code: 'MYSQL', disabled: false}],
datasource:sourceMysqlParams.srcDatasource }"
>
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('ModelType')}}</div>
<div slot="content">
<el-radio-group v-model="srcQueryType" size="small" @change="_handleQueryType">
<el-radio label="0">{{$t('Form')}}</el-radio>
<el-radio label="1">SQL</el-radio>
</el-radio-group>
</div>
</m-list-box>
<template v-if="sourceMysqlParams.srcQueryType === '0'">
<m-list-box>
<div slot="text">{{$t('Table')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceMysqlParams.srcTable"
:placeholder="$t('Please enter Mysql Table(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('ColumnType')}}</div>
<div slot="content">
<el-radio-group v-model="sourceMysqlParams.srcColumnType" size="small" style="vertical-align: sub;">
<el-radio label="0">{{$t('All Columns')}}</el-radio>
<el-radio label="1">{{$t('Some Columns')}}</el-radio>
</el-radio-group>
</div>
</m-list-box>
<m-list-box v-if="sourceMysqlParams.srcColumnType === '1'">
<div slot="text">{{$t('Column')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceMysqlParams.srcColumns"
:placeholder="$t('Please enter Columns (Comma separated)')">
</el-input>
</div>
</m-list-box>
</template>
</template>
<template v-if="sourceType === 'HIVE'">
<m-list-box>
<div slot="text">{{$t('Database')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceHiveParams.hiveDatabase"
:placeholder="$t('Please enter Hive Database(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Table')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceHiveParams.hiveTable"
:placeholder="$t('Please enter Hive Table(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Hive partition Keys')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceHiveParams.hivePartitionKey"
:placeholder="$t('Please enter Hive Partition Keys')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Hive partition Values')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceHiveParams.hivePartitionValue"
:placeholder="$t('Please enter Hive Partition Values')">
</el-input>
</div>
</m-list-box>
</template>
<template v-if="sourceType === 'HDFS'">
<m-list-box>
<div slot="text">{{$t('Export Dir')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="sourceHdfsParams.exportDir"
:placeholder="$t('Please enter Export Dir(required)')">
</el-input>
</div>
</m-list-box>
</template>
<template v-if="sourceType === 'MYSQL'">
<m-list-box v-if="srcQueryType === '1'">
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-sqoop-mirror"
name="code-sqoop-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Map Column Hive')}}</div>
<div slot="content">
<m-local-params
ref="refMapColumnHiveParams"
@on-local-params="_onMapColumnHive"
:udp-list="sourceMysqlParams.mapColumnHive"
:hide="false">
</m-local-params>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Map Column Java')}}</div>
<div slot="content">
<m-local-params
ref="refMapColumnJavaParams"
@on-local-params="_onMapColumnJava"
:udp-list="sourceMysqlParams.mapColumnJava"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</template>
<m-list-box>
<div slot="text" style="font-weight:bold">{{$t('Data Target')}}</div>
</m-list-box>
<hr style="margin-left: 60px;">
<m-list-box>
<div slot="text">{{$t('Type')}}</div>
<div slot="content">
<el-select
style="width: 130px;"
size="small"
v-model="targetType"
:disabled="isDetails">
<el-option
v-for="city in targetTypeList"
:key="city.code"
:value="city.code"
:label="city.code">
</el-option>
</el-select>
</div>
</m-list-box>
<template v-if="targetType === 'HIVE'">
<m-list-box>
<div slot="text">{{$t('Database')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHiveParams.hiveDatabase"
:placeholder="$t('Please enter Hive Database(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Table')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHiveParams.hiveTable"
:placeholder="$t('Please enter Hive Table(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('CreateHiveTable')}}</div>
<div slot="content">
<el-switch v-model="targetHiveParams.createHiveTable" size="small"></el-switch>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('DropDelimiter')}}</div>
<div slot="content">
<el-switch v-model="targetHiveParams.dropDelimiter" size="small"></el-switch>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('OverWriteSrc')}}</div>
<div slot="content">
<el-switch v-model="targetHiveParams.hiveOverWrite" size="small"></el-switch>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('ReplaceDelimiter')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHiveParams.replaceDelimiter"
:placeholder="$t('Please enter Replace Delimiter')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Hive partition Keys')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHiveParams.hivePartitionKey"
:placeholder="$t('Please enter Hive Partition Keys')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Hive partition Values')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHiveParams.hivePartitionValue"
:placeholder="$t('Please enter Hive Partition Values')">
</el-input>
</div>
</m-list-box>
</template>
<template v-if="targetType === 'HDFS'">
<m-list-box>
<div slot="text">{{$t('Target Dir')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHdfsParams.targetPath"
:placeholder="$t('Please enter Target Dir(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('DeleteTargetDir')}}</div>
<div slot="content">
<el-switch v-model="targetHdfsParams.deleteTargetDir" size="small"></el-switch>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('CompressionCodec')}}</div>
<div slot="content">
<el-radio-group v-model="targetHdfsParams.compressionCodec" size="small">
<el-radio label="snappy">snappy</el-radio>
<el-radio label="lzo">lzo</el-radio>
<el-radio label="gzip">gzip</el-radio>
<el-radio label="">no</el-radio>
</el-radio-group>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('FileType')}}</div>
<div slot="content">
<el-radio-group v-model="targetHdfsParams.fileType" size="small">
<el-radio label="--as-avrodatafile">avro</el-radio>
<el-radio label="--as-sequencefile">sequence</el-radio>
<el-radio label="--as-textfile">text</el-radio>
<el-radio label="--as-parquetfile">parquet</el-radio>
</el-radio-group>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('FieldsTerminated')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHdfsParams.fieldsTerminated"
:placeholder="$t('Please enter Fields Terminated')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('LinesTerminated')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetHdfsParams.linesTerminated"
:placeholder="$t('Please enter Lines Terminated')">
</el-input>
</div>
</m-list-box>
</template>
<template v-if="targetType === 'MYSQL'">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refTargetDs"
@on-dsData="_onTargetDsData"
:data="{ type:targetMysqlParams.targetType,
typeList: [{id: 0, code: 'MYSQL', disabled: false}],
datasource:targetMysqlParams.targetDatasource }"
>
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Table')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetMysqlParams.targetTable"
:placeholder="$t('Please enter Mysql Table(required)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Column')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetMysqlParams.targetColumns"
:placeholder="$t('Please enter Columns (Comma separated)')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('FieldsTerminated')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetMysqlParams.fieldsTerminated"
:placeholder="$t('Please enter Fields Terminated')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('LinesTerminated')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetMysqlParams.linesTerminated"
:placeholder="$t('Please enter Lines Terminated')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('IsUpdate')}}</div>
<div slot="content">
<el-switch v-model="targetMysqlParams.isUpdate" size="small"></el-switch>
</div>
</m-list-box>
<m-list-box v-show="targetMysqlParams.isUpdate">
<div slot="text">{{$t('UpdateKey')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="targetMysqlParams.targetUpdateKey"
:placeholder="$t('Please enter Update Key')">
</el-input>
</div>
</m-list-box>
<m-list-box v-show="targetMysqlParams.isUpdate">
<div slot="text">{{$t('UpdateMode')}}</div>
<div slot="content">
<el-radio-group v-model="targetMysqlParams.targetUpdateMode" size="small">
<el-radio label="updateonly">{{$t('OnlyUpdate')}}</el-radio>
<el-radio label="allowinsert">{{$t('AllowInsert')}}</el-radio>
</el-radio-group>
</div>
</m-list-box>
</template>
<m-list-box>
<div slot="text">{{$t('Concurrency')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="text"
size="small"
v-model="concurrency"
:placeholder="$t('Please enter Concurrency')">
</el-input>
</div>
</m-list-box>
</template>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
      :append-to-body="true"
width="80%">
      <m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
let shellEditor
export default {
  name: 'sqoop',
data () {
return {
/**
* Is Custom Task
*/
isCustomTask: false,
/**
* Customer Params
*/
localParams: [],
/**
* Hadoop Custom Params
*/
hadoopCustomParams: [],
/**
* Sqoop Advanced Params
*/
sqoopAdvancedParams: [],
/**
* script
*/
customShell: '',
/**
* task name
*/
jobName: '',
/**
* mysql query type
*/
srcQueryType: '1',
/**
* source data source
*/
srcDatasource: '',
/**
* target data source
*/
targetDatasource: '',
/**
* concurrency
*/
concurrency: 1,
/**
* default job type
*/
jobType: 'TEMPLATE',
/**
* direct model type
*/
modelType: 'import',
modelTypeList: [{ code: 'import' }, { code: 'export' }],
sourceTypeList: [
{
code: 'MYSQL'
}
],
targetTypeList: [
{
code: 'HIVE'
},
{
code: 'HDFS'
}
],
sourceType: 'MYSQL',
targetType: 'HDFS',
sourceMysqlParams: {
srcType: 'MYSQL',
srcDatasource: '',
srcTable: '',
srcQueryType: '1',
srcQuerySql: '',
srcColumnType: '0',
srcColumns: '',
srcConditionList: [],
mapColumnHive: [],
mapColumnJava: []
},
sourceHdfsParams: {
exportDir: ''
},
sourceHiveParams: {
hiveDatabase: '',
hiveTable: '',
hivePartitionKey: '',
hivePartitionValue: ''
},
targetHdfsParams: {
targetPath: '',
deleteTargetDir: true,
fileType: '--as-avrodatafile',
compressionCodec: 'snappy',
fieldsTerminated: '',
linesTerminated: ''
},
targetMysqlParams: {
targetType: 'MYSQL',
targetDatasource: '',
targetTable: '',
targetColumns: '',
fieldsTerminated: '',
linesTerminated: '',
preQuery: '',
isUpdate: false,
targetUpdateKey: '',
targetUpdateMode: 'allowinsert'
},
targetHiveParams: {
hiveDatabase: '',
hiveTable: '',
createHiveTable: false,
dropDelimiter: false,
hiveOverWrite: true,
replaceDelimiter: '',
hivePartitionKey: '',
hivePartitionValue: ''
},
item: '',
scriptBoxDialog: false
}
},
mixins: [disabledState],
props: {
backfillItem: Object
},
methods: {
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
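    // srcQueryType: '0' = table form, '1' = custom SQL; a MySQL source in
    // SQL mode can only export to HDFS (see _getTargetTypeList)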
_handleQueryType (o) {
this.sourceMysqlParams.srcQueryType = this.srcQueryType
this._getTargetTypeList(this.sourceType)
this.targetType = this.targetTypeList[0].code
},
_handleModelTypeChange (a) {
this._getSourceTypeList(a)
this.sourceType = this.sourceTypeList[0].code
this._handleSourceTypeChange({ label: this.sourceType, value: this.sourceType })
},
_handleSourceTypeChange (a) {
this._getTargetTypeList(a.label)
this.targetType = this.targetTypeList[0].code
},
_getSourceTypeList (data) {
switch (data) {
case 'import':
this.sourceTypeList = [
{
code: 'MYSQL'
}
]
break
case 'export':
this.sourceTypeList = [
{
code: 'HDFS'
},
{
code: 'HIVE'
}
]
break
default:
this.sourceTypeList = [
{
code: 'MYSQL'
},
{
code: 'HIVE'
},
{
code: 'HDFS'
}
]
break
}
},
_getTargetTypeList (data) {
switch (data) {
case 'MYSQL':
if (this.srcQueryType === '1') {
this.targetTypeList = [
{
code: 'HDFS'
}]
} else {
this.targetTypeList = [
{
code: 'HIVE'
},
{
code: 'HDFS'
}
]
}
break
case 'HDFS':
this.targetTypeList = [
{
code: 'MYSQL'
}
]
break
case 'HIVE':
this.targetTypeList = [
{
code: 'MYSQL'
}
]
break
default:
this.targetTypeList = [
{
code: 'HIVE'
},
{
code: 'HDFS'
}
]
break
}
},
_onMapColumnHive (a) {
this.sourceMysqlParams.mapColumnHive = a
},
_onMapColumnJava (a) {
this.sourceMysqlParams.mapColumnJava = a
},
/**
* return data source
*/
_onSourceDsData (o) {
this.sourceMysqlParams.srcType = o.type
this.sourceMysqlParams.srcDatasource = o.datasource
},
/**
* return data source
*/
_onTargetDsData (o) {
this.targetMysqlParams.targetType = o.type
this.targetMysqlParams.targetDatasource = o.datasource
},
/**
* stringify the source params
*/
_handleSourceParams () {
let params = null
switch (this.sourceType) {
case 'MYSQL':
this.sourceMysqlParams.srcQuerySql = this.sourceMysqlParams.srcQueryType === '1' && editor
? editor.getValue() : this.sourceMysqlParams.srcQuerySql
params = JSON.stringify(this.sourceMysqlParams)
break
case 'ORACLE':
params = JSON.stringify(this.sourceOracleParams)
break
case 'HDFS':
params = JSON.stringify(this.sourceHdfsParams)
break
case 'HIVE':
params = JSON.stringify(this.sourceHiveParams)
break
default:
params = ''
break
}
return params
},
/**
* stringify the target params
*/
_handleTargetParams () {
let params = null
switch (this.targetType) {
case 'HIVE':
params = JSON.stringify(this.targetHiveParams)
break
case 'HDFS':
params = JSON.stringify(this.targetHdfsParams)
break
case 'MYSQL':
params = JSON.stringify(this.targetMysqlParams)
break
default:
params = ''
break
}
return params
},
/**
* get source params by source type
*/
_getSourceParams (data) {
switch (this.sourceType) {
case 'MYSQL':
this.sourceMysqlParams = JSON.parse(data)
this.srcDatasource = this.sourceMysqlParams.srcDatasource
break
case 'ORACLE':
this.sourceOracleParams = JSON.parse(data)
break
case 'HDFS':
this.sourceHdfsParams = JSON.parse(data)
break
case 'HIVE':
this.sourceHiveParams = JSON.parse(data)
break
default:
break
}
},
/**
* get target params by target type
*/
_getTargetParams (data) {
switch (this.targetType) {
case 'HIVE':
this.targetHiveParams = JSON.parse(data)
break
case 'HDFS':
this.targetHdfsParams = JSON.parse(data)
break
case 'MYSQL':
this.targetMysqlParams = JSON.parse(data)
this.targetDatasource = this.targetMysqlParams.targetDatasource
break
default:
break
}
},
/**
* verification
*/
_verification () {
let sqoopParams = {
jobType: this.jobType,
localParams: this.localParams
}
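      // CUSTOM jobs only need the shell script; TEMPLATE jobs are validated
      // field by field below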
if (this.jobType === 'CUSTOM') {
if (!shellEditor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter Custom Shell(required)')}`)
return false
}
sqoopParams.customShell = shellEditor.getValue()
} else {
if (!this.jobName) {
this.$message.warning(`${i18n.$t('Please enter Job Name(required)')}`)
return false
}
switch (this.sourceType) {
case 'MYSQL':
if (!this.$refs.refSourceDs._verifDatasource()) {
return false
}
if (this.srcQueryType === '1') {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
this.sourceMysqlParams.srcTable = ''
this.sourceMysqlParams.srcColumnType = '0'
this.sourceMysqlParams.srcColumns = ''
} else {
if (this.sourceMysqlParams.srcTable === '') {
this.$message.warning(`${i18n.$t('Please enter Mysql Table(required)')}`)
return false
}
this.sourceMysqlParams.srcQuerySql = ''
if (this.sourceMysqlParams.srcColumnType === '1' && this.sourceMysqlParams.srcColumns === '') {
this.$message.warning(`${i18n.$t('Please enter Columns (Comma separated)')}`)
return false
}
if (this.sourceMysqlParams.srcColumnType === '0') {
this.sourceMysqlParams.srcColumns = ''
}
}
break
case 'HDFS':
if (this.sourceHdfsParams.exportDir === '') {
this.$message.warning(`${i18n.$t('Please enter Export Dir(required)')}`)
return false
}
break
case 'HIVE':
if (this.sourceHiveParams.hiveDatabase === '') {
this.$message.warning(`${i18n.$t('Please enter Hive Database(required)')}`)
return false
}
if (this.sourceHiveParams.hiveTable === '') {
this.$message.warning(`${i18n.$t('Please enter Hive Table(required)')}`)
return false
}
break
default:
break
}
switch (this.targetType) {
case 'HIVE':
if (this.targetHiveParams.hiveDatabase === '') {
this.$message.warning(`${i18n.$t('Please enter Hive Database(required)')}`)
return false
}
if (this.targetHiveParams.hiveTable === '') {
this.$message.warning(`${i18n.$t('Please enter Hive Table(required)')}`)
return false
}
break
case 'HDFS':
if (this.targetHdfsParams.targetPath === '') {
this.$message.warning(`${i18n.$t('Please enter Target Dir(required)')}`)
return false
}
break
case 'MYSQL':
if (!this.$refs.refTargetDs._verifDatasource()) {
return false
}
if (this.targetMysqlParams.targetTable === '') {
this.$message.warning(`${i18n.$t('Please enter Mysql Table(required)')}`)
return false
}
break
default:
break
}
sqoopParams.jobName = this.jobName
sqoopParams.hadoopCustomParams = this.hadoopCustomParams
sqoopParams.sqoopAdvancedParams = this.sqoopAdvancedParams
sqoopParams.concurrency = this.concurrency
sqoopParams.modelType = this.modelType
sqoopParams.sourceType = this.sourceType
sqoopParams.targetType = this.targetType
sqoopParams.targetParams = this._handleTargetParams()
sqoopParams.sourceParams = this._handleSourceParams()
}
// storage
this.$emit('on-params', sqoopParams)
return true
},
/**
     * Create the SQL editor with syntax highlighting
*/
_handlerEditor () {
this._destroyEditor()
editor = codemirror('code-sqoop-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
this.changes = () => {
this._cacheParams()
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', this.changes)
editor.setValue(this.sourceMysqlParams.srcQuerySql)
return editor
},
/**
     * Create the shell editor with syntax highlighting
*/
_handlerShellEditor () {
this._destroyShellEditor()
// shellEditor
shellEditor = codemirror('code-shell-mirror', {
mode: 'shell',
readOnly: this.isDetails
})
this.keypress = () => {
if (!shellEditor.getOption('readOnly')) {
shellEditor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
shellEditor.on('keypress', this.keypress)
shellEditor.setValue(this.customShell)
return shellEditor
},
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* return hadoopParams
*/
_onHadoopCustomParams (a) {
this.hadoopCustomParams = a
},
/**
* return sqoopAdvancedParams
*/
_onSqoopAdvancedParams (a) {
this.sqoopAdvancedParams = a
},
_cacheParams () {
this.$emit('on-cache-params', {
concurrency: this.concurrency,
modelType: this.modelType,
sourceType: this.sourceType,
targetType: this.targetType,
sourceParams: this._handleSourceParams(),
targetParams: this._handleTargetParams(),
localParams: this.localParams
})
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sqoop-mirror'), 'keypress', this.keypress)
editor.off($('.code-sqoop-mirror'), 'changes', this.changes)
editor = null
}
},
    _destroyShellEditor () {
      if (shellEditor) {
        shellEditor.toTextArea() // Uninstall
        shellEditor.off($('.code-shell-mirror'), 'keypress', this.keypress)
        shellEditor.off($('.code-shell-mirror'), 'changes', this.changes)
        shellEditor = null
      }
    }
},
watch: {
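    // NOTE: the sqlType/type watchers below look copied from the sql task
    // form; neither property is defined in this component, so they never fire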
// Listening to sqlType
sqlType (val) {
if (val === 0) {
this.showType = []
}
if (val !== 0) {
this.title = ''
}
},
// Listening data source
type (val) {
if (val !== 'HIVE') {
this.connParams = ''
}
},
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
}
},
created () {
this._destroyEditor()
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.jobType = o.params.jobType
this.isCustomTask = false
if (this.jobType === 'CUSTOM') {
this.customShell = o.params.customShell
this.isCustomTask = true
} else {
this.jobName = o.params.jobName
this.hadoopCustomParams = o.params.hadoopCustomParams
this.sqoopAdvancedParams = o.params.sqoopAdvancedParams
this.concurrency = o.params.concurrency || 1
this.modelType = o.params.modelType
this.sourceType = o.params.sourceType
this._getTargetTypeList(this.sourceType)
this.targetType = o.params.targetType
this._getSourceParams(o.params.sourceParams)
this._getTargetParams(o.params.targetParams)
this.localParams = o.params.localParams
}
}
},
  mounted () {
    setTimeout(() => {
      this._handlerEditor()
      this._handlerShellEditor()
    }, 200)
    setTimeout(() => {
      this.srcQueryType = this.sourceMysqlParams.srcQueryType
    }, 500)
  },
  destroyed () {
    /**
     * Destroy both editor instances
     */
    this._destroyEditor()
    this._destroyShellEditor()
  },
computed: {
cacheParams () {
return {
concurrency: this.concurrency,
modelType: this.modelType,
sourceType: this.sourceType,
targetType: this.targetType,
localParams: this.localParams,
sourceMysqlParams: this.sourceMysqlParams,
sourceHdfsParams: this.sourceHdfsParams,
sourceHiveParams: this.sourceHiveParams,
targetHdfsParams: this.targetHdfsParams,
targetMysqlParams: this.targetMysqlParams,
targetHiveParams: this.targetHiveParams
}
}
},
components: { mListBox, mDatasource, mLocalParams, mScriptBox }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/js/module/components/conditions/conditions.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="conditions-model">
<div class="left">
<slot name="button-group"></slot>
</div>
<div class="right">
<div class="from-box">
<slot name="search-group" v-if="isShow"></slot>
<template v-if="!isShow">
<div class="list">
<el-button size="mini" @click="_ckQuery" icon="el-icon-search"></el-button>
</div>
<div class="list">
<el-input v-model="searchVal"
                  @keyup.enter.native="_ckQuery"
size="mini"
:placeholder="$t('Please enter keyword')"
type="text"
style="width:180px;">
</el-input>
</div>
</template>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
export default {
name: 'conditions',
data () {
return {
// search value
searchVal: ''
}
},
props: {
operation: Array
},
methods: {
/**
     * emit the query parameters
*/
_ckQuery () {
this.$emit('on-conditions', {
searchVal: _.trim(this.searchVal)
})
}
},
computed: {
    // whether the search-group slot was provided
isShow () {
return this.$slots['search-group']
}
},
created () {
// Routing parameter merging
if (!_.isEmpty(this.$route.query)) {
this.searchVal = this.$route.query.searchVal || ''
}
},
components: {}
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,089 | [Bug][UI] Code mirror cannot be displayed normally in sqoop task | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'Create process' in 'Process definition'
2. Create a sqoop task
3. Switch 'Custom Job' and 'ModelType'
4. See error
**Expected behavior**
Bug fixed
**Screenshots**
Picture 1:
![image](https://user-images.githubusercontent.com/4902714/111579750-86996500-87f1-11eb-8ceb-fa00645a6ad9.png)
Picture 2:
![image](https://user-images.githubusercontent.com/4902714/111579796-9c0e8f00-87f1-11eb-9e13-0577426b9aa4.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
Add any other context about the problem here.
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
| https://github.com/apache/dolphinscheduler/issues/5089 | https://github.com/apache/dolphinscheduler/pull/5090 | 086e71644163a0bb44e4bd1c5f7f16abc383ab1a | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | "2021-03-18T05:55:52Z" | java | "2021-03-19T03:57:46Z" | dolphinscheduler-ui/src/sass/common/index.scss | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@import "scrollbar";
@import "table";
.ans-input {
textarea,input {
font-weight: 400;
}
}
.ans-radio {
.ans-radio-inner {
border: 1px solid #B3B3B3;
}
}
.ans-poptip {
min-width: 158px;
}
.ans-checkbox-wrapper .checkbox-label,
.ans-radio-wrapper {
font-weight: normal;
}
a:focus {
text-decoration: none !important;
}
a:hover{
color:#0097e0;
text-decoration: none !important;
}
.CodeMirror-hints {
z-index: 999 !important;
}
.cm-s-mdn-like .CodeMirror-gutters {
border-left:0 !important;
}
.cm-s-mdn-like .CodeMirror-linenumber {
padding-left: 0;
}
body{
background: #EDEEED;
.home-main {
min-height: calc(100vh - 100px);
background: #fff;
margin: 20px;
border-radius: 3px;
>.content-title {
height: 48px;
background: #f8fbfe;
border-radius: 3px 3px 0 0;
span {
font-size: 22px;
padding-left: 18px;
padding-top: 10px;
display: inline-block;
color: #2A455B;
}
}
>.conditions-box {
//background: #f8fbfe;
}
.conditions-model {
height: 50px;
position: relative;
.left {
position: absolute;
left: 12px;
top: 13px;
}
.right {
position: absolute;
right: 8px;
top: 13px;
.from-box {
.list {
float: right;
margin-right: 4px;
}
}
}
}
}
.main-layout-model {
&.dag-screen {
.m-top {
position: unset;
}
.m-bottom {
z-index: 2;
}
.index-model {
width: 100%;
height: 100%;
position: fixed;
top: -20px;
left: -20px;
.dag-model {
height: 100% !important;
}
}
}
}
  font:13px/1.5 PingFangSC-Light,Helvetica Neue,Helvetica,Microsoft Yahei,Arial,Hiragino Sans GB,tahoma,SimSun,sans-serif;
-webkit-backface-visibility:hidden;
-webkit-tap-highlight-color:transparent;
-webkit-text-size-adjust:none;
-webkit-touch-callout:none;
-webkit-font-smoothing:antialiased;
-moz-osx-font-smoothing:grayscale
}
// icon disabled state
.iconfont.icon-disabled {
color: #999 !important;
}
.main-layout-box {
padding-left: 200px;
&.no {
padding-left: 0;
}
}
.tooltip-cont-model {
width: 200px;
height: 300px;
background: #fff;
border-radius: 3px;
margin-left: -4px;
margin-top: -4px;
}
.global-loading {
background: #fff;
width: 100%;
height: 100%;
position: fixed;
left: 0;
top: 0;
z-index: 999;
.svg-box {
width: 100px;
height: 66px;
position: absolute;
left:50%;
top: 50%;
margin-left: -50px;
margin-top: -100px;
text-align: center;
.sp1 {
display: block;
font-size: 14px;
color: #999;
padding-top: 4px;
}
}
}
.page-box {
text-align: right;
padding: 20px;
.ans-page {
display: inline-block;
}
}
button[disabled='disabled'] {
color: #bbbec4 !important;
background-color: #f7f7f7 !important;
border-color: #dddee1 !important;
}
.ans-icon-spinner.loading {
text-rendering: auto;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
-webkit-animation: fa-spin .6s infinite linear;
animation: fa-spin .6s infinite linear;
}
.fa-spin {
font-size: 16px;
}
.CodeMirror {
border: 1px solid #DDDDDD !important;
border-radius: 3px;
}
.el-dialog__wrapper .el-dialog {
display: table;
width: auto;
}
article,aside,blockquote,body,button,code,dd,details,div,dl,dt,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,hgroup,hr,input,legend,li,menu,nav,ol,p,pre,section,td,textarea,th,ul {
margin:0;
padding:0
}
h1,h2,h3,p {
font-style:normal
}
table {
border-collapse:collapse;
border-spacing:0
}
button,input,select,textarea {
font-family:inherit;
font-size:inherit
}
ol,ul {
list-style:none
}
img {
border:0
}
input::-ms-clear,input::-ms-reveal {
display:none
}
a,body {
color:#333
}
:focus {
outline:0
}
*,:after,:before {
-webkit-box-sizing:border-box;
box-sizing:border-box
}
[hidden],[v-cloak] {
display:none
}
body::-webkit-scrollbar {
width:4px;
height:4px
}
body::-webkit-scrollbar-track {
background:hsla(0,0%,100%,0);
  border-radius:2px;
margin:4px 0
}
body::-webkit-scrollbar-thumb {
background:hsla(0,0%,100%,0);
border-radius:2px;
-webkit-transition:background .2s ease-in-out;
transition:background .2s ease-in-out
}
[class*=scrollbar]:hover::-webkit-scrollbar-thumb {
background:#70bdf7
}
.clearfix,.outer {
zoom:1
}
.clearfix:after,.outer:after {
clear:both;
content:" ";
display:block;
width:0;
height:0;
visibility:hidden
}
.f-show {
display:block
}
.f-hide {
display:none
}
.f-pr {
position:relative
}
.f-fl {
float:left
}
.f-fr {
float:right
}
.float-left {
float:left
}
.float-right {
float:right
}
.f-no-select {
-webkit-user-select:none;
-moz-user-select:none;
-ms-user-select:none;
-o-user-select:none;
user-select:none
}
.f-word-break {
white-space:normal;
word-wrap:break-word;
word-break:break-all
}
.f-text-ellipis {
overflow:hidden;
word-wrap:normal;
white-space:nowrap;
text-overflow:ellipsis
}
.f-link {
display:inline-block;
text-decoration:none;
width:100%;
height:100%
}
.f-wide {
margin:0 auto
}
.f-pre,.f-wide {
text-align:left
}
.f-pre {
overflow:hidden;
white-space:pre-wrap;
word-wrap:break-word;
word-break:break-all
}
.f-cursor-p {
cursor:pointer
}
.f-text-overflow {
white-space:nowrap;
overflow:hidden;
text-overflow:ellipsis
}
.f-f12 {
font-size:12px
}
.f-f14 {
font-size:14px
}
.f-f16 {
font-size:16px
}
.f-f18 {
font-size:18px
}
.vue-treeselect__multi-value {
max-height: 200px;
overflow: auto;
}
.el-dialog__body {
padding: 10px;
}
.el-dialog__header {
.el-dialog__headerbtn {
right: 10px;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,094 | [Improvement][UI] Add a main switch for refresh_in_switched_tab | **Describe the question**
Add a main switch for `refresh_in_switched_tab`
**What are the current deficiencies and the benefits of improvement**
- If a user does not want to automatically refresh the page in switched tab, he can set a main switch to `false`
**Which version of DolphinScheduler:**
-[dev]
@lijufeng2016 once mentioned this user requirement. | https://github.com/apache/dolphinscheduler/issues/5094 | https://github.com/apache/dolphinscheduler/pull/5095 | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | 91c29e6ca35b6ca81172f8b8c4ce3191d094852f | "2021-03-18T07:34:01Z" | java | "2021-03-19T03:58:33Z" | dolphinscheduler-ui/src/js/conf/home/App.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-layout>
<m-nav slot="top"></m-nav>
<router-view slot="bottom" v-if="isRenderRouterView"></router-view>
</m-layout>
</template>
<script>
import visibility from '@/module/visibility'
import mLayout from '@/module/components/layout/layout'
import mNav from '@/module/components/nav/nav'
export default {
name: 'app',
data () {
return {
isRenderRouterView: true
}
},
methods: {
reload () {
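      // toggle the v-if off, then back on in nextTick, to force the
      // router-view to be re-created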
this.isRenderRouterView = false
this.$nextTick(() => {
this.isRenderRouterView = true
})
}
},
mounted () {
visibility.change((evt, hidden) => {
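      // a global kill switch for this auto-refresh (requested in issue #5094)
      // could guard the check below; the flag name `switchTab` is only an
      // assumption, e.g.:
      //   if (hidden === false && config.switchTab && this.$route.meta.refresh_in_switched_tab) { ... }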
if (hidden === false && this.$route.meta.refresh_in_switched_tab) {
this.reload()
}
})
},
components: { mLayout, mNav }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,094 | [Improvement][UI] Add a main switch for refresh_in_switched_tab | **Describe the question**
Add a main switch for `refresh_in_switched_tab`
**What are the current deficiencies and the benefits of improvement**
- If a user does not want to automatically refresh the page in switched tab, he can set a main switch to `false`
**Which version of DolphinScheduler:**
-[dev]
@lijufeng2016 once mentioned this user requirement. | https://github.com/apache/dolphinscheduler/issues/5094 | https://github.com/apache/dolphinscheduler/pull/5095 | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | 91c29e6ca35b6ca81172f8b8c4ce3191d094852f | "2021-03-18T07:34:01Z" | java | "2021-03-19T03:58:33Z" | dolphinscheduler-ui/src/js/conf/home/router/index.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import Vue from 'vue'
import i18n from '@/module/i18n/index.js'
import Router from 'vue-router'
Vue.use(Router)
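// routes tagged with `refresh_in_switched_tab: true` are re-rendered by
// App.vue whenever the browser tab regains visibility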
const router = new Router({
routes: [
{
path: '/',
name: 'index',
redirect: {
name: 'home'
}
},
{
path: '/home',
name: 'home',
component: resolve => require(['../pages/home/index'], resolve),
meta: {
title: `${i18n.$t('Home')} - DolphinScheduler`,
refresh_in_switched_tab: true
}
},
{
path: '/projects',
name: 'projects',
component: resolve => require(['../pages/projects/index'], resolve),
meta: {
title: `${i18n.$t('Project')}`
},
redirect: {
name: 'projects-list'
},
children: [
{
path: '/projects/index',
name: 'projects-index',
component: resolve => require(['../pages/projects/pages/index/index'], resolve),
meta: {
title: `${i18n.$t('Project Home')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/kinship',
name: 'projects-kinship',
component: resolve => require(['../pages/projects/pages/kinship/index'], resolve),
meta: {
title: `${i18n.$t('Kinship')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/list',
name: 'projects-list',
component: resolve => require(['../pages/projects/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Project')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/definition',
name: 'definition',
component: resolve => require(['../pages/projects/pages/definition/index'], resolve),
meta: {
title: `${i18n.$t('Process definition')}`,
refresh_in_switched_tab: true
},
redirect: {
name: 'projects-definition-list'
},
children: [
{
path: '/projects/definition/list',
name: 'projects-definition-list',
component: resolve => require(['../pages/projects/pages/definition/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Process definition')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/definition/list/:id',
name: 'projects-definition-details',
component: resolve => require(['../pages/projects/pages/definition/pages/details/index'], resolve),
meta: {
title: `${i18n.$t('Process definition details')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/definition/create',
name: 'definition-create',
component: resolve => require(['../pages/projects/pages/definition/pages/create/index'], resolve),
meta: {
title: `${i18n.$t('Create process definition')}`
}
},
{
path: '/projects/definition/tree/:id',
name: 'definition-tree-view-index',
component: resolve => require(['../pages/projects/pages/definition/pages/tree/index'], resolve),
meta: {
title: `${i18n.$t('TreeView')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/definition/list/timing/:id',
name: 'definition-timing-details',
component: resolve => require(['../pages/projects/pages/definition/timing/index'], resolve),
meta: {
title: `${i18n.$t('Scheduled task list')}`,
refresh_in_switched_tab: true
}
}
]
},
{
path: '/projects/instance',
name: 'instance',
component: resolve => require(['../pages/projects/pages/instance/index'], resolve),
meta: {
title: `${i18n.$t('Process Instance')}`
},
redirect: {
name: 'projects-instance-list'
},
children: [
{
path: '/projects/instance/list',
name: 'projects-instance-list',
component: resolve => require(['../pages/projects/pages/instance/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Process Instance')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/instance/list/:id',
name: 'projects-instance-details',
component: resolve => require(['../pages/projects/pages/instance/pages/details/index'], resolve),
meta: {
title: `${i18n.$t('Process instance details')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/instance/gantt/:id',
name: 'instance-gantt-index',
component: resolve => require(['../pages/projects/pages/instance/pages/gantt/index'], resolve),
meta: {
title: `${i18n.$t('Gantt')}`,
refresh_in_switched_tab: true
}
}
]
},
{
path: '/projects/task-instance',
name: 'task-instance',
component: resolve => require(['../pages/projects/pages/taskInstance'], resolve),
meta: {
title: `${i18n.$t('Task Instance')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/task-record',
name: 'task-record',
component: resolve => require(['../pages/projects/pages/taskRecord'], resolve),
meta: {
title: `${i18n.$t('Task record')}`,
refresh_in_switched_tab: true
}
},
{
path: '/projects/history-task-record',
name: 'history-task-record',
component: resolve => require(['../pages/projects/pages/historyTaskRecord'], resolve),
meta: {
title: `${i18n.$t('History task record')}`,
refresh_in_switched_tab: true
}
}
]
},
{
path: '/resource',
name: 'resource',
component: resolve => require(['../pages/resource/index'], resolve),
redirect: {
name: 'file'
},
meta: {
title: `${i18n.$t('Resources')}`,
refresh_in_switched_tab: true
},
children: [
{
path: '/resource/file',
name: 'file',
component: resolve => require(['../pages/resource/pages/file/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('File Manage')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/file/create',
name: 'resource-file-create',
component: resolve => require(['../pages/resource/pages/file/pages/create/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/file/createFolder',
name: 'resource-file-createFolder',
component: resolve => require(['../pages/resource/pages/file/pages/createFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/file/subFileFolder/:id',
name: 'resource-file-subFileFolder',
component: resolve => require(['../pages/resource/pages/file/pages/subFileFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/file/subFile/:id',
name: 'resource-file-subFile',
component: resolve => require(['../pages/resource/pages/file/pages/subFile/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/file/list/:id',
name: 'resource-file-details',
component: resolve => require(['../pages/resource/pages/file/pages/details/index'], resolve),
meta: {
title: `${i18n.$t('File Details')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/file/subdirectory/:id',
name: 'resource-file-subdirectory',
component: resolve => require(['../pages/resource/pages/file/pages/subdirectory/index'], resolve),
meta: {
title: `${i18n.$t('File Manage')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/file/edit/:id',
name: 'resource-file-edit',
component: resolve => require(['../pages/resource/pages/file/pages/edit/index'], resolve),
meta: {
title: `${i18n.$t('File Details')}`
}
},
{
path: '/resource/udf',
name: 'udf',
component: resolve => require(['../pages/resource/pages/udf/index'], resolve),
meta: {
title: `${i18n.$t('UDF manage')}`,
refresh_in_switched_tab: true
},
children: [
{
path: '/resource/udf',
name: 'resource-udf',
component: resolve => require(['../pages/resource/pages/udf/pages/resource/index'], resolve),
meta: {
title: `${i18n.$t('UDF Resources')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/udf/subUdfDirectory/:id',
name: 'resource-udf-subUdfDirectory',
component: resolve => require(['../pages/resource/pages/udf/pages/subUdfDirectory/index'], resolve),
meta: {
title: `${i18n.$t('UDF Resources')}`,
refresh_in_switched_tab: true
}
},
{
path: '/resource/udf/createUdfFolder',
name: 'resource-udf-createUdfFolder',
component: resolve => require(['../pages/resource/pages/udf/pages/createUdfFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/udf/subCreateUdfFolder/:id',
name: 'resource-udf-subCreateUdfFolder',
component: resolve => require(['../pages/resource/pages/udf/pages/subUdfFolder/index'], resolve),
meta: {
title: `${i18n.$t('Create Resource')}`
}
},
{
path: '/resource/func',
name: 'resource-func',
component: resolve => require(['../pages/resource/pages/udf/pages/function/index'], resolve),
meta: {
title: `${i18n.$t('UDF Function')}`
}
}
]
}
]
},
{
path: '/datasource',
name: 'datasource',
component: resolve => require(['../pages/datasource/index'], resolve),
meta: {
title: `${i18n.$t('Datasource')}`
},
redirect: {
name: 'datasource-list'
},
children: [
{
path: '/datasource/list',
name: 'datasource-list',
component: resolve => require(['../pages/datasource/pages/list/index'], resolve),
meta: {
title: `${i18n.$t('Datasource')}`
}
}
]
},
{
path: '/security',
name: 'security',
component: resolve => require(['../pages/security/index'], resolve),
meta: {
title: `${i18n.$t('Security')}`
},
redirect: {
name: 'tenement-manage'
},
children: [
{
path: '/security/tenant',
name: 'tenement-manage',
component: resolve => require(['../pages/security/pages/tenement/index'], resolve),
meta: {
title: `${i18n.$t('Tenant Manage')}`
}
},
{
path: '/security/users',
name: 'users-manage',
component: resolve => require(['../pages/security/pages/users/index'], resolve),
meta: {
title: `${i18n.$t('User Manage')}`,
refresh_in_switched_tab: true
}
},
{
path: '/security/warning-groups',
name: 'warning-groups-manage',
component: resolve => require(['../pages/security/pages/warningGroups/index'], resolve),
meta: {
title: `${i18n.$t('Warning group manage')}`
}
},
{
path: '/security/warning-instance',
name: 'warning-instance-manage',
component: resolve => require(['../pages/security/pages/warningInstance/index'], resolve),
meta: {
title: `${i18n.$t('Warning instance manage')}`
}
},
{
path: '/security/queue',
name: 'queue-manage',
component: resolve => require(['../pages/security/pages/queue/index'], resolve),
meta: {
title: `${i18n.$t('Queue manage')}`
}
},
{
path: '/security/worker-groups',
name: 'worker-groups-manage',
component: resolve => require(['../pages/security/pages/workerGroups/index'], resolve),
meta: {
title: `${i18n.$t('Worker group manage')}`
}
},
{
path: '/security/token',
name: 'token-manage',
component: resolve => require(['../pages/security/pages/token/index'], resolve),
meta: {
title: `${i18n.$t('Token manage')}`
}
}
]
},
{
path: '/user',
name: 'user',
component: resolve => require(['../pages/user/index'], resolve),
meta: {
title: `${i18n.$t('User Center')}`
},
redirect: {
name: 'account'
},
children: [
{
path: '/user/account',
name: 'account',
component: resolve => require(['../pages/user/pages/account/index'], resolve),
meta: {
title: `${i18n.$t('User Information')}`
}
},
{
path: '/user/password',
name: 'password',
component: resolve => require(['../pages/user/pages/password/index'], resolve),
meta: {
title: `${i18n.$t('Edit password')}`
}
},
{
path: '/user/token',
name: 'token',
component: resolve => require(['../pages/user/pages/token/index'], resolve),
meta: {
title: `${i18n.$t('Token manage')}`
}
}
]
},
{
path: '/monitor',
name: 'monitor',
component: resolve => require(['../pages/monitor/index'], resolve),
meta: {
title: 'monitor'
},
redirect: {
name: 'servers-master'
},
children: [
{
path: '/monitor/servers/master',
name: 'servers-master',
component: resolve => require(['../pages/monitor/pages/servers/master'], resolve),
meta: {
title: `${i18n.$t('Service-Master')}`,
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/worker',
name: 'servers-worker',
component: resolve => require(['../pages/monitor/pages/servers/worker'], resolve),
meta: {
title: `${i18n.$t('Service-Worker')}`,
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/alert',
name: 'servers-alert',
component: resolve => require(['../pages/monitor/pages/servers/alert'], resolve),
meta: {
title: 'Alert',
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/rpcserver',
name: 'servers-rpcserver',
component: resolve => require(['../pages/monitor/pages/servers/rpcserver'], resolve),
meta: {
title: 'Rpcserver',
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/zookeeper',
name: 'servers-zookeeper',
component: resolve => require(['../pages/monitor/pages/servers/zookeeper'], resolve),
meta: {
title: 'Zookeeper',
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/apiserver',
name: 'servers-apiserver',
component: resolve => require(['../pages/monitor/pages/servers/apiserver'], resolve),
meta: {
title: 'Apiserver',
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/db',
name: 'servers-db',
component: resolve => require(['../pages/monitor/pages/servers/db'], resolve),
meta: {
title: 'DB',
refresh_in_switched_tab: true
}
},
{
path: '/monitor/servers/statistics',
name: 'statistics',
component: resolve => require(['../pages/monitor/pages/servers/statistics'], resolve),
meta: {
title: 'statistics',
refresh_in_switched_tab: true
}
}
]
}
]
})
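// wrap Router.push so the NavigationDuplicated rejection thrown by
// vue-router 3.1+ when navigating to the current route is swallowed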
const VueRouterPush = Router.prototype.push
Router.prototype.push = function push (to) {
return VueRouterPush.call(this, to).catch(err => err)
}
router.beforeEach((to, from, next) => {
const $body = $('body')
$body.find('.tooltip.fade.top.in').remove()
if (to.meta.title) {
document.title = `${to.meta.title} - DolphinScheduler`
}
next()
})
export default router
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,094 | [Improvement][UI] Add a main switch for refresh_in_switched_tab | **Describe the question**
Add a main switch for `refresh_in_switched_tab`
**What are the current deficiencies and the benefits of improvement**
- If a user does not want to automatically refresh the page in switched tab, he can set a main switch to `false`
**Which version of DolphinScheduler:**
-[dev]
@lijufeng2016 once mentioned this user requirement. | https://github.com/apache/dolphinscheduler/issues/5094 | https://github.com/apache/dolphinscheduler/pull/5095 | 9008fa4b0c43a615270f080e9d8c3c2a324bee9c | 91c29e6ca35b6ca81172f8b8c4ce3191d094852f | "2021-03-18T07:34:01Z" | java | "2021-03-19T03:58:33Z" | dolphinscheduler-ui/src/lib/external/config.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* project external config
*/
export default {
// task record switch
recordSwitch: false
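  // a global switch for the tab auto-refresh behavior (issue #5094) would
  // also belong here; the flag name below is only an assumption, e.g.:
  //   switchTab: false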
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,103 | [Bug][Resource] The file name of File and UDF resource not changed and cannot re-upload after renaming name | **To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'File Manage'
2. Upload a file
3. Rename the file
4. See error (picture 1)
5. The same error also occurs in 'UDF Resources' (picture 2)
6. In 1.3.x version, if the renamed file is re-uploaded, 'resource already exists' error occurs (picture 3)
**Expected behavior**
Bug fixed.
**Screenshots**
In **File Manage**: (picture 1)
![image](https://user-images.githubusercontent.com/4902714/111659876-85dfed80-8848-11eb-8fa1-83dc95e816c8.png)
In **UDF Resources**: (picture 2)
![image](https://user-images.githubusercontent.com/4902714/111659982-9e500800-8848-11eb-8718-3b9fc4b4660d.png)
In **File Manage** to **Re-upload**: (picture 3)
![image](https://user-images.githubusercontent.com/4902714/111734181-8f06a400-88b4-11eb-8340-ed4ead79c97c.png)
**Which version of Dolphin Scheduler:**
-[1.3.x]
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5103 | https://github.com/apache/dolphinscheduler/pull/5107 | 91c29e6ca35b6ca81172f8b8c4ce3191d094852f | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | "2021-03-18T16:20:49Z" | java | "2021-03-19T06:51:12Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ResourcesServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.ALIAS;
import static org.apache.dolphinscheduler.common.Constants.CONTENT;
import static org.apache.dolphinscheduler.common.Constants.JAR;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.RegexUtils;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.BooleanUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.ResourcesUser;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.apache.commons.beanutils.BeanMap;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.SerializationFeature;
/**
* resources service impl
*/
@Service
public class ResourcesServiceImpl extends BaseServiceImpl implements ResourcesService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceImpl.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
result = verifyResource(loginUser, type, fullName, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (DuplicateKeyException e) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new ServiceException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirectory(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
result = verifyPid(loginUser, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
result = verifyFile(name, type, file);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// check resource name exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", RegexUtils.escapeNRT(name));
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new ServiceException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", RegexUtils.escapeNRT(name), RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type) {
Boolean existResource = resourcesMapper.existResource(fullName, userId, type);
return BooleanUtils.isTrue(existResource);
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
//check resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/") + 1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
result = verifyFile(name, type, file);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
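        // NOTE (issue #5103): when only the alias is renamed (file == null),
        // fileName keeps the original upload name, so the displayed file name
        // is never updated on rename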
if (file != null) {
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory()) {
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
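                    // quote the replacement so '$' and '\' in the new name are
                    // treated literally; originFullName itself is still
                    // interpreted as a regex by replaceFirst below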
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList;
Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
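        // the (true, true) flags presumably mean deleteSource/overwrite,
        // so this copy behaves as a rename on HDFS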
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
private Result<Object> verifyFile(String name, ResourceType type, MultipartFile file) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// the uploaded file suffix must match the resource name suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
// the renamed file suffix and the original suffix must be consistent
logger.error("the renamed file suffix and the original suffix must be consistent: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@Override
public Map<String, Object> queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>();
Page<Resource> page = new Page<>(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
}
if (directoryId != -1) {
Resource directory = resourcesMapper.selectById(directoryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,directoryId, type.ordinal(), searchVal);
PageInfo<Resource> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirectory(User loginUser,String fullName,ResourceType type,Result<Object> result) {
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// the uploaded file suffix must match the full name suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@Override
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>();
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list by program type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@Override
public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) {
Map<String, Object> result = new HashMap<>();
String suffix = ".jar";
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
}
if (programType != null) {
switch (programType) {
case JAVA:
case SCALA:
break;
case PYTHON:
suffix = ".py";
break;
default:
}
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(suffix,new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws IOException exception
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> delete(User loginUser, int resourceId) throws IOException {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// get all resource ids referenced by released process definitions
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
// if resource type is UDF, check whether it is bound by any UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}", udfFuncs);
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
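// keep only the ids present in both sets: resources referenced by released
// process definitions AND resources inside the subtree being deleted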
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name does not exist, otherwise false
*/
@Override
public Result<Object> verifyResourceName(String fullName, ResourceType type, User loginUser) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, RegexUtils.escapeNRT(fullName));
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if (tenant != null) {
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if (HadoopUtils.getInstance().exists(hdfsFilename)) {
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, RegexUtils.escapeNRT(fullName), hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
} else {
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid does not exist, otherwise false
*/
@Override
public Result<Object> queryResource(String fullName, Integer id, ResourceType type) {
Result<Object> result = new Result<>();
if (StringUtils.isBlank(fullName) && id == null) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@Override
public Result<Object> readResource(int resourceId, int skipLineNum, int limit) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// check whether the file suffix supports online view
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {}", hdfsFileName);
try {
if (HadoopUtils.getInstance().exists(hdfsFileName)) {
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
} else {
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @param pid pid
* @param currentDir current directory
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
result = verifyResource(loginUser, type, fullName, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new ServiceException(result.getMsg());
}
return result;
}
private Result<Object> checkResourceUploadStartupState() {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
// check whether resource upload is enabled
if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
return result;
}
private Result<Object> verifyResource(User loginUser, ResourceType type, String fullName, int pid) {
Result<Object> result = verifyResourceName(fullName, type, loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
return verifyPid(loginUser, pid);
}
private Result<Object> verifyPid(User loginUser, int pid) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
return result;
}
/**
* update resource content
*
* @param resourceId resource id
* @param content content
* @return update result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> updateResourceContent(int resourceId, String content) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// check whether the file suffix supports online edit
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new ServiceException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result<Object> uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result<Object> result = new Result<>();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// failed to write file
logger.error("write file {} failed, content is {}", localFilename, RegexUtils.escapeNRT(content));
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {}, resource dir is {}", hdfsFileName, resourcePath);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws IOException exception
*/
@Override
public org.springframework.core.io.Resource downloadResource(int resourceId) throws IOException {
// check whether resource upload is enabled
if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new ServiceException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new ServiceException("can't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user id {} not exists", userId);
throw new ServiceException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null) {
logger.error("tenant id {} not exists", user.getTenantId());
throw new ServiceException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {}, download local filename is {}", hdfsFileName, localFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all resources that can be authorized to the specified user
*
* @param loginUser login user
* @param userId user id
* @return authorizable resource tree
*/
@Override
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
} else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list;
if (resourceList != null && !resourceList.isEmpty()) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
} else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
//only admin can operate
if (isNotAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@Override
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@Override
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(visit);
String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* remove the authorized resources from the candidate set
*
* @param resourceSet candidate resource set (modified in place)
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return tenant code
*/
private String getTenantCode(int userId,Result<Object> result) {
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null) {
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether to include the resource itself in the children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf) {
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if (resource.isDirectory()) {
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList) {
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
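// Illustrative usage (not part of the original class): collecting the ids of a
// directory subtree before a batch operation, assuming 'dir' is a directory
// Resource loaded from resourcesMapper:
//   List<Integer> subtreeIds = listAllChildren(dir, true);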
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
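A minimal sketch of what such a switch could look like, assuming two new fields on `SqlParameters` (both field names are hypothetical, not confirmed by this issue):

```java
// hypothetical additions to SqlParameters -- names are assumptions
private Boolean sendEmail;   // switch: whether to send the query result by mail
private Integer displayRows; // how many head rows of the result to print into the task log
```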
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import java.util.regex.Pattern;
/**
* Constants
*/
public final class Constants {
private Constants() {
throw new UnsupportedOperationException("Construct Constants");
}
/**
* quartz config
*/
public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId";
public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon";
public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties";
public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class";
public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount";
public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons";
public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority";
public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class";
public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix";
public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered";
public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold";
public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval";
public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock";
public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource";
public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class";
/**
* quartz config default value
*/
public static final String QUARTZ_TABLE_PREFIX = "QRTZ_";
public static final String QUARTZ_MISFIRETHRESHOLD = "60000";
public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000";
public static final String QUARTZ_DATASOURCE = "myDs";
public static final String QUARTZ_THREADCOUNT = "25";
public static final String QUARTZ_THREADPRIORITY = "5";
public static final String QUARTZ_INSTANCENAME = "DolphinScheduler";
public static final String QUARTZ_INSTANCEID = "AUTO";
public static final String QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true";
/**
* common properties path
*/
public static final String COMMON_PROPERTIES_PATH = "/common.properties";
/**
* fs.defaultFS
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
* yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
public static final String YARN_RESOURCEMANAGER_HA_XX = "xx";
/**
* yarn.application.status.address
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* yarn.job.history.status.address
*/
public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs/s3 configuration
* resource.upload.path
*/
public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";
/**
* data basedir path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* dolphinscheduler.env.path
*/
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
/**
* environment properties default path
*/
public static final String ENV_PATH = "env/dolphinscheduler_env.sh";
/**
* python home
*/
public static final String PYTHON_HOME = "PYTHON_HOME";
/**
* resource.view.suffixs
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true";
/**
* string true
*/
public static final String STRING_TRUE = "true";
/**
* string false
*/
public static final String STRING_FALSE = "false";
/**
* resource storage type
*/
public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
/**
* all servers directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers";
/**
* MasterServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
/**
* MasterServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters";
/**
* WorkerServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers";
/**
* MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* slash /
*/
public static final String SLASH = "/";
/**
* COLON :
*/
public static final String COLON = ":";
/**
* SPACE " "
*/
public static final String SPACE = " ";
/**
* SINGLE_SLASH /
*/
public static final String SINGLE_SLASH = "/";
/**
* DOUBLE_SLASH //
*/
public static final String DOUBLE_SLASH = "//";
/**
* SINGLE_QUOTES "'"
*/
public static final String SINGLE_QUOTES = "'";
/**
* DOUBLE_QUOTES "\""
*/
public static final String DOUBLE_QUOTES = "\"";
/**
* SEMICOLON ;
*/
public static final String SEMICOLON = ";";
/**
* EQUAL SIGN
*/
public static final String EQUAL_SIGN = "=";
/**
* AT SIGN
*/
public static final String AT_SIGN = "@";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory";
public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg";
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMddHHmmss
*/
public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
/**
* date format of yyyyMMddHHmmssSSS
*/
public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS";
/**
* http connect time out
*/
public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;
/**
* http connect request time out
*/
public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;
/**
* httpclient socket timeout
*/
public static final int SOCKET_TIMEOUT = 60 * 1000;
/**
* http header
*/
public static final String HTTP_HEADER_UNKNOWN = "unKnown";
/**
* http X-Forwarded-For
*/
public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
/**
* http X-Real-IP
*/
public static final String HTTP_X_REAL_IP = "X-Real-IP";
/**
* UTF-8
*/
public static final String UTF_8 = "UTF-8";
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
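// e.g. "user.name@example.com" matches REGEX_MAIL_NAME (illustrative sample only)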
/**
* read permission
*/
public static final int READ_PERMISSION = 2 * 1;
/**
* write permission
*/
public static final int WRITE_PERMISSION = 2 * 2;
/**
* execute permission
*/
public static final int EXECUTE_PERMISSION = 1;
/**
* default admin permission
*/
public static final int DEFAULT_ADMIN_PERMISSION = 7;
/**
* all permissions
*/
public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;
/**
* max task timeout
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* master cpu load
*/
public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* master reserved memory
*/
public static final double DEFAULT_MASTER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker cpu load
*/
public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* worker reserved memory
*/
public static final double DEFAULT_WORKER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker host weight
*/
public static final int DEFAULT_WORKER_HOST_WEIGHT = 100;
/**
* default log cache rows num, output when the number is reached
*/
public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;
/**
* log flush interval, output when the interval is reached
*/
public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;
/**
* time unit: seconds to minutes
*/
public static final int SEC_2_MINUTES_TIME_UNIT = 60;
/**
* rpc port
*/
public static final int RPC_PORT = 50051;
/**
* alert rpc port
*/
public static final int ALERT_RPC_PORT = 50052;
/**
* forbid running task
*/
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* datasource configuration path
*/
public static final String DATASOURCE_PROPERTIES = "/datasource.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
public static final String TASK_RECORD_FLAG = "task.record.flag";
public static final String TASK_RECORD_USER = "task.record.datasource.username";
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static final String NULL = "NULL";
public static final String THREAD_NAME_MASTER_SERVER = "Master-Server";
public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server";
public static final String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static final String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
/**
* command parameter keys
*/
public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId";
public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId";
public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0";
public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList";
public static final String CMD_PARAM_START_PARAMS = "StartParams";
public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams";
/**
* complement data start date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";
/**
* complement data end date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";
/**
* hadoop configuration
*/
public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";
/**
* data source config
*/
public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";
public static final String DEVELOPMENT = "development";
public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";
/**
* sleep time
*/
public static final int SLEEP_TIME_MILLIS = 1000;
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10;
public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11;
/**
* jar
*/
public static final String JAR = "jar";
/**
* hadoop
*/
public static final String HADOOP = "hadoop";
/**
* -D <property>=<value>
*/
public static final String D = "-D";
/**
* -D mapreduce.job.name=name
*/
public static final String MR_NAME = "mapreduce.job.name";
/**
* -D mapreduce.job.queuename=queuename
*/
public static final String MR_QUEUE = "mapreduce.job.queuename";
/**
* spark params constant
*/
public static final String MASTER = "--master";
public static final String DEPLOY_MODE = "--deploy-mode";
/**
* --class CLASS_NAME
*/
public static final String MAIN_CLASS = "--class";
/**
* --driver-cores NUM
*/
public static final String DRIVER_CORES = "--driver-cores";
/**
* --driver-memory MEM
*/
public static final String DRIVER_MEMORY = "--driver-memory";
/**
* --num-executors NUM
*/
public static final String NUM_EXECUTORS = "--num-executors";
/**
* --executor-cores NUM
*/
public static final String EXECUTOR_CORES = "--executor-cores";
/**
* --executor-memory MEM
*/
public static final String EXECUTOR_MEMORY = "--executor-memory";
/**
* --name NAME
*/
public static final String SPARK_NAME = "--name";
/**
* --queue QUEUE
*/
public static final String SPARK_QUEUE = "--queue";
/**
* exit code success
*/
public static final int EXIT_CODE_SUCCESS = 0;
/**
* exit code kill
*/
public static final int EXIT_CODE_KILL = 137;
/**
* exit code failure
*/
public static final int EXIT_CODE_FAILURE = -1;
/**
* date format of yyyyMMdd
*/
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/**
* system date(yyyyMMddHHmmss)
*/
public static final String PARAMETER_DATETIME = "system.datetime";
/**
* system date(yyyymmdd) today
*/
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/**
* system date(yyyymmdd) yesterday
*/
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/**
* ACCEPTED
*/
public static final String ACCEPTED = "ACCEPTED";
/**
* SUCCEEDED
*/
public static final String SUCCEEDED = "SUCCEEDED";
/**
* NEW
*/
public static final String NEW = "NEW";
/**
* NEW_SAVING
*/
public static final String NEW_SAVING = "NEW_SAVING";
/**
* SUBMITTED
*/
public static final String SUBMITTED = "SUBMITTED";
/**
* FAILED
*/
public static final String FAILED = "FAILED";
/**
* KILLED
*/
public static final String KILLED = "KILLED";
/**
* RUNNING
*/
public static final String RUNNING = "RUNNING";
/**
* underline "_"
*/
public static final String UNDERLINE = "_";
/**
* quartz job prefix
*/
public static final String QUARTZ_JOB_PRIFIX = "job";
/**
* quartz job group prifix
*/
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/**
* projectId
*/
public static final String PROJECT_ID = "projectId";
/**
* scheduleId
*/
public static final String SCHEDULE_ID = "scheduleId";
/**
* schedule
*/
public static final String SCHEDULE = "schedule";
/**
* application regex
*/
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = OSUtils.isWindows() ? "handle" : "pid";
/**
* month_begin
*/
public static final String MONTH_BEGIN = "month_begin";
/**
* add_months
*/
public static final String ADD_MONTHS = "add_months";
/**
* month_end
*/
public static final String MONTH_END = "month_end";
/**
* week_begin
*/
public static final String WEEK_BEGIN = "week_begin";
/**
* week_end
*/
public static final String WEEK_END = "week_end";
/**
* timestamp
*/
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String LOCAL_PARAMS_LIST = "localParamsList";
public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance";
public static final String TASK_TYPE = "taskType";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
public static final String QUEUE = "queue";
public static final String QUEUE_NAME = "queueName";
public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0;
public static final int LOG_QUERY_LIMIT = 4096;
/**
* master/worker server use for zk
*/
public static final String MASTER_PREFIX = "master";
public static final String WORKER_PREFIX = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
/**
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* kerberos
*/
public static final String KERBEROS = "kerberos";
/**
* kerberos expire time
*/
public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* com.amazonaws.services.s3.enableV4
*/
public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
* default worker group id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
/**
* task log info format
*/
public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";
/**
* hive conf
*/
public static final String HIVE_CONF = "hiveconf:";
/**
* flink
*/
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
public static final String FLINK_APP_NAME = "-ynm";
public static final String FLINK_QUEUE = "-yqu";
public static final String FLINK_TASK_MANAGE = "-yn";
public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
public static final String FLINK_MAIN_CLASS = "-c";
public static final String FLINK_PARALLELISM = "-p";
public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae";
public static final int[] NOT_TERMINATED_STATES = new int[] {
ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.WAITTING_THREAD.ordinal(),
ExecutionStatus.WAITTING_DEPEND.ordinal()
};
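// Illustrative use (the mapper call is an assumption, not part of this file):
// these ordinals are typically bound into a SQL IN (...) clause when scanning
// for process instances that are still alive, e.g.
//   processInstanceMapper.queryByHostAndStatus(host, Constants.NOT_TERMINATED_STATES);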
/**
* status
*/
public static final String STATUS = "status";
/**
* message
*/
public static final String MSG = "msg";
/**
* data total
*/
public static final String COUNT = "count";
/**
* page size
*/
public static final String PAGE_SIZE = "pageSize";
/**
* current page no
*/
public static final String PAGE_NUMBER = "pageNo";
/**
* data list
*/
public static final String DATA_LIST = "data";
public static final String TOTAL_LIST = "totalList";
public static final String CURRENT_PAGE = "currentPage";
public static final String TOTAL_PAGE = "totalPage";
public static final String TOTAL = "total";
/**
* workflow
*/
public static final String WORKFLOW_LIST = "workFlowList";
public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList";
/**
* session user
*/
public static final String SESSION_USER = "session.user";
public static final String SESSION_ID = "sessionId";
public static final String PASSWORD_DEFAULT = "******";
/**
* locale
*/
public static final String LOCALE_LANGUAGE = "language";
/**
* driver
*/
public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver";
/**
* database type
*/
public static final String MYSQL = "MYSQL";
public static final String POSTGRESQL = "POSTGRESQL";
public static final String HIVE = "HIVE";
public static final String SPARK = "SPARK";
public static final String CLICKHOUSE = "CLICKHOUSE";
public static final String ORACLE = "ORACLE";
public static final String SQLSERVER = "SQLSERVER";
public static final String DB2 = "DB2";
public static final String PRESTO = "PRESTO";
/**
* jdbc url
*/
public static final String JDBC_MYSQL = "jdbc:mysql://";
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
public static final String JDBC_PRESTO = "jdbc:presto://";
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf";
public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername";
public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath";
/**
* session timeout
*/
public static final int SESSION_TIME_OUT = 7200;
public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
public static final String UDF = "UDF";
public static final String CLASS = "class";
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
/**
* dataSource sensitive param
*/
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
/**
* default worker group
*/
public static final String DEFAULT_WORKER_GROUP = "default";
public static final Integer TASK_INFO_LENGTH = 5;
/**
* schedule time
*/
public static final String PARAMETER_SHECDULE_TIME = "schedule.time";
/**
* authorize writable perm
*/
public static final int AUTHORIZE_WRITABLE_PERM = 7;
/**
* authorize readable perm
*/
public static final int AUTHORIZE_READABLE_PERM = 4;
/**
* plugin configurations
*/
public static final String PLUGIN_JAR_SUFFIX = ".jar";
public static final int NORMAL_NODE_STATUS = 0;
public static final int ABNORMAL_NODE_STATUS = 1;
public static final String START_TIME = "start time";
public static final String END_TIME = "end time";
public static final String START_END_DATE = "startDate,endDate";
/**
* system line separator
*/
public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");
/**
* net system properties
*/
public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred";
public static final String EXCEL_SUFFIX_XLS = ".xls";
/**
* datasource encryption salt
*/
public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";
/**
* network IP priority strategy, default: inner before outer
*/
public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";
/**
* exec shell scripts
*/
public static final String SH = "sh";
/**
* pstree, used to get the pid and its sub pids
*/
public static final String PSTREE = "pstree";
/**
* docker & kubernetes
*/
public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER"));
public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.task.sql;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import java.util.ArrayList;
import java.util.List;
/**
* Sql/Hql parameter
*/
public class SqlParameters extends AbstractParameters {
/**
     * data source type, e.g. MYSQL, POSTGRES, HIVE ...
*/
private String type;
/**
* datasource id
*/
private int datasource;
/**
* sql
*/
private String sql;
/**
* sql type
* 0 query
* 1 NON_QUERY
*/
private int sqlType;
/**
* udf list
*/
private String udfs;
/**
* show type
* 0 TABLE
* 1 TEXT
* 2 attachment
* 3 TABLE+attachment
*/
private String showType;
/**
* SQL connection parameters
*/
private String connParams;
/**
* Pre Statements
*/
private List<String> preStatements;
/**
* Post Statements
*/
private List<String> postStatements;
/**
* groupId
*/
private int groupId;
/**
* title
*/
private String title;
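
    /*
     * Illustrative sketch (not part of the original source): these fields are
     * populated by deserializing the task's JSON parameters, roughly as below;
     * the parameter values are made-up examples.
     *
     *     String taskParams = "{\"type\":\"MYSQL\",\"datasource\":1,"
     *             + "\"sql\":\"select count(*) from t\",\"sqlType\":0,"
     *             + "\"title\":\"daily report\",\"groupId\":1}";
     *     SqlParameters params = JSONUtils.parseObject(taskParams, SqlParameters.class);
     */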
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public int getDatasource() {
return datasource;
}
public void setDatasource(int datasource) {
this.datasource = datasource;
}
public String getSql() {
return sql;
}
public void setSql(String sql) {
this.sql = sql;
}
public String getUdfs() {
return udfs;
}
public void setUdfs(String udfs) {
this.udfs = udfs;
}
public int getSqlType() {
return sqlType;
}
public void setSqlType(int sqlType) {
this.sqlType = sqlType;
}
public String getShowType() {
return showType;
}
public void setShowType(String showType) {
this.showType = showType;
}
public String getConnParams() {
return connParams;
}
public void setConnParams(String connParams) {
this.connParams = connParams;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public List<String> getPreStatements() {
return preStatements;
}
public void setPreStatements(List<String> preStatements) {
this.preStatements = preStatements;
}
public List<String> getPostStatements() {
return postStatements;
}
public void setPostStatements(List<String> postStatements) {
this.postStatements = postStatements;
}
public int getGroupId() {
return groupId;
}
public void setGroupId(int groupId) {
this.groupId = groupId;
}
@Override
public boolean checkParameters() {
return datasource != 0 && StringUtils.isNotEmpty(type) && StringUtils.isNotEmpty(sql);
}
@Override
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
@Override
public String toString() {
return "SqlParameters{"
+ "type='" + type + '\''
+ ", datasource=" + datasource
+ ", sql='" + sql + '\''
+ ", sqlType=" + sqlType
+ ", udfs='" + udfs + '\''
+ ", showType='" + showType + '\''
+ ", connParams='" + connParams + '\''
+ ", groupId='" + groupId + '\''
+ ", title='" + title + '\''
+ ", preStatements=" + preStatements
+ ", postStatements=" + postStatements
+ '}';
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/SqlParametersTest.java | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sql;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlBinds;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand;
import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.UDFUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* sql task
*/
public class SqlTask extends AbstractTask {
/**
* sql parameters
*/
private SqlParameters sqlParameters;
/**
* alert dao
*/
private AlertDao alertDao;
/**
* base datasource
*/
private BaseDataSource baseDataSource;
/**
* taskExecutionContext
*/
private TaskExecutionContext taskExecutionContext;
/**
* default query sql limit
*/
private static final int LIMIT = 10000;
private AlertClientService alertClientService;
public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger, AlertClientService alertClientService) {
super(taskExecutionContext, logger);
this.taskExecutionContext = taskExecutionContext;
logger.info("sql task params {}", taskExecutionContext.getTaskParams());
this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class);
if (!sqlParameters.checkParameters()) {
throw new RuntimeException("sql task params is not valid");
}
this.alertClientService = alertClientService;
this.alertDao = SpringApplicationContext.getBean(AlertDao.class);
}
@Override
public void handle() throws Exception {
// set the name of the current thread
String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
logger.info("Full sql parameters: {}", sqlParameters);
logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {}",
sqlParameters.getType(),
sqlParameters.getDatasource(),
sqlParameters.getSql(),
sqlParameters.getLocalParams(),
sqlParameters.getUdfs(),
sqlParameters.getShowType(),
sqlParameters.getConnParams());
try {
SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext();
// get datasource
baseDataSource = DataSourceFactory.getDatasource(DbType.valueOf(sqlParameters.getType()),
sqlTaskExecutionContext.getConnectionParams());
            // build the SQL to execute together with its parameter maps
SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql());
List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements())
.orElse(new ArrayList<>())
.stream()
.map(this::getSqlAndSqlParamsMap)
.collect(Collectors.toList());
List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements())
.orElse(new ArrayList<>())
.stream()
.map(this::getSqlAndSqlParamsMap)
.collect(Collectors.toList());
List<String> createFuncs = UDFUtils.createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(),
logger);
// execute sql task
executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs, sqlParameters.getLocalParams());
setExitStatusCode(Constants.EXIT_CODE_SUCCESS);
} catch (Exception e) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
logger.error("sql task error", e);
throw e;
}
}
/**
     * build the SQL ready for execution and its parameter map
*
* @return SqlBinds
*/
private SqlBinds getSqlAndSqlParamsMap(String sql) {
Map<Integer, Property> sqlParamsMap = new HashMap<>();
StringBuilder sqlBuilder = new StringBuilder();
        // combine the user-defined, global and local parameters for this task
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
sqlParameters.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
        // assemble the SQL according to the final user-defined variables
if (paramsMap == null) {
sqlBuilder.append(sql);
return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
}
if (StringUtils.isNotEmpty(sqlParameters.getTitle())) {
String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(),
ParamUtils.convert(paramsMap));
logger.info("SQL title : {}", title);
sqlParameters.setTitle(title);
}
        // replace $[...] time variables in the sql with the schedule time when re-running history jobs or batch complement jobs
sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime());
        // ${...} placeholders (optionally quoted) are matched so they can be escaped and bound as JDBC parameters
String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*";
setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap);
        // replace !{...} placeholders in the sql with their original values; they do not participate in precompilation
String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*";
sql = replaceOriginalValue(sql, rgexo, paramsMap);
        // replace each ${...} placeholder in the SQL statement with the '?' JDBC placeholder
String formatSql = sql.replaceAll(rgex, "?");
sqlBuilder.append(formatSql);
        // print the replaced sql
printReplacedSql(sql, formatSql, rgex, sqlParamsMap);
return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
}
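
    /*
     * Illustrative sketch (not in the original source), assuming a user-defined
     * parameter dt=2021-03-18:
     *     select * from t where ds = '${dt}'
     * is rewritten to
     *     select * from t where ds = ?
     * and sqlParamsMap records {1 -> Property("dt", ..., "2021-03-18")}, which
     * prepareStatementAndBind() later binds to position 1.
     */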
public String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) {
Pattern pattern = Pattern.compile(rgex);
while (true) {
Matcher m = pattern.matcher(content);
if (!m.find()) {
break;
}
String paramName = m.group(1);
String paramValue = sqlParamsMap.get(paramName).getValue();
content = m.replaceFirst(paramValue);
}
return content;
}
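
    /*
     * Sketch for illustration (made-up values): with a parameter table=user_log
     * in the params map, "select * from !{table}" becomes
     * "select * from user_log" before the statement is prepared, i.e. the value
     * is inlined directly instead of being bound as a JDBC parameter.
     */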
@Override
public AbstractParameters getParameters() {
return this.sqlParameters;
}
/**
* execute function and sql
*
* @param mainSqlBinds main sql binds
* @param preStatementsBinds pre statements binds
* @param postStatementsBinds post statements binds
* @param createFuncs create functions
*/
public void executeFuncAndSql(SqlBinds mainSqlBinds,
List<SqlBinds> preStatementsBinds,
List<SqlBinds> postStatementsBinds,
List<String> createFuncs,
List<Property> properties) {
Connection connection = null;
PreparedStatement stmt = null;
ResultSet resultSet = null;
try {
baseDataSource.setConnParams(sqlParameters.getConnParams());
// create connection
connection = baseDataSource.getConnection();
// create temp function
if (CollectionUtils.isNotEmpty(createFuncs)) {
createTempFunction(connection, createFuncs);
}
// pre sql
preSql(connection, preStatementsBinds);
stmt = prepareStatementAndBind(connection, mainSqlBinds);
String result = null;
// decide whether to executeQuery or executeUpdate based on sqlType
if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) {
                // query statements need to be converted to a JSON array and handed to the alert service to send
resultSet = stmt.executeQuery();
result = resultProcess(resultSet);
} else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) {
// non query statement
String updateResult = String.valueOf(stmt.executeUpdate());
result = setNonQuerySqlReturn(updateResult, properties);
}
postSql(connection, postStatementsBinds);
this.setResultString(result);
} catch (Exception e) {
logger.error("execute sql error", e);
            throw new RuntimeException("execute sql error", e);
} finally {
close(resultSet, stmt, connection);
}
}
public String setNonQuerySqlReturn(String updateResult, List<Property> properties) {
String result = null;
for (Property info :properties) {
if (Direct.OUT == info.getDirect()) {
List<Map<String,String>> updateRL = new ArrayList<>();
Map<String,String> updateRM = new HashMap<>();
updateRM.put(info.getProp(),updateResult);
updateRL.add(updateRM);
result = JSONUtils.toJsonString(updateRL);
break;
}
}
return result;
}
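
    /*
     * For example (hypothetical values): given an OUT property named "rows" and
     * an executeUpdate() result of 3, the returned string is [{"rows":"3"}].
     */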
/**
* result process
*
* @param resultSet resultSet
* @throws Exception Exception
*/
private String resultProcess(ResultSet resultSet) throws Exception {
ArrayNode resultJSONArray = JSONUtils.createArrayNode();
ResultSetMetaData md = resultSet.getMetaData();
int num = md.getColumnCount();
int rowCount = 0;
while (rowCount < LIMIT && resultSet.next()) {
ObjectNode mapOfColValues = JSONUtils.createObjectNode();
for (int i = 1; i <= num; i++) {
mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i)));
}
resultJSONArray.add(mapOfColValues);
rowCount++;
}
String result = JSONUtils.toJsonString(resultJSONArray);
logger.debug("execute sql : {}", result);
try {
sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets",
JSONUtils.toJsonString(resultJSONArray));
} catch (Exception e) {
logger.warn("sql task sendAttachment error! msg : {} ", e.getMessage());
}
return result;
}
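
    /*
     * Example of the produced JSON (made-up data): a result set with columns
     * id and name and two rows serializes to
     *     [{"id":1,"name":"a"},{"id":2,"name":"b"}]
     * truncated at LIMIT (10000) rows; the same string is also sent through
     * sendAttachment() as the alert content.
     */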
/**
* pre sql
*
* @param connection connection
* @param preStatementsBinds preStatementsBinds
*/
private void preSql(Connection connection,
List<SqlBinds> preStatementsBinds) throws Exception {
for (SqlBinds sqlBind : preStatementsBinds) {
try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) {
int result = pstmt.executeUpdate();
logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql());
}
}
}
/**
* post sql
*
* @param connection connection
* @param postStatementsBinds postStatementsBinds
*/
private void postSql(Connection connection,
List<SqlBinds> postStatementsBinds) throws Exception {
for (SqlBinds sqlBind : postStatementsBinds) {
try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) {
int result = pstmt.executeUpdate();
logger.info("post statement execute result: {},for sql: {}", result, sqlBind.getSql());
}
}
}
/**
* create temp function
*
* @param connection connection
* @param createFuncs createFuncs
*/
private void createTempFunction(Connection connection,
List<String> createFuncs) throws Exception {
try (Statement funcStmt = connection.createStatement()) {
for (String createFunc : createFuncs) {
logger.info("hive create function sql: {}", createFunc);
funcStmt.execute(createFunc);
}
}
}
/**
* close jdbc resource
*
* @param resultSet resultSet
* @param pstmt pstmt
* @param connection connection
*/
private void close(ResultSet resultSet,
PreparedStatement pstmt,
Connection connection) {
if (resultSet != null) {
try {
resultSet.close();
} catch (SQLException e) {
logger.error("close result set error : {}", e.getMessage(), e);
}
}
if (pstmt != null) {
try {
pstmt.close();
} catch (SQLException e) {
logger.error("close prepared statement error : {}", e.getMessage(), e);
}
}
if (connection != null) {
try {
connection.close();
} catch (SQLException e) {
logger.error("close connection error : {}", e.getMessage(), e);
}
}
}
/**
* preparedStatement bind
*
* @param connection connection
* @param sqlBinds sqlBinds
* @return PreparedStatement
* @throws Exception Exception
*/
private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) throws Exception {
// is the timeout set
boolean timeoutFlag = TaskTimeoutStrategy.of(taskExecutionContext.getTaskTimeoutStrategy()) == TaskTimeoutStrategy.FAILED
|| TaskTimeoutStrategy.of(taskExecutionContext.getTaskTimeoutStrategy()) == TaskTimeoutStrategy.WARNFAILED;
PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql());
if (timeoutFlag) {
stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout());
}
Map<Integer, Property> params = sqlBinds.getParamsMap();
if (params != null) {
for (Map.Entry<Integer, Property> entry : params.entrySet()) {
Property prop = entry.getValue();
ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue());
}
}
logger.info("prepare statement replace sql : {} ", stmt);
return stmt;
}
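
    /*
     * Sketch (assumed values): continuing the ${dt} example above, the prepared
     * statement "select * from t where ds = ?" gets position 1 bound via
     * ParameterUtils.setInParameter(1, stmt, prop.getType(), "2021-03-18"),
     * and a query timeout is applied only when the task timeout strategy is
     * FAILED or WARNFAILED.
     */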
/**
     * send the query result through the alert service (e.g. as a mail attachment)
*
* @param title title
* @param content content
*/
public void sendAttachment(int groupId, String title, String content) {
AlertSendResponseCommand alertSendResponseCommand = alertClientService.sendAlert(groupId, title, content);
if (!alertSendResponseCommand.getResStatus()) {
throw new RuntimeException("send mail failed!");
}
}
/**
     * use a regular expression to match each ${...} placeholder in the content and map it, in appearance order, to its parameter property
*
* @param content content
* @param rgex rgex
* @param sqlParamsMap sql params map
* @param paramsPropsMap params props map
*/
public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) {
Pattern pattern = Pattern.compile(rgex);
Matcher m = pattern.matcher(content);
int index = 1;
while (m.find()) {
String paramName = m.group(1);
Property prop = paramsPropsMap.get(paramName);
sqlParamsMap.put(index, prop);
index++;
}
}
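
    /*
     * Illustration (assumed input): for content "ds = '${dt}' and id = ${id}"
     * the map ends up as {1 -> dt's Property, 2 -> id's Property}, i.e. keyed
     * by the order in which the placeholders appear.
     */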
/**
* print replace sql
*
* @param content content
* @param formatSql format sql
* @param rgex rgex
* @param sqlParamsMap sql params map
*/
public void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) {
        // parameter print style
        logger.info("after replace sql, preparing: {}", formatSql);
        StringBuilder logPrint = new StringBuilder("replaced sql, parameters: ");
for (int i = 1; i <= sqlParamsMap.size(); i++) {
logPrint.append(sqlParamsMap.get(i).getValue() + "(" + sqlParamsMap.get(i).getType() + ")");
}
logger.info("Sql Params are {}", logPrint);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="sql-model">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refDs"
@on-dsData="_onDsData"
:data="{ type:type,datasource:datasource }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Type')}}</div>
<div slot="content">
<div style="display: inline-block;">
<m-sql-type @on-sqlType="_onSqlType" :sql-type="sqlType"></m-sql-type>
</div>
</div>
</m-list-box>
<template v-if="sqlType === 0">
<m-list-box>
<div slot="text"><strong class='requiredIcon'>*</strong>{{$t('Title')}}</div>
<div slot="content">
<el-input
type="input"
size="small"
v-model="title"
:disabled="isDetails"
:placeholder="$t('Please enter the title of email')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text"><strong class='requiredIcon'>*</strong>{{$t('Alarm group')}}</div>
<div slot="content">
<m-warning-groups v-model="groupId"></m-warning-groups>
</div>
</m-list-box>
</template>
<m-list-box v-if="type === 'HIVE'">
<div slot="text">{{$t('SQL Parameter')}}</div>
<div slot="content">
<el-input
:disabled="isDetails"
type="input"
size="small"
v-model="connParams"
:placeholder="$t('Please enter format') + ' key1=value1;key2=value2...'">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="form-mirror">
<textarea
id="code-sql-mirror"
name="code-sql-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box v-if="type === 'HIVE'">
<div slot="text">{{$t('UDF Function')}}</div>
<div slot="content">
<m-udfs
ref="refUdfs"
@on-udfsData="_onUdfsData"
:udfs="udfs"
:type="type">
</m-udfs>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-udpData="_onUdpData"
:udp-list="localParams">
</m-local-params>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Pre Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPreStatements"
@on-statement-list="_onPreStatements"
:statement-list="preStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Post Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPostStatements"
@on-statement-list="_onPostStatements"
:statement-list="postStatements">
</m-statement-list>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
      :append-to-body="true"
width="80%">
<m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue" @closeAble="closeAble"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mUdfs from './_source/udfs'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mSqlType from './_source/sqlType'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import mStatementList from './_source/statementList'
import mWarningGroups from './_source/warningGroups'
import disabledState from '@/module/mixin/disabledState'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
export default {
name: 'sql',
data () {
return {
// Data source type
type: '',
// data source
datasource: '',
// Return to the selected data source
rtDatasource: '',
// Sql statement
sql: '',
// Custom parameter
localParams: [],
// UDF function
udfs: '',
// Sql type
sqlType: '0',
// Email title
title: '',
// Sql parameter
connParams: '',
// Pre statements
preStatements: [],
// Post statements
postStatements: [],
item: '',
scriptBoxDialog: false,
groupId: null
}
},
mixins: [disabledState],
props: {
backfillItem: Object,
createNodeId: Number
},
methods: {
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
/**
* return sqlType
*/
_onSqlType (a) {
this.sqlType = a
},
/**
* return udfs
*/
_onUdfsData (a) {
this.udfs = a
},
/**
* return Custom parameter
*/
_onUdpData (a) {
this.localParams = a
},
/**
* return data source
*/
_onDsData (o) {
this.type = o.type
this.rtDatasource = o.datasource
},
/**
* return pre statements
*/
_onPreStatements (a) {
this.preStatements = a
},
/**
* return post statements
*/
_onPostStatements (a) {
this.postStatements = a
},
/**
* verification
*/
_verification () {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDs._verifDatasource()) {
return false
}
if (this.sqlType === '0' && !this.title) {
this.$message.warning(`${i18n.$t('Mail subject required')}`)
return false
}
if (this.sqlType === '0' && (this.groupId === '' || this.groupId === null)) {
this.$message.warning(`${i18n.$t('Alarm group required')}`)
return false
}
      // udfs subcomponent verification, performed only when the datasource type is HIVE
if (this.type === 'HIVE') {
if (!this.$refs.refUdfs._verifUdfs()) {
return false
}
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// preStatements Subcomponent verification
if (!this.$refs.refPreStatements._verifProp()) {
return false
}
// postStatements Subcomponent verification
if (!this.$refs.refPostStatements._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
type: this.type,
datasource: this.rtDatasource,
sql: editor.getValue(),
udfs: this.udfs,
sqlType: this.sqlType,
title: this.title,
groupId: this.groupId,
localParams: this.localParams,
connParams: this.connParams,
preStatements: this.preStatements,
postStatements: this.postStatements
})
return true
},
/**
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-sql-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
this.changes = () => {
this._cacheParams()
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', this.changes)
editor.setValue(this.sql)
return editor
},
_cacheParams () {
this.$emit('on-cache-params', {
type: this.type,
datasource: this.rtDatasource,
sql: editor ? editor.getValue() : '',
udfs: this.udfs,
sqlType: this.sqlType,
title: this.title,
groupId: this.groupId,
localParams: this.localParams,
connParams: this.connParams,
preStatements: this.preStatements,
postStatements: this.postStatements
})
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
editor.off($('.code-sql-mirror'), 'changes', this.changes)
}
}
},
watch: {
// Listening to sqlType
sqlType (val) {
if (val !== 0) {
this.title = ''
this.groupId = null
}
},
// Listening data source
type (val) {
if (val !== 'HIVE') {
this.connParams = ''
}
},
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
}
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
// backfill
this.type = o.params.type || ''
this.datasource = o.params.datasource || ''
this.sql = o.params.sql || ''
this.udfs = o.params.udfs || ''
this.sqlType = o.params.sqlType
this.connParams = o.params.connParams || ''
this.localParams = o.params.localParams || []
this.preStatements = o.params.preStatements || []
this.postStatements = o.params.postStatements || []
this.title = o.params.title || ''
this.groupId = o.params.groupId
}
},
mounted () {
setTimeout(() => {
this._handlerEditor()
}, 200)
},
destroyed () {
/**
* Destroy the editor instance
*/
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
editor.off($('.code-sql-mirror'), 'changes', this.changes)
}
},
computed: {
cacheParams () {
return {
type: this.type,
datasource: this.rtDatasource,
udfs: this.udfs,
sqlType: this.sqlType,
title: this.title,
groupId: this.groupId,
localParams: this.localParams,
connParams: this.connParams,
preStatements: this.preStatements,
postStatements: this.postStatements
}
}
},
components: { mListBox, mDatasource, mLocalParams, mUdfs, mSqlType, mStatementList, mScriptBox, mWarningGroups }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': 'User Name',
'Please enter user name': 'Please enter user name',
Password: 'Password',
'Please enter your password': 'Please enter your password',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22',
Login: 'Login',
Home: 'Home',
'Failed to create node to save': 'Failed to create node to save',
'Global parameters': 'Global parameters',
'Local parameters': 'Local parameters',
'Copy success': 'Copy success',
'The browser does not support automatic copying': 'The browser does not support automatic copying',
'Whether to save the DAG graph': 'Whether to save the DAG graph',
'Current node settings': 'Current node settings',
'View history': 'View history',
'View log': 'View log',
'Force success': 'Force success',
'Enter this child node': 'Enter this child node',
'Node name': 'Node name',
'Please enter name (required)': 'Please enter name (required)',
'Run flag': 'Run flag',
Normal: 'Normal',
'Prohibition execution': 'Prohibition execution',
'Please enter description': 'Please enter description',
'Number of failed retries': 'Number of failed retries',
Times: 'Times',
'Failed retry interval': 'Failed retry interval',
Minute: 'Minute',
'Delay execution time': 'Delay execution time',
'Delay execution': 'Delay execution',
'Forced success': 'Forced success',
Cancel: 'Cancel',
'Confirm add': 'Confirm add',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process',
'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process',
'Name already exists': 'Name already exists',
'Download Log': 'Download Log',
'Refresh Log': 'Refresh Log',
'Enter full screen': 'Enter full screen',
'Cancel full screen': 'Cancel full screen',
Close: 'Close',
'Update log success': 'Update log success',
'No more logs': 'No more logs',
'No log': 'No log',
'Loading Log...': 'Loading Log...',
'Set the DAG diagram name': 'Set the DAG diagram name',
'Please enter description(optional)': 'Please enter description(optional)',
'Set global': 'Set global',
'Whether to go online the process definition': 'Whether to go online the process definition',
'Whether to update the process definition': 'Whether to update the process definition',
Add: 'Add',
'DAG graph name cannot be empty': 'DAG graph name cannot be empty',
'Create Datasource': 'Create Datasource',
'Project Home': 'Project Home',
'Project Manage': 'Project',
'Create Project': 'Create Project',
'Cron Manage': 'Cron Manage',
'Copy Workflow': 'Copy Workflow',
'Tenant Manage': 'Tenant Manage',
'Create Tenant': 'Create Tenant',
'User Manage': 'User Manage',
'Create User': 'Create User',
'User Information': 'User Information',
'Edit Password': 'Edit Password',
Success: 'Success',
Failed: 'Failed',
Delete: 'Delete',
'Please choose': 'Please choose',
'Please enter a positive integer': 'Please enter a positive integer',
'Program Type': 'Program Type',
'Main Class': 'Main Class',
'Main Jar Package': 'Main Jar Package',
'Please enter main jar package': 'Please enter main jar package',
'Please enter main class': 'Please enter main class',
'Main Arguments': 'Main Arguments',
'Please enter main arguments': 'Please enter main arguments',
'Option Parameters': 'Option Parameters',
'Please enter option parameters': 'Please enter option parameters',
Resources: 'Resources',
'Custom Parameters': 'Custom Parameters',
'Custom template': 'Custom template',
Datasource: 'Datasource',
methods: 'methods',
'Please enter method(optional)': 'Please enter method(optional)',
Script: 'Script',
'Please enter script(required)': 'Please enter script(required)',
'Deploy Mode': 'Deploy Mode',
'Driver Cores': 'Driver Cores',
'Please enter Driver cores': 'Please enter Driver cores',
'Driver Memory': 'Driver Memory',
'Please enter Driver memory': 'Please enter Driver memory',
'Executor Number': 'Executor Number',
'Please enter Executor number': 'Please enter Executor number',
'The Executor number should be a positive integer': 'The Executor number should be a positive integer',
'Executor Memory': 'Executor Memory',
'Please enter Executor memory': 'Please enter Executor memory',
'Executor Cores': 'Executor Cores',
'Please enter Executor cores': 'Please enter Executor cores',
'Memory should be a positive integer': 'Memory should be a positive integer',
'Core number should be positive integer': 'Core number should be positive integer',
'Flink Version': 'Flink Version',
'JobManager Memory': 'JobManager Memory',
'Please enter JobManager memory': 'Please enter JobManager memory',
'TaskManager Memory': 'TaskManager Memory',
'Please enter TaskManager memory': 'Please enter TaskManager memory',
'Slot Number': 'Slot Number',
'Please enter Slot number': 'Please enter Slot number',
Parallelism: 'Parallelism',
'Please enter Parallelism': 'Please enter Parallelism',
'TaskManager Number': 'TaskManager Number',
'Please enter TaskManager number': 'Please enter TaskManager number',
'App Name': 'App Name',
'Please enter app name(optional)': 'Please enter app name(optional)',
'SQL Type': 'SQL Type',
Title: 'Title',
'Please enter the title of email': 'Please enter the title of email',
Table: 'Table',
TableMode: 'Table',
Attachment: 'Attachment',
'SQL Parameter': 'SQL Parameter',
'SQL Statement': 'SQL Statement',
'UDF Function': 'UDF Function',
'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)',
'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)',
'One form or attachment must be selected': 'One form or attachment must be selected',
'Mail subject required': 'Mail subject required',
'Child Node': 'Child Node',
'Please select a sub-Process': 'Please select a sub-Process',
Edit: 'Edit',
'Switch To This Version': 'Switch To This Version',
'Datasource Name': 'Datasource Name',
'Please enter datasource name': 'Please enter datasource name',
IP: 'IP',
'Please enter IP': 'Please enter IP',
Port: 'Port',
'Please enter port': 'Please enter port',
'Database Name': 'Database Name',
'Please enter database name': 'Please enter database name',
'Oracle Connect Type': 'ServiceName or SID',
'Oracle Service Name': 'ServiceName',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc connect parameters',
'Test Connect': 'Test Connect',
'Please enter resource name': 'Please enter resource name',
'Please enter resource folder name': 'Please enter resource folder name',
'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement',
'Please enter IP/hostname': 'Please enter IP/hostname',
'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format',
'#': '#',
'Datasource Type': 'Datasource Type',
'Datasource Parameter': 'Datasource Parameter',
'Create Time': 'Create Time',
'Update Time': 'Update Time',
Operation: 'Operation',
'Current Version': 'Current Version',
'Click to view': 'Click to view',
'Delete?': 'Delete?',
'Switch Version Successfully': 'Switch Version Successfully',
'Confirm Switch To This Version?': 'Confirm Switch To This Version?',
Confirm: 'Confirm',
'Task status statistics': 'Task Status Statistics',
Number: 'Number',
State: 'State',
'Process Status Statistics': 'Process Status Statistics',
'Process Definition Statistics': 'Process Definition Statistics',
'Project Name': 'Project Name',
'Please enter name': 'Please enter name',
'Owned Users': 'Owned Users',
'Process Pid': 'Process Pid',
'Zk registration directory': 'Zk registration directory',
cpuUsage: 'cpuUsage',
memoryUsage: 'memoryUsage',
'Last heartbeat time': 'Last heartbeat time',
'Edit Tenant': 'Edit Tenant',
'OS Tenant Code': 'OS Tenant Code',
'Tenant Name': 'Tenant Name',
Queue: 'Yarn Queue',
'Please select a queue': 'default is tenant association queue',
'Please enter the os tenant code in English': 'Please enter the os tenant code in English',
'Please enter os tenant code in English': 'Please enter os tenant code in English',
'Please enter os tenant code': 'Please enter os tenant code',
'Please enter tenant Name': 'Please enter tenant Name',
'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. Only letters or a combination of letters and numbers are allowed',
'Edit User': 'Edit User',
Tenant: 'Tenant',
Email: 'Email',
Phone: 'Phone',
'User Type': 'User Type',
'Please enter phone number': 'Please enter phone number',
'Please enter email': 'Please enter email',
'Please enter the correct email format': 'Please enter the correct email format',
'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format',
Project: 'Project',
Authorize: 'Authorize',
'File resources': 'File resources',
'UDF resources': 'UDF resources',
'UDF resources directory': 'UDF resources directory',
'Please select UDF resources directory': 'Please select UDF resources directory',
'Alarm group': 'Alarm group',
'Alarm group required': 'Alarm group required',
'Edit alarm group': 'Edit alarm group',
'Create alarm group': 'Create alarm group',
'Create Alarm Instance': 'Create Alarm Instance',
'Edit Alarm Instance': 'Edit Alarm Instance',
'Group Name': 'Group Name',
'Alarm instance name': 'Alarm instance name',
'Alarm plugin name': 'Alarm plugin name',
'Select plugin': 'Select plugin',
'Please enter group name': 'Please enter group name',
'Instance parameter exception': 'Instance parameter exception',
'Group Type': 'Group Type',
'Alarm plugin instance': 'Alarm plugin instance',
Remarks: 'Remarks',
SMS: 'SMS',
'Managing Users': 'Managing Users',
Permission: 'Permission',
Administrator: 'Administrator',
'Confirm Password': 'Confirm Password',
'Please enter confirm password': 'Please enter confirm password',
'Password cannot be in Chinese': 'Password cannot be in Chinese',
'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password',
'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese',
'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password',
'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password',
'Please select the datasource': 'Please select the datasource',
'Please select resources': 'Please select resources',
Query: 'Query',
'Non Query': 'Non Query',
'prop(required)': 'prop(required)',
'value(optional)': 'value(optional)',
'value(required)': 'value(required)',
'prop is empty': 'prop is empty',
'value is empty': 'value is empty',
'prop is repeat': 'prop is repeat',
'Start Time': 'Start Time',
'End Time': 'End Time',
crontab: 'crontab',
'Failure Strategy': 'Failure Strategy',
online: 'online',
offline: 'offline',
'Task Status': 'Task Status',
'Process Instance': 'Process Instance',
'Task Instance': 'Task Instance',
'Select date range': 'Select date range',
startDate: 'startDate',
endDate: 'endDate',
Date: 'Date',
Waiting: 'Waiting',
Execution: 'Execution',
Finish: 'Finish',
'Create File': 'Create File',
'Create folder': 'Create folder',
'File Name': 'File Name',
'Folder Name': 'Folder Name',
'File Format': 'File Format',
'Folder Format': 'Folder Format',
'File Content': 'File Content',
'Upload File Size': 'Upload File size cannot exceed 1g',
Create: 'Create',
'Please enter the resource content': 'Please enter the resource content',
'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines',
'File Details': 'File Details',
'Download Details': 'Download Details',
Return: 'Return',
Save: 'Save',
'File Manage': 'File Manage',
'Upload Files': 'Upload Files',
'Create UDF Function': 'Create UDF Function',
'Upload UDF Resources': 'Upload UDF Resources',
'Service-Master': 'Service-Master',
'Service-Worker': 'Service-Worker',
'Process Name': 'Process Name',
Executor: 'Executor',
'Run Type': 'Run Type',
'Scheduling Time': 'Scheduling Time',
'Run Times': 'Run Times',
host: 'host',
'fault-tolerant sign': 'fault-tolerant sign',
Rerun: 'Rerun',
'Recovery Failed': 'Recovery Failed',
Stop: 'Stop',
Pause: 'Pause',
'Recovery Suspend': 'Recovery Suspend',
Gantt: 'Gantt',
'Node Type': 'Node Type',
'Submit Time': 'Submit Time',
Duration: 'Duration',
'Retry Count': 'Retry Count',
'Task Name': 'Task Name',
'Task Date': 'Task Date',
'Source Table': 'Source Table',
'Record Number': 'Record Number',
'Target Table': 'Target Table',
'Online viewing type is not supported': 'Online viewing type is not supported',
Size: 'Size',
Rename: 'Rename',
Download: 'Download',
Export: 'Export',
'Version Info': 'Version Info',
Submit: 'Submit',
'Edit UDF Function': 'Edit UDF Function',
type: 'type',
'UDF Function Name': 'UDF Function Name',
FILE: 'FILE',
UDF: 'UDF',
'File Subdirectory': 'File Subdirectory',
'Please enter a function name': 'Please enter a function name',
'Package Name': 'Package Name',
'Please enter a Package name': 'Please enter a Package name',
Parameter: 'Parameter',
'Please enter a parameter': 'Please enter a parameter',
'UDF Resources': 'UDF Resources',
'Upload Resources': 'Upload Resources',
Instructions: 'Instructions',
'Please enter a instructions': 'Please enter a instructions',
'Please enter a UDF function name': 'Please enter a UDF function name',
'Select UDF Resources': 'Select UDF Resources',
'Class Name': 'Class Name',
'Jar Package': 'Jar Package',
'Library Name': 'Library Name',
'UDF Resource Name': 'UDF Resource Name',
'File Size': 'File Size',
Description: 'Description',
'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items',
'Select Line Connection': 'Select Line Connection',
'Delete selected lines or nodes': 'Delete selected lines or nodes',
'Full Screen': 'Full Screen',
Unpublished: 'Unpublished',
'Start Process': 'Start Process',
'Execute from the current node': 'Execute from the current node',
'Recover tolerance fault process': 'Recover tolerance fault process',
'Resume the suspension process': 'Resume the suspension process',
'Execute from the failed nodes': 'Execute from the failed nodes',
'Complement Data': 'Complement Data',
'Scheduling execution': 'Scheduling execution',
'Recovery waiting thread': 'Recovery waiting thread',
'Submitted successfully': 'Submitted successfully',
Executing: 'Executing',
'Ready to pause': 'Ready to pause',
'Ready to stop': 'Ready to stop',
'Need fault tolerance': 'Need fault tolerance',
Kill: 'Kill',
'Waiting for thread': 'Waiting for thread',
'Waiting for dependence': 'Waiting for dependence',
Start: 'Start',
Copy: 'Copy',
'Copy name': 'Copy name',
'Please enter keyword': 'Please enter keyword',
'File Upload': 'File Upload',
'Drag the file into the current upload window': 'Drag the file into the current upload window',
'Drag area upload': 'Drag area upload',
Upload: 'Upload',
'ReUpload File': 'Re-upload file',
'Please enter file name': 'Please enter file name',
'Please select the file to upload': 'Please select the file to upload',
'Resources manage': 'Resources',
Security: 'Security',
Logout: 'Logout',
'No data': 'No data',
'Uploading...': 'Uploading...',
'Loading...': 'Loading...',
List: 'List',
'Unable to download without proper url': 'Unable to download without proper url',
Process: 'Process',
'Process definition': 'Process definition',
'Task record': 'Task record',
'Warning group manage': 'Warning group manage',
'Warning instance manage': 'Warning instance manage',
'Servers manage': 'Servers manage',
'UDF manage': 'UDF manage',
'Resource manage': 'Resource manage',
'Function manage': 'Function manage',
'Edit password': 'Edit password',
'Ordinary users': 'Ordinary users',
'Create process': 'Create process',
'Import process': 'Import process',
'Timing state': 'Timing state',
Timing: 'Timing',
TreeView: 'TreeView',
'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat',
'Mailbox input is illegal': 'Mailbox input is illegal',
'Please set the parameters before starting': 'Please set the parameters before starting',
Continue: 'Continue',
End: 'End',
'Node execution': 'Node execution',
'Backward execution': 'Backward execution',
'Forward execution': 'Forward execution',
'Execute only the current node': 'Execute only the current node',
'Notification strategy': 'Notification strategy',
'Notification group': 'Notification group',
'Please select a notification group': 'Please select a notification group',
receivers: 'receivers',
receiverCcs: 'receiverCcs',
'Whether it is a complement process?': 'Whether it is a complement process?',
'Schedule date': 'Schedule date',
'Mode of execution': 'Mode of execution',
'Serial execution': 'Serial execution',
'Parallel execution': 'Parallel execution',
'Set parameters before timing': 'Set parameters before timing',
'Start and stop time': 'Start and stop time',
'Please select time': 'Please select time',
'Please enter crontab': 'Please enter crontab',
none_1: 'none',
success_1: 'success',
failure_1: 'failure',
All_1: 'All',
Toolbar: 'Toolbar',
'View variables': 'View variables',
'Format DAG': 'Format DAG',
'Refresh DAG status': 'Refresh DAG status',
Return_1: 'Return',
'Please enter format': 'Please enter format',
'connection parameter': 'connection parameter',
'Process definition details': 'Process definition details',
'Create process definition': 'Create process definition',
'Scheduled task list': 'Scheduled task list',
'Process instance details': 'Process instance details',
'Create Resource': 'Create Resource',
'User Center': 'User Center',
'Please enter method': 'Please enter method',
None: 'None',
Name: 'Name',
'Process priority': 'Process priority',
'Task priority': 'Task priority',
'Task timeout alarm': 'Task timeout alarm',
'Timeout strategy': 'Timeout strategy',
'Timeout alarm': 'Timeout alarm',
'Timeout failure': 'Timeout failure',
'Timeout period': 'Timeout period',
'Waiting Dependent complete': 'Waiting Dependent complete',
'Waiting Dependent start': 'Waiting Dependent start',
'Check interval': 'Check interval',
'Timeout must be longer than check interval': 'Timeout must be longer than check interval',
'Timeout strategy must be selected': 'Timeout strategy must be selected',
'Timeout must be a positive integer': 'Timeout must be a positive integer',
'Add dependency': 'Add dependency',
and: 'and',
or: 'or',
month: 'month',
week: 'week',
day: 'day',
hour: 'hour',
Running: 'Running',
'Waiting for dependency to complete': 'Waiting for dependency to complete',
Selected: 'Selected',
CurrentHour: 'CurrentHour',
Last1Hour: 'Last1Hour',
Last2Hours: 'Last2Hours',
Last3Hours: 'Last3Hours',
Last24Hours: 'Last24Hours',
today: 'today',
Last1Days: 'Last1Days',
Last2Days: 'Last2Days',
Last3Days: 'Last3Days',
Last7Days: 'Last7Days',
ThisWeek: 'ThisWeek',
LastWeek: 'LastWeek',
LastMonday: 'LastMonday',
LastTuesday: 'LastTuesday',
LastWednesday: 'LastWednesday',
LastThursday: 'LastThursday',
LastFriday: 'LastFriday',
LastSaturday: 'LastSaturday',
LastSunday: 'LastSunday',
ThisMonth: 'ThisMonth',
LastMonth: 'LastMonth',
LastMonthBegin: 'LastMonthBegin',
LastMonthEnd: 'LastMonthEnd',
'Refresh status succeeded': 'Refresh status succeeded',
'Queue manage': 'Yarn Queue manage',
'Create queue': 'Create queue',
'Edit queue': 'Edit queue',
'Datasource manage': 'Datasource',
'History task record': 'History task record',
'Please go online': 'Please go online',
'Queue value': 'Queue value',
'Please enter queue value': 'Please enter queue value',
'Worker group manage': 'Worker group manage',
'Create worker group': 'Create worker group',
'Edit worker group': 'Edit worker group',
'Token manage': 'Token manage',
'Create token': 'Create token',
'Edit token': 'Edit token',
'Please enter the IP address separated by commas': 'Please enter the IP address separated by commas',
'Note: Multiple IP addresses have been comma separated': 'Note: Multiple IP addresses have been comma separated',
'Failure time': 'Failure time',
'Expiration time': 'Expiration time',
User: 'User',
'Please enter token': 'Please enter token',
'Generate token': 'Generate token',
Monitor: 'Monitor',
Group: 'Group',
'Queue statistics': 'Queue statistics',
'Command status statistics': 'Command status statistics',
'Task kill': 'Task Kill',
'Task queue': 'Task queue',
'Error command count': 'Error command count',
'Normal command count': 'Normal command count',
Manage: ' Manage',
'Number of connections': 'Number of connections',
Sent: 'Sent',
Received: 'Received',
'Min latency': 'Min latency',
'Avg latency': 'Avg latency',
'Max latency': 'Max latency',
'Node count': 'Node count',
'Query time': 'Query time',
'Node self-test status': 'Node self-test status',
'Health status': 'Health status',
'Max connections': 'Max connections',
'Threads connections': 'Threads connections',
'Max used connections': 'Max used connections',
'Threads running connections': 'Threads running connections',
'Worker group': 'Worker group',
'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0',
'Pre Statement': 'Pre Statement',
'Post Statement': 'Post Statement',
'Statement cannot be empty': 'Statement cannot be empty',
'Process Define Count': 'Work flow Define Count',
'Process Instance Running Count': 'Process Instance Running Count',
'command number of waiting for running': 'command number of waiting for running',
'failure command number': 'failure command number',
'tasks number of waiting running': 'tasks number of waiting running',
'task number of ready to kill': 'task number of ready to kill',
'Statistics manage': 'Statistics Manage',
statistics: 'Statistics',
'select tenant': 'select tenant',
'Please enter Principal': 'Please enter Principal',
'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf',
'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username',
'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path',
'The start time must not be the same as the end': 'The start time must not be the same as the end',
'Startup parameter': 'Startup parameter',
'Startup type': 'Startup type',
'warning of timeout': 'warning of timeout',
'Next five execution times': 'Next five execution times',
'Execute time': 'Execute time',
'Complement range': 'Complement range',
'Http Url': 'Http Url',
'Http Method': 'Http Method',
'Http Parameters': 'Http Parameters',
'Http Parameters Key': 'Http Parameters Key',
'Http Parameters Position': 'Http Parameters Position',
'Http Parameters Value': 'Http Parameters Value',
'Http Check Condition': 'Http Check Condition',
'Http Condition': 'Http Condition',
'Please Enter Http Url': 'Please Enter Http Url(required)',
'Please Enter Http Condition': 'Please Enter Http Condition',
'There is no data for this period of time': 'There is no data for this period of time',
'IP address cannot be empty': 'IP address cannot be empty',
'Please enter the correct IP': 'Please enter the correct IP',
'Please generate token': 'Please generate token',
'Spark Version': 'Spark Version',
TargetDataBase: 'target database',
TargetTable: 'target table',
'Please enter the table of target': 'Please enter the table of target',
'Please enter a Target Table(required)': 'Please enter a Target Table(required)',
SpeedByte: 'speed(byte count)',
SpeedRecord: 'speed(record count)',
'0 means unlimited by byte': '0 means unlimited',
'0 means unlimited by count': '0 means unlimited',
'Modify User': 'Modify User',
'Whether directory': 'Whether directory',
Yes: 'Yes',
No: 'No',
'Hadoop Custom Params': 'Hadoop Params',
'Sqoop Advanced Parameters': 'Sqoop Params',
'Sqoop Job Name': 'Job Name',
'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)',
'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)',
'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)',
'Please enter Target Dir(required)': 'Please enter Target Dir(required)',
'Please enter Export Dir(required)': 'Please enter Export Dir(required)',
  'Please enter Hive Database(required)': 'Please enter Hive Database(required)',
'Please enter Hive Table(required)': 'Please enter Hive Table(required)',
  'Please enter Hive Partition Keys': 'Please enter Hive Partition Keys',
  'Please enter Hive Partition Values': 'Please enter Hive Partition Values',
'Please enter Replace Delimiter': 'Please enter Replace Delimiter',
'Please enter Fields Terminated': 'Please enter Fields Terminated',
'Please enter Lines Terminated': 'Please enter Lines Terminated',
'Please enter Concurrency': 'Please enter Concurrency',
'Please enter Update Key': 'Please enter Update Key',
'Please enter Job Name(required)': 'Please enter Job Name(required)',
'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)',
Direct: 'Direct',
Type: 'Type',
ModelType: 'ModelType',
ColumnType: 'ColumnType',
Database: 'Database',
Column: 'Column',
'Map Column Hive': 'Map Column Hive',
'Map Column Java': 'Map Column Java',
'Export Dir': 'Export Dir',
'Hive partition Keys': 'Hive partition Keys',
'Hive partition Values': 'Hive partition Values',
FieldsTerminated: 'FieldsTerminated',
LinesTerminated: 'LinesTerminated',
IsUpdate: 'IsUpdate',
UpdateKey: 'UpdateKey',
UpdateMode: 'UpdateMode',
'Target Dir': 'Target Dir',
DeleteTargetDir: 'DeleteTargetDir',
FileType: 'FileType',
CompressionCodec: 'CompressionCodec',
CreateHiveTable: 'CreateHiveTable',
DropDelimiter: 'DropDelimiter',
OverWriteSrc: 'OverWriteSrc',
ReplaceDelimiter: 'ReplaceDelimiter',
Concurrency: 'Concurrency',
Form: 'Form',
OnlyUpdate: 'OnlyUpdate',
AllowInsert: 'AllowInsert',
'Data Source': 'Data Source',
'Data Target': 'Data Target',
'All Columns': 'All Columns',
'Some Columns': 'Some Columns',
'Branch flow': 'Branch flow',
'Custom Job': 'Custom Job',
'Custom Script': 'Custom Script',
'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow',
  'Successful branch flow and failed branch flow are required': 'Successful and failed branch flows are required for the conditions node',
'No resources exist': 'No resources exist',
'Please delete all non-existing resources': 'Please delete all non-existing resources',
'Unauthorized or deleted resources': 'Unauthorized or deleted resources',
'Please delete all non-existent resources': 'Please delete all non-existent resources',
Kinship: 'Workflow relationship',
Reset: 'Reset',
KinshipStateActive: 'Active',
KinshipState1: 'Online',
KinshipState0: 'Workflow is not online',
KinshipState10: 'Scheduling is not online',
'Dag label display control': 'Dag label display control',
Enable: 'Enable',
Disable: 'Disable',
'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!',
'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading',
'User name length is between 3 and 39': 'User name length is between 3 and 39',
'Timeout Settings': 'Timeout Settings',
'Connect Timeout': 'Connect Timeout',
'Socket Timeout': 'Socket Timeout',
  'Connect timeout be a positive integer': 'Connect timeout must be a positive integer',
  'Socket Timeout be a positive integer': 'Socket timeout must be a positive integer',
ms: 'ms',
  'Please Enter Url': 'Please enter Url, e.g. 127.0.0.1:7077',
Master: 'Master',
'Please select the waterdrop resources': 'Please select the waterdrop resources',
zkDirectory: 'zkDirectory',
'Directory detail': 'Directory detail',
'Connection name': 'Connection name',
'Current connection settings': 'Current connection settings',
'Please save the DAG before formatting': 'Please save the DAG before formatting',
'Batch copy': 'Batch copy',
'Related items': 'Related items',
'Project name is required': 'Project name is required',
'Batch move': 'Batch move',
Version: 'Version',
'Pre tasks': 'Pre tasks',
'Running Memory': 'Running Memory',
'Max Memory': 'Max Memory',
'Min Memory': 'Min Memory',
'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate',
Info: 'Info',
'Datasource userName': 'owner',
'Resource userName': 'owner'
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': '用户名',
'Please enter user name': '请输入用户名',
Password: '密码',
'Please enter your password': '请输入密码',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间',
Login: '登录',
Home: '首页',
'Failed to create node to save': '未创建节点保存失败',
'Global parameters': '全局参数',
'Local parameters': '局部参数',
'Copy success': '复制成功',
'The browser does not support automatic copying': '该浏览器不支持自动复制',
'Whether to save the DAG graph': '是否保存DAG图',
'Current node settings': '当前节点设置',
'View history': '查看历史',
'View log': '查看日志',
'Force success': '强制成功',
'Enter this child node': '进入该子节点',
'Node name': '节点名称',
'Please enter name (required)': '请输入名称(必填)',
'Run flag': '运行标志',
Normal: '正常',
'Prohibition execution': '禁止执行',
'Please enter description': '请输入描述',
'Number of failed retries': '失败重试次数',
Times: '次',
'Failed retry interval': '失败重试间隔',
Minute: '分',
'Delay execution time': '延时执行时间',
'Delay execution': '延时执行',
  'Forced success': '强制成功',
Cancel: '取消',
'Confirm add': '确认添加',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流',
'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流',
'Name already exists': '名称已存在请重新输入',
'Download Log': '下载日志',
'Refresh Log': '刷新日志',
'Enter full screen': '进入全屏',
'Cancel full screen': '取消全屏',
Close: '关闭',
'Update log success': '更新日志成功',
'No more logs': '暂无更多日志',
'No log': '暂无日志',
'Loading Log...': '正在努力请求日志中...',
'Set the DAG diagram name': '设置DAG图名称',
'Please enter description(optional)': '请输入描述(选填)',
'Set global': '设置全局',
'Whether to go online the process definition': '是否上线流程定义',
'Whether to update the process definition': '是否更新流程定义',
Add: '添加',
'DAG graph name cannot be empty': 'DAG图名称不能为空',
'Create Datasource': '创建数据源',
'Project Home': '项目首页',
'Project Manage': '项目管理',
'Create Project': '创建项目',
'Cron Manage': '定时管理',
'Copy Workflow': '复制工作流',
'Tenant Manage': '租户管理',
'Create Tenant': '创建租户',
'User Manage': '用户管理',
'Create User': '创建用户',
'User Information': '用户信息',
'Edit Password': '密码修改',
Success: '成功',
Failed: '失败',
Delete: '删除',
'Please choose': '请选择',
'Please enter a positive integer': '请输入正整数',
'Program Type': '程序类型',
'Main Class': '主函数的Class',
'Main Jar Package': '主Jar包',
'Please enter main jar package': '请选择主Jar包',
'Please enter main class': '请填写主函数的Class',
'Main Arguments': '主程序参数',
'Please enter main arguments': '请输入主程序参数',
'Option Parameters': '选项参数',
'Please enter option parameters': '请输入选项参数',
Resources: '资源',
'Custom Parameters': '自定义参数',
'Custom template': '自定义模版',
Datasource: '数据源',
methods: '方法',
'Please enter method(optional)': '请输入方法(选填)',
Script: '脚本',
'Please enter script(required)': '请输入脚本(必填)',
'Deploy Mode': '部署方式',
'Driver Cores': 'Driver核心数',
'Please enter Driver cores': '请输入Driver核心数',
'Driver Memory': 'Driver内存数',
'Please enter Driver memory': '请输入Driver内存数',
'Executor Number': 'Executor数量',
'Please enter Executor number': '请输入Executor数量',
'The Executor number should be a positive integer': 'Executor数量为正整数',
'Executor Memory': 'Executor内存数',
'Please enter Executor memory': '请输入Executor内存数',
'Executor Cores': 'Executor核心数',
'Please enter Executor cores': '请输入Executor核心数',
'Memory should be a positive integer': '内存数为数字',
'Core number should be positive integer': '核心数为正整数',
'Flink Version': 'Flink版本',
'JobManager Memory': 'JobManager内存数',
'Please enter JobManager memory': '请输入JobManager内存数',
'TaskManager Memory': 'TaskManager内存数',
'Please enter TaskManager memory': '请输入TaskManager内存数',
'Slot Number': 'Slot数量',
'Please enter Slot number': '请输入Slot数量',
Parallelism: '并行度',
'Please enter Parallelism': '请输入并行度',
'TaskManager Number': 'TaskManager数量',
'Please enter TaskManager number': '请输入TaskManager数量',
'App Name': '任务名称',
'Please enter app name(optional)': '请输入任务名称(选填)',
'SQL Type': 'sql类型',
Title: '主题',
'Please enter the title of email': '请输入邮件主题',
Table: '表名',
TableMode: '表格',
Attachment: '附件',
'SQL Parameter': 'sql参数',
'SQL Statement': 'sql语句',
'UDF Function': 'UDF函数',
'Please enter a SQL Statement(required)': '请输入sql语句(必填)',
'Please enter a JSON Statement(required)': '请输入json语句(必填)',
'One form or attachment must be selected': '表格、附件必须勾选一个',
'Mail subject required': '邮件主题必填',
'Child Node': '子节点',
'Please select a sub-Process': '请选择子工作流',
Edit: '编辑',
'Switch To This Version': '切换到该版本',
'Datasource Name': '数据源名称',
'Please enter datasource name': '请输入数据源名称',
IP: 'IP主机名',
'Please enter IP': '请输入IP主机名',
Port: '端口',
'Please enter port': '请输入端口',
'Database Name': '数据库名',
'Please enter database name': '请输入数据库名',
'Oracle Connect Type': '服务名或SID',
'Oracle Service Name': '服务名',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc连接参数',
'Test Connect': '测试连接',
  'Please enter resource name': '请输入资源名称',
'Please enter resource folder name': '请输入资源文件夹名称',
'Please enter a non-query SQL statement': '请输入非查询sql语句',
'Please enter IP/hostname': '请输入IP/主机名',
'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式',
'#': '编号',
'Datasource Type': '数据源类型',
'Datasource Parameter': '数据源参数',
'Create Time': '创建时间',
'Update Time': '更新时间',
Operation: '操作',
'Current Version': '当前版本',
'Click to view': '点击查看',
'Delete?': '确定删除吗?',
'Switch Version Successfully': '切换版本成功',
'Confirm Switch To This Version?': '确定切换到该版本吗?',
Confirm: '确定',
'Task status statistics': '任务状态统计',
Number: '数量',
State: '状态',
'Process Status Statistics': '流程状态统计',
'Process Definition Statistics': '流程定义统计',
'Project Name': '项目名称',
'Please enter name': '请输入名称',
'Owned Users': '所属用户',
'Process Pid': '进程Pid',
'Zk registration directory': 'zk注册目录',
cpuUsage: 'cpuUsage',
memoryUsage: 'memoryUsage',
'Last heartbeat time': '最后心跳时间',
'Edit Tenant': '编辑租户',
'OS Tenant Code': '操作系统租户',
'Tenant Name': '租户名称',
Queue: '队列',
'Please select a queue': '默认为租户关联队列',
'Please enter the os tenant code in English': '请输入操作系统租户只允许英文',
'Please enter os tenant code in English': '请输入英文操作系统租户',
'Please enter os tenant code': '请输入操作系统租户',
'Please enter tenant Name': '请输入租户名称',
'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合',
'Edit User': '编辑用户',
Tenant: '租户',
Email: '邮件',
Phone: '手机',
'User Type': '用户类型',
'Please enter phone number': '请输入手机',
'Please enter email': '请输入邮箱',
'Please enter the correct email format': '请输入正确的邮箱格式',
'Please enter the correct mobile phone format': '请输入正确的手机格式',
Project: '项目',
Authorize: '授权',
'File resources': '文件资源',
'UDF resources': 'UDF资源',
'UDF resources directory': 'UDF资源目录',
'Please select UDF resources directory': '请选择UDF资源目录',
'Alarm group': '告警组',
'Alarm group required': '告警组必填',
'Edit alarm group': '编辑告警组',
'Create alarm group': '创建告警组',
'Create Alarm Instance': '创建告警实例',
'Edit Alarm Instance': '编辑告警实例',
'Group Name': '组名称',
'Alarm instance name': '告警实例名称',
'Alarm plugin name': '告警插件名称',
'Select plugin': '选择插件',
'Please enter group name': '请输入组名称',
'Instance parameter exception': '实例参数异常',
'Group Type': '组类型',
'Alarm plugin instance': '告警插件实例',
Remarks: '备注',
SMS: '短信',
'Managing Users': '管理用户',
Permission: '权限',
Administrator: '管理员',
'Confirm Password': '确认密码',
'Please enter confirm password': '请输入确认密码',
'Password cannot be in Chinese': '密码不能为中文',
'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码',
'Confirmation password cannot be in Chinese': '确认密码不能为中文',
'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码',
'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认',
'Please select the datasource': '请选择数据源',
'Please select resources': '请选择资源',
Query: '查询',
'Non Query': '非查询',
'prop(required)': 'prop(必填)',
'value(optional)': 'value(选填)',
'value(required)': 'value(必填)',
'prop is empty': 'prop不能为空',
'value is empty': 'value不能为空',
'prop is repeat': 'prop中有重复',
'Start Time': '开始时间',
'End Time': '结束时间',
crontab: 'crontab',
'Failure Strategy': '失败策略',
online: '上线',
offline: '下线',
'Task Status': '任务状态',
'Process Instance': '工作流实例',
'Task Instance': '任务实例',
'Select date range': '选择日期区间',
startDate: '开始日期',
endDate: '结束日期',
Date: '日期',
Waiting: '等待',
Execution: '执行中',
Finish: '完成',
'Create File': '创建文件',
'Create folder': '创建文件夹',
'File Name': '文件名称',
'Folder Name': '文件夹名称',
'File Format': '文件格式',
'Folder Format': '文件夹格式',
'File Content': '文件内容',
'Upload File Size': '文件大小不能超过1G',
Create: '创建',
'Please enter the resource content': '请输入资源内容',
'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行',
'File Details': '文件详情',
'Download Details': '下载详情',
Return: '返回',
Save: '保存',
'File Manage': '文件管理',
'Upload Files': '上传文件',
'Create UDF Function': '创建UDF函数',
'Upload UDF Resources': '上传UDF资源',
'Service-Master': '服务管理-Master',
'Service-Worker': '服务管理-Worker',
'Process Name': '工作流名称',
Executor: '执行用户',
'Run Type': '运行类型',
'Scheduling Time': '调度时间',
'Run Times': '运行次数',
host: 'host',
'fault-tolerant sign': '容错标识',
Rerun: '重跑',
'Recovery Failed': '恢复失败',
Stop: '停止',
Pause: '暂停',
'Recovery Suspend': '恢复运行',
Gantt: '甘特图',
'Node Type': '节点类型',
'Submit Time': '提交时间',
Duration: '运行时长',
'Retry Count': '重试次数',
'Task Name': '任务名称',
'Task Date': '任务日期',
'Source Table': '源表',
'Record Number': '记录数',
'Target Table': '目标表',
'Online viewing type is not supported': '不支持在线查看类型',
Size: '大小',
Rename: '重命名',
Download: '下载',
Export: '导出',
'Version Info': '版本信息',
Submit: '提交',
'Edit UDF Function': '编辑UDF函数',
type: '类型',
'UDF Function Name': 'UDF函数名称',
FILE: '文件',
UDF: 'UDF',
'File Subdirectory': '文件子目录',
'Please enter a function name': '请输入函数名',
'Package Name': '包名类名',
'Please enter a Package name': '请输入包名类名',
Parameter: '参数',
'Please enter a parameter': '请输入参数',
'UDF Resources': 'UDF资源',
'Upload Resources': '上传资源',
Instructions: '使用说明',
'Please enter a instructions': '请输入使用说明',
'Please enter a UDF function name': '请输入UDF函数名称',
'Select UDF Resources': '请选择UDF资源',
'Class Name': '类名',
'Jar Package': 'jar包',
'Library Name': '库名',
'UDF Resource Name': 'UDF资源名称',
'File Size': '文件大小',
Description: '描述',
'Drag Nodes and Selected Items': '拖动节点和选中项',
'Select Line Connection': '选择线条连接',
'Delete selected lines or nodes': '删除选中的线或节点',
'Full Screen': '全屏',
Unpublished: '未发布',
'Start Process': '启动工作流',
'Execute from the current node': '从当前节点开始执行',
'Recover tolerance fault process': '恢复被容错的工作流',
'Resume the suspension process': '恢复运行流程',
'Execute from the failed nodes': '从失败节点开始执行',
'Complement Data': '补数',
'Scheduling execution': '调度执行',
'Recovery waiting thread': '恢复等待线程',
'Submitted successfully': '提交成功',
Executing: '正在执行',
'Ready to pause': '准备暂停',
'Ready to stop': '准备停止',
'Need fault tolerance': '需要容错',
Kill: 'Kill',
'Waiting for thread': '等待线程',
'Waiting for dependence': '等待依赖',
Start: '运行',
Copy: '复制节点',
'Copy name': '复制名称',
'Please enter keyword': '请输入关键词',
'File Upload': '文件上传',
'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!',
'Drag area upload': '拖动区域上传',
Upload: '上传',
'ReUpload File': '重新上传文件',
'Please enter file name': '请输入文件名',
'Please select the file to upload': '请选择要上传的文件',
'Resources manage': '资源中心',
Security: '安全中心',
Logout: '退出',
'No data': '查询无数据',
'Uploading...': '文件上传中',
'Loading...': '正在努力加载中...',
List: '列表',
'Unable to download without proper url': '无下载url无法下载',
Process: '工作流',
'Process definition': '工作流定义',
'Task record': '任务记录',
'Warning group manage': '告警组管理',
'Warning instance manage': '告警实例管理',
'Servers manage': '服务管理',
'UDF manage': 'UDF管理',
'Resource manage': '资源管理',
'Function manage': '函数管理',
'Edit password': '修改密码',
'Ordinary users': '普通用户',
'Create process': '创建工作流',
'Import process': '导入工作流',
'Timing state': '定时状态',
Timing: '定时',
TreeView: '树形图',
'Mailbox already exists! Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复',
'Mailbox input is illegal': '邮箱输入不合法',
'Please set the parameters before starting': '启动前请先设置参数',
Continue: '继续',
End: '结束',
'Node execution': '节点执行',
'Backward execution': '向后执行',
'Forward execution': '向前执行',
'Execute only the current node': '仅执行当前节点',
'Notification strategy': '通知策略',
'Notification group': '通知组',
'Please select a notification group': '请选择通知组',
receivers: '收件人',
receiverCcs: '抄送人',
'Whether it is a complement process?': '是否补数',
'Schedule date': '调度日期',
'Mode of execution': '执行方式',
'Serial execution': '串行执行',
'Parallel execution': '并行执行',
'Set parameters before timing': '定时前请先设置参数',
'Start and stop time': '起止时间',
'Please select time': '请选择时间',
'Please enter crontab': '请输入crontab',
none_1: '都不发',
success_1: '成功发',
failure_1: '失败发',
All_1: '成功或失败都发',
Toolbar: '工具栏',
'View variables': '查看变量',
'Format DAG': '格式化DAG',
'Refresh DAG status': '刷新DAG状态',
Return_1: '返回上一节点',
'Please enter format': '请输入格式为',
'connection parameter': '连接参数',
'Process definition details': '流程定义详情',
'Create process definition': '创建流程定义',
'Scheduled task list': '定时任务列表',
'Process instance details': '流程实例详情',
'Create Resource': '创建资源',
'User Center': '用户中心',
'Please enter method': '请输入方法',
None: '无',
Name: '名称',
'Process priority': '流程优先级',
'Task priority': '任务优先级',
'Task timeout alarm': '任务超时告警',
'Timeout strategy': '超时策略',
'Timeout alarm': '超时告警',
'Timeout failure': '超时失败',
'Timeout period': '超时时长',
'Waiting Dependent complete': '等待依赖完成',
'Waiting Dependent start': '等待依赖启动',
'Check interval': '检查间隔',
'Timeout must be longer than check interval': '超时时间必须比检查间隔长',
'Timeout strategy must be selected': '超时策略必须选一个',
'Timeout must be a positive integer': '超时时长必须为正整数',
'Add dependency': '添加依赖',
and: '且',
or: '或',
month: '月',
week: '周',
day: '日',
hour: '时',
Running: '正在运行',
'Waiting for dependency to complete': '等待依赖完成',
Selected: '已选',
CurrentHour: '当前小时',
Last1Hour: '前1小时',
Last2Hours: '前2小时',
Last3Hours: '前3小时',
Last24Hours: '前24小时',
today: '今天',
Last1Days: '昨天',
Last2Days: '前两天',
Last3Days: '前三天',
Last7Days: '前七天',
ThisWeek: '本周',
LastWeek: '上周',
LastMonday: '上周一',
LastTuesday: '上周二',
LastWednesday: '上周三',
LastThursday: '上周四',
LastFriday: '上周五',
LastSaturday: '上周六',
LastSunday: '上周日',
ThisMonth: '本月',
LastMonth: '上月',
LastMonthBegin: '上月初',
LastMonthEnd: '上月末',
'Refresh status succeeded': '刷新状态成功',
'Queue manage': 'Yarn 队列管理',
'Create queue': '创建队列',
'Edit queue': '编辑队列',
'Datasource manage': '数据源中心',
'History task record': '历史任务记录',
'Please go online': '不要忘记上线',
'Queue value': '队列值',
'Please enter queue value': '请输入队列值',
'Worker group manage': 'Worker分组管理',
'Create worker group': '创建Worker分组',
'Edit worker group': '编辑Worker分组',
'Token manage': '令牌管理',
'Create token': '创建令牌',
'Edit token': '编辑令牌',
'Please enter the IP address separated by commas': '请输入IP地址多个用英文逗号隔开',
'Note: Multiple IP addresses have been comma separated': '注意:多个IP地址以英文逗号分割',
'Failure time': '失效时间',
'Expiration time': '失效时间',
User: '用户',
'Please enter token': '请输入令牌',
'Generate token': '生成令牌',
Monitor: '监控中心',
Group: '分组',
'Queue statistics': '队列统计',
'Command status statistics': '命令状态统计',
'Task kill': '等待kill任务',
'Task queue': '等待执行任务',
'Error command count': '错误指令数',
'Normal command count': '正确指令数',
Manage: '管理',
'Number of connections': '连接数',
Sent: '发送量',
Received: '接收量',
'Min latency': '最低延时',
'Avg latency': '平均延时',
'Max latency': '最大延时',
'Node count': '节点数',
'Query time': '当前查询时间',
'Node self-test status': '节点自检状态',
'Health status': '健康状态',
'Max connections': '最大连接数',
'Threads connections': '当前连接数',
'Max used connections': '同时使用连接最大数',
'Threads running connections': '数据库当前活跃连接数',
'Worker group': 'Worker分组',
'Please enter a positive integer greater than 0': '请输入大于 0 的正整数',
'Pre Statement': '前置sql',
'Post Statement': '后置sql',
'Statement cannot be empty': '语句不能为空',
'Process Define Count': '工作流定义数',
'Process Instance Running Count': '正在运行的流程数',
'command number of waiting for running': '待执行的命令数',
'failure command number': '执行失败的命令数',
'tasks number of waiting running': '待运行任务数',
'task number of ready to kill': '待杀死任务数',
'Statistics manage': '统计管理',
statistics: '统计',
'select tenant': '选择租户',
'Please enter Principal': '请输入Principal',
'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf',
'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username',
'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path',
'The start time must not be the same as the end': '开始时间和结束时间不能相同',
'Startup parameter': '启动参数',
'Startup type': '启动类型',
'warning of timeout': '超时告警',
'Next five execution times': '接下来五次执行时间',
'Execute time': '执行时间',
'Complement range': '补数范围',
'Http Url': '请求地址',
'Http Method': '请求类型',
'Http Parameters': '请求参数',
'Http Parameters Key': '参数名',
'Http Parameters Position': '参数位置',
'Http Parameters Value': '参数值',
'Http Check Condition': '校验条件',
'Http Condition': '校验内容',
'Please Enter Http Url': '请填写请求地址(必填)',
'Please Enter Http Condition': '请填写校验内容',
'There is no data for this period of time': '该时间段无数据',
'IP address cannot be empty': 'IP地址不能为空',
'Please enter the correct IP': '请输入正确的IP',
'Please generate token': '请生成Token',
'Spark Version': 'Spark版本',
TargetDataBase: '目标库',
TargetTable: '目标表',
'Please enter the table of target': '请输入目标表名',
'Please enter a Target Table(required)': '请输入目标表(必填)',
SpeedByte: '限流(字节数)',
SpeedRecord: '限流(记录数)',
'0 means unlimited by byte': 'KB,0代表不限制',
'0 means unlimited by count': '0代表不限制',
'Modify User': '修改用户',
'Whether directory': '是否文件夹',
Yes: '是',
No: '否',
'Hadoop Custom Params': 'Hadoop参数',
'Sqoop Advanced Parameters': 'Sqoop参数',
'Sqoop Job Name': '任务名称',
'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)',
'Please enter Mysql Table(required)': '请输入Mysql表名(必填)',
'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开',
'Please enter Target Dir(required)': '请输入目标路径(必填)',
'Please enter Export Dir(required)': '请输入数据源路径(必填)',
'Please enter Hive Database(required)': '请输入Hive数据库(必填)',
'Please enter Hive Table(required)': '请输入Hive表名(必填)',
'Please enter Hive Partition Keys': '请输入分区键',
'Please enter Hive Partition Values': '请输入分区值',
'Please enter Replace Delimiter': '请输入替换分隔符',
'Please enter Fields Terminated': '请输入列分隔符',
'Please enter Lines Terminated': '请输入行分隔符',
'Please enter Concurrency': '请输入并发度',
'Please enter Update Key': '请输入更新列',
'Please enter Job Name(required)': '请输入任务名称(必填)',
  'Please enter Custom Shell(required)': '请输入自定义脚本(必填)',
Direct: '流向',
Type: '类型',
ModelType: '模式',
ColumnType: '列类型',
Database: '数据库',
Column: '列',
'Map Column Hive': 'Hive类型映射',
'Map Column Java': 'Java类型映射',
'Export Dir': '数据源路径',
'Hive partition Keys': 'Hive 分区键',
'Hive partition Values': 'Hive 分区值',
FieldsTerminated: '列分隔符',
LinesTerminated: '行分隔符',
IsUpdate: '是否更新',
UpdateKey: '更新列',
UpdateMode: '更新类型',
'Target Dir': '目标路径',
DeleteTargetDir: '是否删除目录',
FileType: '保存格式',
CompressionCodec: '压缩类型',
CreateHiveTable: '是否创建新表',
DropDelimiter: '是否删除分隔符',
OverWriteSrc: '是否覆盖数据源',
ReplaceDelimiter: '替换分隔符',
Concurrency: '并发度',
Form: '表单',
OnlyUpdate: '只更新',
AllowInsert: '无更新便插入',
'Data Source': '数据来源',
'Data Target': '数据目的',
'All Columns': '全表导入',
'Some Columns': '选择列',
'Branch flow': '分支流转',
'Custom Job': '自定义任务',
'Custom Script': '自定义脚本',
'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点',
'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填',
'No resources exist': '不存在资源',
'Please delete all non-existing resources': '请删除所有不存在资源',
'Unauthorized or deleted resources': '未授权或已删除资源',
'Please delete all non-existent resources': '请删除所有未授权或已删除资源',
Kinship: '工作流关系',
Reset: '重置',
KinshipStateActive: '当前选择',
KinshipState1: '已上线',
KinshipState0: '工作流未上线',
KinshipState10: '调度未上线',
'Dag label display control': 'Dag节点名称显隐',
Enable: '启用',
Disable: '停用',
'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!',
'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存',
'User name length is between 3 and 39': '用户名长度在3~39之间',
'Timeout Settings': '超时设置',
'Connect Timeout': '连接超时',
'Socket Timeout': 'Socket超时',
'Connect timeout be a positive integer': '连接超时必须为数字',
'Socket Timeout be a positive integer': 'Socket超时必须为数字',
ms: '毫秒',
'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077',
Master: 'Master',
'Please select the waterdrop resources': '请选择waterdrop配置文件',
zkDirectory: 'zk注册目录',
'Directory detail': '查看目录详情',
'Connection name': '连线名',
'Current connection settings': '当前连线设置',
'Please save the DAG before formatting': '格式化前请先保存DAG',
'Batch copy': '批量复制',
'Related items': '关联项目',
'Project name is required': '项目名称必填',
'Batch move': '批量移动',
Version: '版本',
'Pre tasks': '前置任务',
'Running Memory': '运行内存',
'Max Memory': '最大内存',
'Min Memory': '最小内存',
'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建',
Info: '提示',
'Datasource userName': '所属用户',
'Resource userName': '所属用户'
}
|
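A minimal sketch of what the switch requested in issue 5087 could look like, added here for illustration only: the class and field names (`SqlTaskSwitchesSketch`, `sendEmail`, `displayRows`) are assumptions, not the actual DolphinScheduler API introduced by the linked pull request.

```java
/**
 * Hypothetical sketch of two switches on a SQL task's parameters:
 * one controlling whether the query result is sent by mail, one
 * controlling how many head rows of the result are printed into
 * the task log.
 */
public class SqlTaskSwitchesSketch {

    // assumed switch: send the query result by mail only when enabled
    private Boolean sendEmail;

    // assumed switch: number of head rows to print into the task log
    private Integer displayRows;

    public boolean isSendEmail() {
        // null-safe read: treat an absent value as "disabled"
        return Boolean.TRUE.equals(sendEmail);
    }

    public int getDisplayRows() {
        // null-safe read: treat an absent value as "print no head rows"
        return displayRows == null ? 0 : displayRows;
    }

    public void setSendEmail(Boolean sendEmail) {
        this.sendEmail = sendEmail;
    }

    public void setDisplayRows(Integer displayRows) {
        this.displayRows = displayRows;
    }
}
```

The null-safe getters keep the switches backward compatible at the parameter-parsing level when old task definitions omit the new fields; what the defaults should actually be is a design decision of the real implementation.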
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,087 | [Feature][SqlTask] Add a switch to send mail and print head logs in SqlTask | **Describe the feature**
**Add the switch to send mail and print head logs in SqlTask**
| https://github.com/apache/dolphinscheduler/issues/5087 | https://github.com/apache/dolphinscheduler/pull/5088 | 4a6e8b7afac5c56392ca74de008ef5f1319a3be6 | 8ac72e80e6656ab2119f938e038df663765e1379 | "2021-03-18T05:17:33Z" | java | "2021-03-19T07:07:39Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.incubator.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.incubator.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.incubator.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<revision>1.3.6-SNAPSHOT</revision>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
                        <include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/DolphinSchedulerPluginLoaderTest.java</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/register/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/AbstractCommandExecutorTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/zk/RegisterOperatorTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/MailUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,048 | [Bug][Api] Delete the stopped workflow without deleting the corresponding sub process, the workflow instance name is not displayed in the sub process | 1. Stop a workflow that contains a sub_process.
2. Delete the workflow: the sub_process instances are not deleted, and the workflow instance name is not displayed in the sub process.
![image](https://user-images.githubusercontent.com/55787491/111032444-40d24a80-8447-11eb-9bc9-963c9914a06a.png)
**Expected results**
Deleting the workflow should also delete all tasks under the workflow, including the sub_process.
**Which version of Dolphin Scheduler:**
-[1.3.6-prepare]
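The expected behavior implies a recursive cleanup. A minimal sketch of that idea follows; the `InstanceStore` facade and its three method names are illustrative assumptions, not the verified DolphinScheduler ProcessService API:

```java
import java.util.List;

// Assumed facade -- stand-ins for whatever the real service/mappers expose.
interface InstanceStore {
    List<Integer> querySubIdListByParentId(int parentProcessInstanceId);
    void deleteWorkProcessMapByParentId(int parentProcessInstanceId);
    void deleteWorkProcessInstanceById(int processInstanceId);
}

public class RecursiveInstanceCleanup {

    private final InstanceStore store;

    public RecursiveInstanceCleanup(InstanceStore store) {
        this.store = store;
    }

    // Delete a process instance together with every sub-process instance
    // recorded for it (t_ds_relation_process_instance links parent -> sub).
    public void deleteWithSubProcesses(int processInstanceId) {
        for (Integer subId : store.querySubIdListByParentId(processInstanceId)) {
            // recurse first: a sub-process may itself spawn sub-processes
            deleteWithSubProcesses(subId);
        }
        // then drop the relation rows and the instance itself
        store.deleteWorkProcessMapByParentId(processInstanceId);
        store.deleteWorkProcessInstanceById(processInstanceId);
    }
}
```

Deleting children before the parent keeps the relation table consistent even if the cleanup stops halfway.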
| https://github.com/apache/dolphinscheduler/issues/5048 | https://github.com/apache/dolphinscheduler/pull/5066 | ba039dc252a2ddfe316297a2955f0ea9f966432a | f109a758f8388a0499391f5a87ccd6d1e17db3aa | "2021-03-13T14:00:33Z" | java | "2021-03-23T03:26:11Z" | dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import java.util.Date;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
@RunWith(SpringRunner.class)
@SpringBootTest
@Transactional
@Rollback(true)
public class TaskInstanceMapperTest {
@Autowired
TaskInstanceMapper taskInstanceMapper;
@Autowired
ProcessDefinitionMapper processDefinitionMapper;
@Autowired
ProcessInstanceMapper processInstanceMapper;
@Autowired
ProcessInstanceMapMapper processInstanceMapMapper;
/**
* insert
*
* @return TaskInstance
*/
private TaskInstance insertOne() {
//insertOne
return insertOne("us task", 1, ExecutionStatus.RUNNING_EXECUTION, TaskType.SHELL.toString());
}
/**
* construct a task instance and then insert
*
* @param taskName
* @param processInstanceId
* @param state
* @param taskType
* @return
*/
private TaskInstance insertOne(String taskName, int processInstanceId, ExecutionStatus state, String taskType) {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setFlag(Flag.YES);
taskInstance.setName(taskName);
taskInstance.setState(state);
taskInstance.setStartTime(new Date());
taskInstance.setEndTime(new Date());
taskInstance.setTaskJson("{}");
taskInstance.setProcessInstanceId(processInstanceId);
taskInstance.setTaskType(taskType);
taskInstanceMapper.insert(taskInstance);
return taskInstance;
}
/**
* test update
*/
@Test
public void testUpdate() {
//insertOne
TaskInstance taskInstance = insertOne();
//update
int update = taskInstanceMapper.updateById(taskInstance);
Assert.assertEquals(1, update);
taskInstanceMapper.deleteById(taskInstance.getId());
}
/**
* test delete
*/
@Test
public void testDelete() {
TaskInstance taskInstance = insertOne();
int delete = taskInstanceMapper.deleteById(taskInstance.getId());
Assert.assertEquals(1, delete);
}
/**
* test query
*/
@Test
public void testQuery() {
TaskInstance taskInstance = insertOne();
//query
List<TaskInstance> taskInstances = taskInstanceMapper.selectList(null);
taskInstanceMapper.deleteById(taskInstance.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
}
/**
* test query task instance by process instance id and state
*/
@Test
public void testQueryTaskByProcessIdAndState() {
TaskInstance task = insertOne();
task.setProcessInstanceId(110);
taskInstanceMapper.updateById(task);
List<Integer> taskInstances = taskInstanceMapper.queryTaskByProcessIdAndState(
task.getProcessInstanceId(),
ExecutionStatus.RUNNING_EXECUTION.ordinal()
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
}
/**
* test find valid task list by process instance id
*/
@Test
public void testFindValidTaskListByProcessId() {
TaskInstance task = insertOne();
TaskInstance task2 = insertOne();
task.setProcessInstanceId(110);
task2.setProcessInstanceId(110);
taskInstanceMapper.updateById(task);
taskInstanceMapper.updateById(task2);
List<TaskInstance> taskInstances = taskInstanceMapper.findValidTaskListByProcessId(
task.getProcessInstanceId(),
Flag.YES
);
task2.setFlag(Flag.NO);
taskInstanceMapper.updateById(task2);
List<TaskInstance> taskInstances1 = taskInstanceMapper.findValidTaskListByProcessId(task.getProcessInstanceId(),
Flag.NO);
taskInstanceMapper.deleteById(task2.getId());
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
Assert.assertNotEquals(taskInstances1.size(), 0);
}
/**
* test query by host and status
*/
@Test
public void testQueryByHostAndStatus() {
TaskInstance task = insertOne();
task.setHost("111.111.11.11");
taskInstanceMapper.updateById(task);
List<TaskInstance> taskInstances = taskInstanceMapper.queryByHostAndStatus(
task.getHost(), new int[]{ExecutionStatus.RUNNING_EXECUTION.ordinal()}
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstances.size(), 0);
}
/**
* test set failover by host and state array
*/
@Test
public void testSetFailoverByHostAndStateArray() {
TaskInstance task = insertOne();
task.setHost("111.111.11.11");
taskInstanceMapper.updateById(task);
int setResult = taskInstanceMapper.setFailoverByHostAndStateArray(
task.getHost(),
new int[]{ExecutionStatus.RUNNING_EXECUTION.ordinal()},
ExecutionStatus.NEED_FAULT_TOLERANCE
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(setResult, 0);
}
/**
* test query by task instance id and name
*/
@Test
public void testQueryByInstanceIdAndName() {
TaskInstance task = insertOne();
task.setHost("111.111.11.11");
taskInstanceMapper.updateById(task);
TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(
task.getProcessInstanceId(),
task.getName()
);
taskInstanceMapper.deleteById(task.getId());
Assert.assertNotEquals(taskInstance, null);
}
/**
* test count task instance
*/
@Test
public void testCountTask() {
TaskInstance task = insertOne();
ProcessDefinition definition = new ProcessDefinition();
definition.setProjectId(1111);
processDefinitionMapper.insert(definition);
task.setProcessDefinitionId(definition.getId());
taskInstanceMapper.updateById(task);
int countTask = taskInstanceMapper.countTask(
new Integer[0],
new int[0]
);
int countTask2 = taskInstanceMapper.countTask(
new Integer[]{definition.getProjectId()},
new int[]{task.getId()}
);
taskInstanceMapper.deleteById(task.getId());
processDefinitionMapper.deleteById(definition.getId());
Assert.assertNotEquals(countTask, 0);
Assert.assertNotEquals(countTask2, 0);
}
/**
* test count task instance state by user
*/
@Test
public void testCountTaskInstanceStateByUser() {
TaskInstance task = insertOne();
ProcessDefinition definition = new ProcessDefinition();
definition.setProjectId(1111);
processDefinitionMapper.insert(definition);
task.setProcessDefinitionId(definition.getId());
taskInstanceMapper.updateById(task);
List<ExecuteStatusCount> count = taskInstanceMapper.countTaskInstanceStateByUser(
null, null,
new Integer[]{definition.getProjectId()}
);
processDefinitionMapper.deleteById(definition.getId());
taskInstanceMapper.deleteById(task.getId());
}
/**
* test page
*/
@Test
public void testQueryTaskInstanceListPaging() {
TaskInstance task = insertOne();
ProcessDefinition definition = new ProcessDefinition();
definition.setProjectId(1111);
processDefinitionMapper.insert(definition);
ProcessInstance processInstance = new ProcessInstance();
processInstance.setProcessDefinitionId(definition.getId());
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setName("ut process");
processInstance.setStartTime(new Date());
processInstance.setEndTime(new Date());
processInstance.setCommandType(CommandType.START_PROCESS);
processInstanceMapper.insert(processInstance);
task.setProcessDefinitionId(definition.getId());
task.setProcessInstanceId(processInstance.getId());
taskInstanceMapper.updateById(task);
Page<TaskInstance> page = new Page<>(1, 3);
IPage<TaskInstance> taskInstanceIPage = taskInstanceMapper.queryTaskInstanceListPaging(
page,
definition.getProjectId(),
task.getProcessInstanceId(),
"",
"",
"",
0,
new int[0],
"",
null,null
);
processInstanceMapper.deleteById(processInstance.getId());
taskInstanceMapper.deleteById(task.getId());
processDefinitionMapper.deleteById(definition.getId());
Assert.assertNotEquals(taskInstanceIPage.getTotal(), 0);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,048 | [Bug][Api] Delete the stopped workflow without deleting the corresponding sub process, the workflow instance name is not displayed in the sub process | 1. Stop a workflow that contains a sub_process.
2. Delete the workflow: the sub_process instances are not deleted, and the workflow instance name is not displayed in the sub process.
![image](https://user-images.githubusercontent.com/55787491/111032444-40d24a80-8447-11eb-9bc9-963c9914a06a.png)
**Expected results**
Deleting the workflow should also delete all tasks under the workflow, including the sub_process.
**Which version of Dolphin Scheduler:**
-[1.3.6-prepare]
| https://github.com/apache/dolphinscheduler/issues/5048 | https://github.com/apache/dolphinscheduler/pull/5066 | ba039dc252a2ddfe316297a2955f0ea9f966432a | f109a758f8388a0499391f5a87ccd6d1e17db3aa | "2021-03-13T14:00:33Z" | java | "2021-03-23T03:26:11Z" | sql/dolphinscheduler_postgre.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(95) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
content text ,
alert_status int DEFAULT '0' ,
log text ,
alertgroup_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup(
id int NOT NULL,
alert_instance_ids varchar (255) DEFAULT NULL,
create_user_id int4 DEFAULT NULL,
group_name varchar(255) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
dependence varchar(255) DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(256) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
executor_id int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
dependence text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
message text ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
release_state int DEFAULT NULL ,
project_id int DEFAULT NULL ,
user_id int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
flag int DEFAULT NULL ,
locations text ,
connects text ,
warning_group_id int4 DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
update_time timestamp DEFAULT NULL ,
modify_by varchar(36) DEFAULT '' ,
resource_ids varchar(64),
PRIMARY KEY (id),
CONSTRAINT process_definition_unique UNIQUE (name, project_id)
) ;
create index process_definition_index on t_ds_process_definition (project_id,id);
--
-- Table structure for table t_ds_process_definition_version
--
DROP TABLE IF EXISTS t_ds_process_definition_version;
CREATE TABLE t_ds_process_definition_version (
id int NOT NULL ,
process_definition_id int NOT NULL ,
version int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
locations text ,
connects text ,
warning_group_id int4 DEFAULT NULL,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
locations text ,
connects text ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64) ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
var_pool text ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_id,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id),
CONSTRAINT t_ds_resources_un UNIQUE (full_name, type)
) ;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_id int NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
crontab varchar(256) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(64) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
task_json text ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link text ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
executor_id int DEFAULT NULL ,
first_submit_time timestamp DEFAULT NULL ,
delay_time int DEFAULT '0' ,
var_pool text ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
comment on column t_ds_user.state is 'state 0:disable 1:enable';
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(256) DEFAULT NULL ,
ip_list varchar(256) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user, user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time)
VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup, default admin warning group
INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- Records of t_ds_queue, default queue name : default
INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time)
VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_version
INSERT INTO t_ds_version(version) VALUES ('1.4.0');
--
-- Table structure for table t_ds_plugin_define
--
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
--
-- Table structure for table t_ds_alert_plugin_instance
--
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,048 | [Bug][Api] Delete the stopped workflow without deleting the corresponding sub process, the workflow instance name is not displayed in the sub process | 1. Stop a workflow that contains a sub_process.
2. Delete the workflow: the sub_process instances are not deleted, and the workflow instance name is not displayed in the sub process.
![image](https://user-images.githubusercontent.com/55787491/111032444-40d24a80-8447-11eb-9bc9-963c9914a06a.png)
**Expected results**
Deleting the workflow should also delete all tasks under the workflow, including the sub_process.
**Which version of Dolphin Scheduler:**
-[1.3.6-prepare]
| https://github.com/apache/dolphinscheduler/issues/5048 | https://github.com/apache/dolphinscheduler/pull/5066 | ba039dc252a2ddfe316297a2955f0ea9f966432a | f109a758f8388a0499391f5a87ccd6d1e17db3aa | "2021-03-13T14:00:33Z" | java | "2021-03-23T03:26:11Z" | sql/upgrade/1.3.6_schema/postgresql/dolphinscheduler_ddl.sql | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,048 | [Bug][Api] Delete the stopped workflow without deleting the corresponding sub process, the workflow instance name is not displayed in the sub process | 1. Stop a workflow that contains a sub_process.
2. Delete the workflow: the sub_process instances are not deleted, and the workflow instance name is not displayed in the sub process.
![image](https://user-images.githubusercontent.com/55787491/111032444-40d24a80-8447-11eb-9bc9-963c9914a06a.png)
**Expected results**
Deleting the workflow should also delete all tasks under the workflow, including the sub_process.
**Which version of Dolphin Scheduler:**
-[1.3.6-prepare]
| https://github.com/apache/dolphinscheduler/issues/5048 | https://github.com/apache/dolphinscheduler/pull/5066 | ba039dc252a2ddfe316297a2955f0ea9f966432a | f109a758f8388a0499391f5a87ccd6d1e17db3aa | "2021-03-13T14:00:33Z" | java | "2021-03-23T03:26:11Z" | sql/upgrade/1.3.6_schema/postgresql/dolphinscheduler_dml.sql | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,114 | [Question] data.basedir.path cleanup problem. | 1. I changed data.basedir.path from /tmp/dolphinscheduler/ to /data/dolphinscheduler/tmp on the data disk.
2. Developer mode is false.
3. The /data/dolphinscheduler/tmp folder has normal read/write permissions, and DolphinScheduler works perfectly.
4. After running for a while, the files under the /data/dolphinscheduler/tmp directory are not deleted automatically, so the disk fills up.
Question: Can the temporary files in this directory only be removed manually? Can't they be deleted automatically?
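Until automatic cleanup exists, a minimal sketch of one possible workaround is a scheduled job that removes old execution directories under data.basedir.path. The `exec/process` sub-directory layout and the 7-day retention below are assumptions, and this is not the fix shipped in the linked pull request:

```java
import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class ExecDirCleanup {

    // assumed retention window for finished execution directories
    private static final long RETENTION_MS = TimeUnit.DAYS.toMillis(7);

    public static void cleanOldExecDirs(String dataBasedirPath) throws IOException {
        // execution working directories are assumed to live under <data.basedir.path>/exec/process
        File execRoot = new File(dataBasedirPath, "exec/process");
        File[] projectDirs = execRoot.listFiles();
        if (projectDirs == null) {
            return; // directory missing or not readable
        }
        long cutoff = System.currentTimeMillis() - RETENTION_MS;
        for (File dir : projectDirs) {
            // lastModified() on the directory itself is a coarse signal; a stricter
            // version should check the newest file anywhere inside the tree
            if (dir.isDirectory() && dir.lastModified() < cutoff) {
                org.apache.commons.io.FileUtils.deleteDirectory(dir);
            }
        }
    }
}
```

Running this on a schedule (for example from cron) keeps the data disk from filling up.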
 | https://github.com/apache/dolphinscheduler/issues/4114 | https://github.com/apache/dolphinscheduler/pull/5123 | a8b47e1d4f4ebb9795239ef69b57f2928a81f44c | 492b318bd321d35247488e1f181e3ea9d1259963 | "2020-11-27T02:11:34Z" | java | "2021-03-26T02:11:56Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.DATA_BASEDIR_PATH;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_VIEW_SUFFIXS;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE;
import static org.apache.dolphinscheduler.common.Constants.YYYYMMDDHHMMSS;
import org.apache.commons.io.Charsets;
import org.apache.commons.io.IOUtils;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.charset.UnsupportedCharsetException;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* file utils
*/
public class FileUtils {
public static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
public static final String DATA_BASEDIR = PropertyUtils.getString(DATA_BASEDIR_PATH, "/tmp/dolphinscheduler");
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private FileUtils() {
throw new UnsupportedOperationException("Construct FileUtils");
}
/**
* get file suffix
*
* @param filename file name
* @return file suffix
*/
public static String suffix(String filename) {
String fileSuffix = "";
if (StringUtils.isNotEmpty(filename)) {
int lastIndex = filename.lastIndexOf('.');
if (lastIndex > 0) {
fileSuffix = filename.substring(lastIndex + 1);
}
}
return fileSuffix;
}
/**
* get download file absolute path and name
*
* @param filename file name
* @return download file name
*/
public static String getDownloadFilename(String filename) {
String fileName = String.format("%s/download/%s/%s", DATA_BASEDIR, DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* get upload file absolute path and name
*
* @param tenantCode tenant code
* @param filename file name
* @return local file path
*/
public static String getUploadFilename(String tenantCode, String filename) {
String fileName = String.format("%s/%s/resources/%s", DATA_BASEDIR, tenantCode, filename);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* directory of process execution
*
* @param projectId project id
* @param processDefineId process definition id
* @param processInstanceId process instance id
* @param taskInstanceId task instance id
* @return directory of process execution
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId, int taskInstanceId) {
String fileName = String.format("%s/exec/process/%s/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId), Integer.toString(taskInstanceId));
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* directory of process instances
*
* @param projectId project id
* @param processDefineId process definition id
* @param processInstanceId process instance id
* @return directory of process instances
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId) {
String fileName = String.format("%s/exec/process/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId));
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* @return get suffixes for resource files that support online viewing
*/
public static String getResourceViewSuffixs() {
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS, RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE);
}
/**
* create directory if absent
*
* @param execLocalPath execute local path
* @throws IOException errors
*/
public static void createWorkDirIfAbsent(String execLocalPath) throws IOException {
//if work dir exists, first delete
File execLocalPathFile = new File(execLocalPath);
if (execLocalPathFile.exists()) {
org.apache.commons.io.FileUtils.forceDelete(execLocalPathFile);
}
//create work dir
org.apache.commons.io.FileUtils.forceMkdir(execLocalPathFile);
String mkdirLog = "create dir success " + execLocalPath;
LoggerUtils.logInfo(Optional.ofNullable(logger), mkdirLog);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), mkdirLog);
}
/**
* write content to file ,if parent path not exists, it will do one's utmost to mkdir
*
* @param content content
* @param filePath target file path
* @return true if write success
*/
public static boolean writeContent2File(String content, String filePath) {
boolean flag = true;
BufferedReader bufferedReader = null;
BufferedWriter bufferedWriter = null;
try {
File distFile = new File(filePath);
if (!distFile.getParentFile().exists() && !distFile.getParentFile().mkdirs()) {
FileUtils.logger.error("mkdir parent failed");
return false;
}
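            // stream the content into the target file through buffered streams, encoded as UTF-8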
bufferedReader = new BufferedReader(new StringReader(content));
bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(distFile), StandardCharsets.UTF_8));
char[] buf = new char[1024];
int len;
while ((len = bufferedReader.read(buf)) != -1) {
bufferedWriter.write(buf, 0, len);
}
bufferedWriter.flush();
bufferedReader.close();
bufferedWriter.close();
} catch (IOException e) {
FileUtils.logger.error(e.getMessage(), e);
flag = false;
return flag;
} finally {
IOUtils.closeQuietly(bufferedWriter);
IOUtils.closeQuietly(bufferedReader);
}
return flag;
}
/**
* Writes a String to a file creating the file if it does not exist.
* <p>
* NOTE: As from v1.3, the parent directories of the file will be created
* if they do not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @throws IOException in case of an I/O error
* @throws java.io.UnsupportedEncodingException if the encoding is not supported by the VM
* @since 2.4
*/
public static void writeStringToFile(File file, String data, Charset encoding) throws IOException {
writeStringToFile(file, data, encoding, false);
}
/**
* Writes a String to a file creating the file if it does not exist.
* <p>
* NOTE: As from v1.3, the parent directories of the file will be created
* if they do not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @throws IOException in case of an I/O error
* @throws java.io.UnsupportedEncodingException if the encoding is not supported by the VM
*/
public static void writeStringToFile(File file, String data, String encoding) throws IOException {
writeStringToFile(file, data, encoding, false);
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @since 2.3
*/
public static void writeStringToFile(File file, String data, Charset encoding, boolean append) throws IOException {
OutputStream out = null;
try {
out = openOutputStream(file, append);
IOUtils.write(data, out, encoding);
out.close(); // don't swallow close Exception if copy completes normally
} finally {
IOUtils.closeQuietly(out);
}
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @throws UnsupportedCharsetException thrown instead of {@link UnsupportedEncodingException} in version 2.2 if the encoding is not
* supported by the VM
* @since 2.1
*/
public static void writeStringToFile(File file, String data, String encoding, boolean append) throws IOException {
writeStringToFile(file, data, Charsets.toCharset(encoding), append);
}
/**
* Writes a String to a file creating the file if it does not exist using the default encoding for the VM.
*
* @param file the file to write
* @param data the content to write to the file
* @throws IOException in case of an I/O error
*/
public static void writeStringToFile(File file, String data) throws IOException {
writeStringToFile(file, data, Charset.defaultCharset(), false);
}
/**
* Writes a String to a file creating the file if it does not exist using the default encoding for the VM.
*
* @param file the file to write
* @param data the content to write to the file
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @since 2.1
*/
public static void writeStringToFile(File file, String data, boolean append) throws IOException {
writeStringToFile(file, data, Charset.defaultCharset(), append);
}
/**
* Opens a {@link FileOutputStream} for the specified file, checking and
* creating the parent directory if it does not exist.
* <p>
* At the end of the method either the stream will be successfully opened,
* or an exception will have been thrown.
* <p>
* The parent directory will be created if it does not exist.
* The file will be created if it does not exist.
* An exception is thrown if the file object exists but is a directory.
* An exception is thrown if the file exists but cannot be written to.
* An exception is thrown if the parent directory cannot be created.
*
* @param file the file to open for output, must not be {@code null}
* @return a new {@link FileOutputStream} for the specified file
* @throws IOException if the file object is a directory
* @throws IOException if the file cannot be written to
* @throws IOException if a parent directory needs creating but that fails
* @since 1.3
*/
public static FileOutputStream openOutputStream(File file) throws IOException {
return openOutputStream(file, false);
}
/**
* Opens a {@link FileOutputStream} for the specified file, checking and
* creating the parent directory if it does not exist.
* <p>
* At the end of the method either the stream will be successfully opened,
* or an exception will have been thrown.
* <p>
* The parent directory will be created if it does not exist.
* The file will be created if it does not exist.
* An exception is thrown if the file object exists but is a directory.
* An exception is thrown if the file exists but cannot be written to.
* An exception is thrown if the parent directory cannot be created.
*
* @param file the file to open for output, must not be {@code null}
* @param append if {@code true}, then bytes will be added to the
* end of the file rather than overwriting
* @return a new {@link FileOutputStream} for the specified file
* @throws IOException if the file object is a directory
* @throws IOException if the file cannot be written to
* @throws IOException if a parent directory needs creating but that fails
* @since 2.1
*/
public static FileOutputStream openOutputStream(File file, boolean append) throws IOException {
if (file.exists()) {
if (file.isDirectory()) {
throw new IOException("File '" + file + "' exists but is a directory");
}
if (!file.canWrite()) {
throw new IOException("File '" + file + "' cannot be written to");
}
} else {
File parent = file.getParentFile();
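            // mkdirs() may return false when another thread creates the directory
            // concurrently, so also accept the case where it is now a directory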
if (parent != null && !parent.mkdirs() && !parent.isDirectory()) {
throw new IOException("Directory '" + parent + "' could not be created");
}
}
return new FileOutputStream(file, append);
}
/**
* deletes a directory recursively
*
* @param dir directory
* @throws IOException in case deletion is unsuccessful
*/
public static void deleteDir(String dir) throws IOException {
org.apache.commons.io.FileUtils.deleteDirectory(new File(dir));
}
/**
* Deletes a file. If file is a directory, delete it and all sub-directories.
* <p>
* The difference between File.delete() and this method are:
* <ul>
* <li>A directory to be deleted does not have to be empty.</li>
* <li>You get exceptions when a file or directory cannot be deleted.
* (java.io.File methods returns a boolean)</li>
* </ul>
*
* @param filename file name
* @throws IOException in case deletion is unsuccessful
*/
public static void deleteFile(String filename) throws IOException {
org.apache.commons.io.FileUtils.forceDelete(new File(filename));
}
/**
* Gets all the parent subdirectories of the parentDir directory
*
* @param parentDir parent dir
* @return all dirs
*/
public static File[] getAllDir(String parentDir) {
if (parentDir == null || "".equals(parentDir)) {
throw new RuntimeException("parentDir can not be empty");
}
File file = new File(parentDir);
if (!file.exists() || !file.isDirectory()) {
throw new RuntimeException("parentDir not exist, or is not a directory:" + parentDir);
}
return file.listFiles(File::isDirectory);
}
/**
* Get Content
*
* @param inputStream input stream
* @return string of input stream
*/
public static String readFile2Str(InputStream inputStream) {
try {
ByteArrayOutputStream output = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int length;
while ((length = inputStream.read(buffer)) != -1) {
output.write(buffer, 0, length);
}
return output.toString();
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new RuntimeException(e);
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,114 | [Question] data.basedir.path cleanup problem. | 1. I changed data.basedir.path from /tmp/dolphinscheduler/ to the data disk /data/dolphinscheduler/tmp.
2. Developer mode is false.
3. The /data/dolphinscheduler/tmp folder has normal read/write permissions, and DS works completely normally.
4. After running for a while, I found that files under /data/dolphinscheduler/tmp are not deleted automatically, so the disk fills up.
Question: Can the temporary files in this directory only be removed manually? Can they not be deleted automatically? | https://github.com/apache/dolphinscheduler/issues/4114 | https://github.com/apache/dolphinscheduler/pull/5123 | a8b47e1d4f4ebb9795239ef69b57f2928a81f44c | 492b318bd321d35247488e1f181e3ea9d1259963 | "2020-11-27T02:11:34Z" | java | "2021-03-26T02:11:56Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/FileUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.YYYYMMDDHHMMSS;
import org.apache.dolphinscheduler.common.Constants;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
@RunWith(PowerMockRunner.class)
@PrepareForTest(DateUtils.class)
public class FileUtilsTest {
@Test
public void suffix() {
Assert.assertEquals("java", FileUtils.suffix("ninfor.java"));
Assert.assertEquals("", FileUtils.suffix(null));
Assert.assertEquals("", FileUtils.suffix(""));
Assert.assertEquals("", FileUtils.suffix("ninfor-java"));
}
@Test
public void testGetDownloadFilename() {
PowerMockito.mockStatic(DateUtils.class);
PowerMockito.when(DateUtils.getCurrentTime(YYYYMMDDHHMMSS)).thenReturn("20190101101059");
Assert.assertEquals("/tmp/dolphinscheduler/download/20190101101059/test",
FileUtils.getDownloadFilename("test"));
}
@Test
public void testGetUploadFilename() {
Assert.assertEquals("/tmp/dolphinscheduler/aaa/resources/bbb",
FileUtils.getUploadFilename("aaa","bbb"));
}
@Test
public void testGetProcessExecDir() {
String dir = FileUtils.getProcessExecDir(1,2,3, 4);
Assert.assertEquals("/tmp/dolphinscheduler/exec/process/1/2/3/4", dir);
dir = FileUtils.getProcessExecDir(1,2,3);
Assert.assertEquals("/tmp/dolphinscheduler/exec/process/1/2/3", dir);
}
@Test
public void testCreateWorkDirIfAbsent() {
try {
FileUtils.createWorkDirIfAbsent("/tmp/createWorkDirAndUserIfAbsent");
        } catch (Exception e) {
            Assert.fail("createWorkDirIfAbsent should not throw: " + e.getMessage());
}
}
@Test
public void testSetValue() {
try {
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"true");
Assert.assertTrue(PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE));
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"false");
Assert.assertFalse(PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE));
} catch (Exception e) {
            Assert.fail("setValue should not throw: " + e.getMessage());
}
}
@Test
public void testWriteContent2File() throws FileNotFoundException {
        // write the content to a new file, then read it back and verify the round trip
String filePath = "test/testFile.txt";
String content = "正正正faffdasfasdfas";
FileUtils.writeContent2File(content, filePath);
String fileContent = FileUtils.readFile2Str(new FileInputStream(new File(filePath)));
Assert.assertEquals(content, fileContent);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,114 | [Question] data.basedir.path cleanup problem. | 1. I changed data.basedir.path from /tmp/dolphinscheduler/ to the data disk /data/dolphinscheduler/tmp.
2. Developer mode is false.
3. The /data/dolphinscheduler/tmp folder has normal read/write permissions, and DS works completely normally.
4. After running for a while, I found that files under /data/dolphinscheduler/tmp are not deleted automatically, so the disk fills up.
Question: Can the temporary files in this directory only be removed manually? Can they not be deleted automatically? | https://github.com/apache/dolphinscheduler/issues/4114 | https://github.com/apache/dolphinscheduler/pull/5123 | a8b47e1d4f4ebb9795239ef69b57f2928a81f44c | 492b318bd321d35247488e1f181e3ea9d1259963 | "2020-11-27T02:11:34Z" | java | "2021-03-26T02:11:56Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.VarPoolUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.AlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.io.IOException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
/**
* master exec thread,split dag
*/
public class MasterExecThread implements Runnable {
/**
* logger of MasterExecThread
*/
private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class);
/**
     * running task nodes
*/
private final Map<MasterBaseTaskExecThread, Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* process instance
*/
private ProcessInstance processInstance;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task queue
*/
private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private AlertManager alertManager;
/**
* the object of DAG
*/
private DAG<String, TaskNode, TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
     * netty remoting client
*/
private NettyRemotingClient nettyRemotingClient;
    /**
     * var pool: map from property name to value, handed to post task nodes on submit
     */
    private Map<String, Object> propToValue = new ConcurrentHashMap<>();
/**
* constructor of MasterExecThread
*
* @param processInstance processInstance
* @param processService processService
* @param nettyRemotingClient nettyRemotingClient
*/
public MasterExecThread(ProcessInstance processInstance
, ProcessService processService
, NettyRemotingClient nettyRemotingClient
, AlertManager alertManager
, MasterConfig masterConfig) {
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = masterConfig;
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyRemotingClient = nettyRemotingClient;
this.alertManager = alertManager;
}
@Override
public void run() {
// process instance is null
if (processInstance == null) {
logger.info("process instance is not exists");
return;
}
// check to see if it's done
if (processInstance.getState().typeIsFinished()) {
logger.info("process instance is done : {}", processInstance.getId());
return;
}
try {
if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) {
// sub process complement data
executeComplementProcess();
} else {
// execute flow
executeProcess();
}
} catch (Exception e) {
logger.error("master exec thread exception", e);
logger.error("process execute failed, process id:{}", processInstance.getId());
processInstance.setState(ExecutionStatus.FAILURE);
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
} finally {
taskExecService.shutdown();
// post handle
postHandle();
}
}
/**
* execute process
*
* @throws Exception exception
*/
private void executeProcess() throws Exception {
prepareProcess();
runProcess();
endProcess();
}
/**
* execute complement process
*
* @throws Exception exception
*/
private void executeComplementProcess() throws Exception {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
processService.saveProcessInstance(processInstance);
// get schedules
int processDefinitionId = processInstance.getProcessDefinitionId();
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
List<Date> listDate = Lists.newLinkedList();
if (!CollectionUtils.isEmpty(schedules)) {
for (Schedule schedule : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab()));
}
}
// get first fire date
Iterator<Date> iterator = null;
Date scheduleDate = null;
if (!CollectionUtils.isEmpty(listDate)) {
iterator = listDate.iterator();
scheduleDate = iterator.next();
processInstance.setScheduleTime(scheduleDate);
processService.updateProcessInstance(processInstance);
} else {
scheduleDate = processInstance.getScheduleTime();
if (scheduleDate == null) {
scheduleDate = startDate;
}
}
while (Stopper.isRunning()) {
logger.info("process {} start to complement {} data",
processInstance.getId(), DateUtils.dateToString(scheduleDate));
// prepare dag and other info
prepareProcess();
if (dag == null) {
logger.error("process {} dag is null, please check out parameters",
processInstance.getId());
processInstance.setState(ExecutionStatus.SUCCESS);
processService.updateProcessInstance(processInstance);
return;
}
// execute process ,waiting for end
runProcess();
endProcess();
            // process instance failed, no more complements
if (!processInstance.getState().typeIsSuccess()) {
logger.info("process {} state {}, complement not completely!",
processInstance.getId(), processInstance.getState());
break;
}
            // current process instance succeeded, execute the next complement run
if (null == iterator) {
// loop by day
scheduleDate = DateUtils.getSomeDay(scheduleDate, 1);
if (scheduleDate.after(endDate)) {
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
} else {
// loop by schedule date
if (!iterator.hasNext()) {
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
scheduleDate = iterator.next();
}
// flow end
// execute next process instance complement data
processInstance.setScheduleTime(scheduleDate);
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processInstance.getProcessDefinition().getGlobalParamMap(),
processInstance.getProcessDefinition().getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setId(0);
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
}
}
/**
* prepare process parameter
*
* @throws Exception exception
*/
private void prepareProcess() throws Exception {
// gen process dag
buildFlowDag();
// init task queue
initTaskQueue();
logger.info("prepare process :{} end", processInstance.getId());
}
/**
* process end handle
*/
private void endProcess() {
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if (processInstance.getState().typeIsWaitingThread()) {
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
alertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser);
}
/**
* generate process dag
*
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
forbiddenTaskList = DagHelper.getForbiddenTaskNodeMaps(processInstance.getProcessInstanceJson());
// generate process to get DAG info
List<String> recoveryNameList = getRecoveryNodeNameList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(processInstance.getProcessInstanceJson(),
startNodeNameList, recoveryNameList, processInstance.getTaskDependType());
if (processDag == null) {
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue() {
taskFailedSubmit = false;
activeTaskNode.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance task : taskInstanceList) {
if (task.isTaskComplete()) {
completeTaskList.put(task.getName(), task);
}
if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
continue;
}
if (task.getState().typeIsFailure() && !task.taskCanRetry()) {
errorTaskList.put(task.getName(), task);
}
}
}
/**
     * post handle after the process finishes: clean up the local exec directory
*/
private void postHandle() {
logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
if (!CommonUtils.isDevelopMode()) {
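            // remove the local exec dir once the process instance finishes;
            // it is kept only in develop mode for debugging (see issue #4114)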
// get exec dir
String execLocalPath = org.apache.dolphinscheduler.common.utils.FileUtils
.getProcessExecDir(processInstance.getProcessDefinition().getProjectId(),
processInstance.getProcessDefinitionId(),
processInstance.getId());
try {
FileUtils.deleteDirectory(new File(execLocalPath));
} catch (IOException e) {
logger.error("delete exec dir failed ", e);
}
}
}
/**
* submit task to execute
*
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
MasterBaseTaskExecThread abstractExecThread = null;
if (taskInstance.isSubProcess()) {
abstractExecThread = new SubProcessTaskExecThread(taskInstance);
} else if (taskInstance.isDependTask()) {
abstractExecThread = new DependentTaskExecThread(taskInstance);
} else if (taskInstance.isConditionsTask()) {
abstractExecThread = new ConditionsTaskExecThread(taskInstance);
} else {
abstractExecThread = new MasterTaskExecThread(taskInstance);
}
Future<Boolean> future = taskExecService.submit(abstractExecThread);
activeTaskNode.putIfAbsent(abstractExecThread, future);
return abstractExecThread.getTaskInstance();
}
/**
* find task instance in db.
     * used to avoid submitting more than one task with the same name at the same time.
*
* @param taskName task name
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(String taskName) {
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
if (taskInstance.getName().equals(taskName)) {
return taskInstance;
}
}
return null;
}
/**
* encapsulation task
*
* @param processInstance process instance
* @param nodeName node name
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, String nodeName,
TaskNode taskNode) {
//update processInstance for update the globalParams
this.processInstance = this.processService.findProcessInstanceById(this.processInstance.getId());
TaskInstance taskInstance = findTaskIfExists(nodeName);
if (taskInstance == null) {
taskInstance = new TaskInstance();
// task name
taskInstance.setName(nodeName);
// process instance define id
taskInstance.setProcessDefinitionId(processInstance.getProcessDefinitionId());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance node json
taskInstance.setTaskJson(JSONUtils.toJsonString(taskNode));
// task instance type
taskInstance.setTaskType(taskNode.getType());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(null);
// task instance flag
taskInstance.setFlag(Flag.YES);
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
// task instance priority
if (taskNode.getTaskInstancePriority() == null) {
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
} else {
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
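            // worker group resolution: the task-level group wins, unless the task uses the
            // default group while the process specifies a non-default one, in which case
            // the task inherits the process-level group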
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
} else {
taskInstance.setWorkerGroup(taskWorkerGroup);
}
//get process global
setProcessGlobal(taskNode, taskInstance);
// delay execution time
taskInstance.setDelayTime(taskNode.getDelayTime());
}
return taskInstance;
}
private void setProcessGlobal(TaskNode taskNode, TaskInstance taskInstance) {
String globalParams = this.processInstance.getGlobalParams();
if (StringUtils.isNotEmpty(globalParams)) {
Map<String, String> globalMap = processService.getGlobalParamMap(globalParams);
if (globalMap != null && globalMap.size() != 0) {
setGlobalMapToTask(taskNode, taskInstance, globalMap);
}
}
}
private void setGlobalMapToTask(TaskNode taskNode, TaskInstance taskInstance, Map<String, String> globalMap) {
// the param save in localParams
Map<String, Object> result = JSONUtils.toMap(taskNode.getParams(), String.class, Object.class);
Object localParams = result.get(LOCAL_PARAMS);
if (localParams != null) {
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
for (Property info : allParam) {
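                // only IN-direction params take their values from the global param map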
if (info.getDirect().equals(Direct.IN)) {
String paramName = info.getProp();
String value = globalMap.get(paramName);
if (StringUtils.isNotEmpty(value)) {
info.setValue(value);
}
}
}
result.put(LOCAL_PARAMS, allParam);
taskNode.setParams(JSONUtils.toJsonString(result));
// task instance node json
taskInstance.setTaskJson(JSONUtils.toJsonString(taskNode));
}
}
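    /**
     * submit the post nodes of the given parent node
     *
     * @param parentNodeName parent node name, null when submitting the DAG start nodes
     */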
private void submitPostNode(String parentNodeName) {
Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList);
List<TaskInstance> taskInstances = new ArrayList<>();
for (String taskNode : submitTaskNodeList) {
try {
VarPoolUtils.convertVarPoolToMap(propToValue, processInstance.getVarPool());
} catch (ParseException e) {
logger.error("parse {} exception", processInstance.getVarPool(), e);
                throw new RuntimeException("convert var pool to map failed", e);
}
TaskNode taskNodeObject = dag.getNode(taskNode);
VarPoolUtils.setTaskNodeLocalParams(taskNodeObject, propToValue);
taskInstances.add(createTaskInstance(processInstance, taskNode,
taskNodeObject));
}
// if previous node success , post node submit
for (TaskInstance task : taskInstances) {
if (readyToSubmitTaskQueue.contains(task)) {
continue;
}
if (completeTaskList.containsKey(task.getName())) {
logger.info("task {} has already run success", task.getName());
continue;
}
if (task.getState().typeIsPause() || task.getState().typeIsCancel()) {
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
} else {
addTaskToStandByList(task);
}
}
}
/**
* determine whether the dependencies of the task node are complete
*
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskName) {
Collection<String> startNodes = dag.getBeginNode();
// if vertex,returns true directly
if (startNodes.contains(taskName)) {
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskName);
List<String> depNameList = taskNode.getDepList();
for (String depsNode : depNameList) {
if (!dag.containsNode(depsNode)
|| forbiddenTaskList.containsKey(depsNode)
|| skipTaskNodeList.containsKey(depsNode)) {
continue;
}
// dependencies must be fully completed
if (!completeTaskList.containsKey(depsNode)) {
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) {
return DependResult.WAITING;
}
// ignore task state if current task is condition
if (taskNode.isConditionsTask()) {
continue;
}
if (!dependTaskSuccess(depsNode, taskName)) {
return DependResult.FAILED;
}
}
logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
     * the depend node has completed, but we still need to check whether the condition task branch leads to the next node
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) {
if (dag.getNode(dependNodeName).isConditionsTask()) {
//condition task need check the branch to run
List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList);
if (!nextTaskList.contains(nextNodeName)) {
return false;
}
} else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if (depTaskState.typeIsFailure()) {
return false;
}
}
return true;
}
/**
* query task instance by complete state
*
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) {
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) {
if (entry.getValue().getState() == state) {
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
     * whether there are ongoing tasks
*
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state) {
if (state == ExecutionStatus.READY_STOP
|| state == ExecutionStatus.READY_PAUSE
|| state == ExecutionStatus.WAITTING_THREAD
|| state == ExecutionStatus.DELAY_EXECUTION) {
// if the running task is not completed, the state remains unchanged
return state;
} else {
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
     * whether a failed task exists, including submit failures, dependency failures, and execution failures (after retries are exhausted)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask() {
if (this.taskFailedSubmit) {
return true;
}
if (this.errorTaskList.size() > 0) {
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed() {
if (hasFailedTask()) {
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskQueue.size() == 0 || activeTaskNode.size() == 0;
}
}
return false;
}
/**
* whether task for waiting thread
*
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask() {
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITTING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
     * 1. a failed retry task in the standby queue returns FAILURE directly
     * 2. a paused task, an unfinished complement, or tasks pending submission return PAUSE
     * 3. otherwise SUCCESS
*
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause() {
if (hasRetryTaskInStandBy()) {
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if (CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskQueue.size() > 0) {
return ExecutionStatus.PAUSE;
} else {
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
*
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState() {
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = instance.getState();
if (activeTaskNode.size() > 0 || hasRetryTaskInStandBy()) {
// active task and retry task exists
return runningState(state);
}
// process failure
if (processFailed()) {
return ExecutionStatus.FAILURE;
}
// waiting thread
if (hasWaitingThreadTask()) {
return ExecutionStatus.WAITTING_THREAD;
}
// pause
if (state == ExecutionStatus.READY_PAUSE) {
return processReadyPause();
}
// stop
if (state == ExecutionStatus.READY_STOP) {
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if (CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()) {
return ExecutionStatus.STOP;
} else {
return ExecutionStatus.SUCCESS;
}
}
// success
if (state == ExecutionStatus.RUNNING_EXECUTION) {
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if (readyToSubmitTaskQueue.size() > 0) {
                // tasks are pending submission with no retries, meaning dependencies are still completing
return ExecutionStatus.RUNNING_EXECUTION;
} else if (CollectionUtils.isNotEmpty(killTasks)) {
// tasks maybe killed manually
return ExecutionStatus.FAILURE;
} else {
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether standby task list have retry tasks
*/
private boolean retryTaskExists() {
boolean result = false;
for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) {
TaskInstance task = iter.next();
if (task.getState().typeIsFailure()) {
result = true;
break;
}
}
return result;
}
/**
* whether complement end
*
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if (!processInstance.isComplementData()) {
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ", e);
return false;
}
}
/**
* updateProcessInstance process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ExecutionStatus state = getProcessInstanceState();
if (processInstance.getState() != state) {
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
instance.setState(state);
instance.setProcessDefinition(processInstance.getProcessDefinition());
processService.updateProcessInstance(instance);
processInstance = instance;
}
}
/**
* get task dependency result
*
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance) {
return isTaskDepsComplete(taskInstance.getName());
}
/**
* add task to standby list
*
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance) {
logger.info("add task to stand by list: {}", taskInstance.getName());
try {
readyToSubmitTaskQueue.put(taskInstance);
} catch (Exception e) {
logger.error("add task instance to readyToSubmitTaskQueue error");
}
}
/**
* remove task from stand by list
*
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance) {
logger.info("remove task from stand by list: {}", taskInstance.getName());
try {
readyToSubmitTaskQueue.remove(taskInstance);
} catch (Exception e) {
logger.error("remove task instance from readyToSubmitTaskQueue error");
}
}
/**
* has retry task in standby
*
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy() {
for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) {
if (iter.next().getState().typeIsFailure()) {
return true;
}
}
return false;
}
/**
* submit and watch the tasks, until the work flow stop
*/
private void runProcess() {
// submit start node
submitPostNode(null);
boolean sendTimeWarning = false;
while (!processInstance.isProcessInstanceStop() && Stopper.isRunning()) {
// send warning email if process time out.
if (!sendTimeWarning && checkProcessTimeOut(processInstance)) {
alertManager.sendProcessTimeoutAlert(processInstance,
processService.findProcessDefineById(processInstance.getProcessDefinitionId()));
sendTimeWarning = true;
}
for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) {
Future<Boolean> future = entry.getValue();
TaskInstance task = entry.getKey().getTaskInstance();
if (!future.isDone()) {
continue;
}
// node monitor thread complete
task = this.processService.findTaskInstanceById(task.getId());
if (task == null) {
this.taskFailedSubmit = true;
activeTaskNode.remove(entry.getKey());
continue;
}
// node monitor thread complete
if (task.getState().typeIsFinished()) {
activeTaskNode.remove(entry.getKey());
}
logger.info("task :{}, id:{} complete, state is {} ",
task.getName(), task.getId(), task.getState());
// node success , post node submit
if (task.getState() == ExecutionStatus.SUCCESS) {
processInstance = processService.findProcessInstanceById(processInstance.getId());
processInstance.setVarPool(task.getVarPool());
processService.updateProcessInstance(processInstance);
completeTaskList.put(task.getName(), task);
submitPostNode(task.getName());
continue;
}
// node fails, retry first, and then execute the failure process
if (task.getState().typeIsFailure()) {
if (task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE) {
this.recoverToleranceFaultTaskList.add(task);
}
if (task.taskCanRetry()) {
addTaskToStandByList(task);
} else {
completeTaskList.put(task.getName(), task);
if (task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
submitPostNode(task.getName());
} else {
errorTaskList.put(task.getName(), task);
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
killTheOtherTasks();
}
}
}
continue;
}
// other status stop/pause
completeTaskList.put(task.getName(), task);
}
// send alert
if (CollectionUtils.isNotEmpty(this.recoverToleranceFaultTaskList)) {
alertManager.sendAlertWorkerToleranceFault(processInstance, recoverToleranceFaultTaskList);
this.recoverToleranceFaultTaskList.clear();
}
// updateProcessInstance completed task status
// failure priority is higher than pause
            // if a task fails, other paused tasks need to be reset to KILL
// check if there exists forced success nodes in errorTaskList
if (errorTaskList.size() > 0) {
for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) {
TaskInstance completeTask = entry.getValue();
if (completeTask.getState() == ExecutionStatus.PAUSE) {
completeTask.setState(ExecutionStatus.KILL);
completeTaskList.put(entry.getKey(), completeTask);
processService.updateTaskInstance(completeTask);
}
}
for (Map.Entry<String, TaskInstance> entry : errorTaskList.entrySet()) {
TaskInstance errorTask = entry.getValue();
TaskInstance currentTask = processService.findTaskInstanceById(errorTask.getId());
if (currentTask == null) {
continue;
}
// for nodes that have been forced success
if (errorTask.getState().typeIsFailure() && currentTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) {
// update state in this thread and remove from errorTaskList
errorTask.setState(currentTask.getState());
logger.info("task: {} has been forced success, remove it from error task list", errorTask.getName());
errorTaskList.remove(errorTask.getName());
// submit post nodes
submitPostNode(errorTask.getName());
}
}
}
if (canSubmitTaskToQueue()) {
submitStandByTask();
}
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error(e.getMessage(), e);
Thread.currentThread().interrupt();
}
updateProcessInstanceState();
}
logger.info("process:{} end, state :{}", processInstance.getId(), processInstance.getState());
}
/**
     * whether the process instance has timed out
     *
     * @param processInstance process instance
* @return true if time out of process instance > running time of process instance
*/
private boolean checkProcessTimeOut(ProcessInstance processInstance) {
if (processInstance.getTimeout() == 0) {
return false;
}
Date now = new Date();
long runningTime = DateUtils.diffMin(now, processInstance.getStartTime());
return runningTime > processInstance.getTimeout();
}
/**
* whether can submit task to queue
*
* @return boolean
*/
private boolean canSubmitTaskToQueue() {
return OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory());
}
/**
* close the on going tasks
*/
private void killTheOtherTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskNode.size());
for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) {
MasterBaseTaskExecThread taskExecThread = entry.getKey();
Future<Boolean> future = entry.getValue();
TaskInstance taskInstance = taskExecThread.getTaskInstance();
taskInstance = processService.findTaskInstanceById(taskInstance.getId());
if (taskInstance != null && taskInstance.getState().typeIsFinished()) {
continue;
}
if (!future.isDone()) {
// record kill info
logger.info("kill process instance, id: {}, task: {}", processInstance.getId(), taskExecThread.getTaskInstance().getId());
// kill node
taskExecThread.kill();
}
}
}
/**
* whether the retry interval is timed out
*
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) {
if (taskInstance.getState() != ExecutionStatus.FAILURE) {
return true;
}
if (taskInstance.getId() == 0
||
taskInstance.getMaxRetryTimes() == 0
||
taskInstance.getRetryInterval() == 0) {
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
        // the retry is due once the time elapsed since the failure exceeds the retry interval
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask() {
try {
int length = readyToSubmitTaskQueue.size();
for (int i = 0; i < length; i++) {
TaskInstance task = readyToSubmitTaskQueue.peek();
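                // peek() does not remove the task; it is removed explicitly after a
                // successful submit or a dependency failure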
// stop tasks which is retrying if forced success happens
if (task.taskCanRetry()) {
TaskInstance retryTask = processService.findTaskInstanceById(task.getId());
if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) {
task.setState(retryTask.getState());
logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName());
removeTaskFromStandbyList(task);
completeTaskList.put(task.getName(), task);
submitPostNode(task.getName());
continue;
}
}
DependResult dependResult = getDependResultForTask(task);
if (DependResult.SUCCESS == dependResult) {
if (retryTaskIntervalOverTime(task)) {
submitTaskExec(task);
removeTaskFromStandbyList(task);
}
} else if (DependResult.FAILED == dependResult) {
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(task.getName(), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult);
}
}
} catch (Exception e) {
logger.error("submit standby task error", e);
}
}
/**
* get recovery task instance
*
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId) {
        if (StringUtils.isEmpty(taskId)) {
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if (task == null) {
logger.error("start node id cannot be found: {}", taskId);
} else {
return task;
}
} catch (Exception e) {
logger.error("get recovery task instance failed ", e);
}
return null;
}
/**
* get start task instance list
*
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam) {
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) {
String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for (String nodeId : idList) {
TaskInstance task = getRecoveryTaskInstance(nodeId);
if (task != null) {
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
*
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam) {
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if (paramMap == null) {
return startNodeNameList;
}
if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) {
startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
     * get recovery node name list,
     * built from the task instances recovered via "StartNodeIdList" in the command param
*
* @return recovery node name list
*/
private List<String> getRecoveryNodeNameList() {
List<String> recoveryNodeNameList = new ArrayList<>();
if (CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeNameList.add(task.getName());
}
}
return recoveryNodeNameList;
}
/**
* generate flow dag
*
* @param processDefinitionJson process definition json
* @param startNodeNameList start node name list
* @param recoveryNodeNameList recovery node name list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(String processDefinitionJson,
List<String> startNodeNameList,
List<String> recoveryNodeNameList,
TaskDependType depNodeType) throws Exception {
return DagHelper.generateFlowDag(processDefinitionJson, startNodeNameList, recoveryNodeNameList, depNodeType);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,114 | [Question] data.basedir.path cleanup problem. | 1. I changed data.basedir.path from /tmp/dolphinscheduler/ to the data disk /data/dolphinscheduler/tmp.
2. Developer mode is false.
3. The /data/dolphinscheduler/tmp folder has normal read/write permissions, and DS works completely normally.
4. After running for a while, I found that files under /data/dolphinscheduler/tmp are not deleted automatically, so the disk fills up.
Question: Can the temporary files in this directory only be removed manually? Can they not be deleted automatically? | https://github.com/apache/dolphinscheduler/issues/4114 | https://github.com/apache/dolphinscheduler/pull/5123 | a8b47e1d4f4ebb9795239ef69b57f2928a81f44c | 492b318bd321d35247488e1f181e3ea9d1259963 | "2020-11-27T02:11:34Z" | java | "2021-03-26T02:11:56Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.enums.Event;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.Preconditions;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.LogUtils;
import org.apache.dolphinscheduler.server.worker.cache.ResponceCache;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.util.Date;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.Channel;
/**
* worker request processor
*/
public class TaskExecuteProcessor implements NettyRequestProcessor {
private static final Logger logger = LoggerFactory.getLogger(TaskExecuteProcessor.class);
/**
* worker config
*/
private final WorkerConfig workerConfig;
/**
* task callback service
*/
private final TaskCallbackService taskCallbackService;
/**
* alert client service
*/
private AlertClientService alertClientService;
/**
* taskExecutionContextCacheManager
*/
private final TaskExecutionContextCacheManager taskExecutionContextCacheManager;
/*
* task execute manager
*/
private final WorkerManagerThread workerManager;
public TaskExecuteProcessor() {
this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class);
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
this.workerManager = SpringApplicationContext.getBean(WorkerManagerThread.class);
}
/**
     * Pre-cache the task to cover the extreme case where a kill request arrives before the task exists in the cache
*
* @param taskExecutionContext task
*/
private void setTaskCache(TaskExecutionContext taskExecutionContext) {
TaskExecutionContext preTaskCache = new TaskExecutionContext();
preTaskCache.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
taskExecutionContextCacheManager.cacheTaskExecutionContext(preTaskCache);
}
public TaskExecuteProcessor(AlertClientService alertClientService) {
this();
this.alertClientService = alertClientService;
}
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.TASK_EXECUTE_REQUEST == command.getType(),
String.format("invalid command type : %s", command.getType()));
TaskExecuteRequestCommand taskRequestCommand = JSONUtils.parseObject(
command.getBody(), TaskExecuteRequestCommand.class);
logger.info("received command : {}", taskRequestCommand);
if (taskRequestCommand == null) {
logger.error("task execute request command is null");
return;
}
String contextJson = taskRequestCommand.getTaskExecutionContext();
TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(contextJson, TaskExecutionContext.class);
if (taskExecutionContext == null) {
logger.error("task execution context is null");
return;
}
setTaskCache(taskExecutionContext);
// custom logger
Logger taskLogger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort()));
taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
// local execute path
String execLocalPath = getExecLocalPath(taskExecutionContext);
logger.info("task instance local execute path : {} ", execLocalPath);
taskExecutionContext.setExecutePath(execLocalPath);
FileUtils.taskLoggerThreadLocal.set(taskLogger);
try {
FileUtils.createWorkDirIfAbsent(execLocalPath);
if (workerConfig.getWorkerTenantAutoCreate()) {
OSUtils.createUserIfAbsent(taskExecutionContext.getTenantCode());
}
} catch (Throwable ex) {
String errorLog = String.format("create execLocalPath : %s", execLocalPath);
LoggerUtils.logError(Optional.of(logger), errorLog, ex);
LoggerUtils.logError(Optional.ofNullable(taskLogger), errorLog, ex);
taskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
}
FileUtils.taskLoggerThreadLocal.remove();
taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(),
new NettyRemoteChannel(channel, command.getOpaque()));
// delay task process
long remainTime = DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(), taskExecutionContext.getDelayTime() * 60L);
if (remainTime > 0) {
logger.info("delay the execution of task instance {}, delay time: {} s", taskExecutionContext.getTaskInstanceId(), remainTime);
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.DELAY_EXECUTION);
taskExecutionContext.setStartTime(null);
} else {
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
taskExecutionContext.setStartTime(new Date());
}
this.doAck(taskExecutionContext);
// submit task to manager
if (!workerManager.offer(new TaskExecuteThread(taskExecutionContext, taskCallbackService, taskLogger, alertClientService))) {
logger.info("submit task to manager error, queue is full, queue size is {}", workerManager.getQueueSize());
}
}
private void doAck(TaskExecutionContext taskExecutionContext) {
// tell master that task is in executing
TaskExecuteAckCommand ackCommand = buildAckCommand(taskExecutionContext);
ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command(), Event.ACK);
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command());
}
/**
* build ack command
*
* @param taskExecutionContext taskExecutionContext
* @return TaskExecuteAckCommand
*/
private TaskExecuteAckCommand buildAckCommand(TaskExecutionContext taskExecutionContext) {
TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode());
ackCommand.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
ackCommand.setHost(taskExecutionContext.getHost());
ackCommand.setStartTime(taskExecutionContext.getStartTime());
if (taskExecutionContext.getTaskType().equals(TaskType.SQL.name()) || taskExecutionContext.getTaskType().equals(TaskType.PROCEDURE.name())) {
ackCommand.setExecutePath(null);
} else {
ackCommand.setExecutePath(taskExecutionContext.getExecutePath());
}
taskExecutionContext.setLogPath(ackCommand.getLogPath());
return ackCommand;
}
/**
* get execute local path
*
* @param taskExecutionContext taskExecutionContext
* @return execute local path
*/
private String getExecLocalPath(TaskExecutionContext taskExecutionContext) {
return FileUtils.getProcessExecDir(taskExecutionContext.getProjectId(),
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,114 | [Question] data.basedir.path cleanup problem. | 1. I changed data.basedir.path from /tmp/dolphinscheduler to the data disk /data/dolphinscheduler/tmp.
2. Developer mode is false.
3. The /data/dolphinscheduler/tmp folder has normal read/write permissions, and DolphinScheduler works completely normally.
4. After running for a while, I found that the files under /data/dolphinscheduler/tmp are never deleted automatically, so the disk fills up.
Q: Can the temporary files in this directory only be removed manually? Can they not be deleted automatically?
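A more direct remedy is to delete a task's execute path as soon as the task finishes, for example from the finally block of the worker task thread shown in the file below, keeping the files only when developer mode is enabled. The helper here is a minimal sketch under those assumptions; the name clearTaskExecPath and the development-mode flag are illustrative, not the project's actual API:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Comparator;
import java.util.stream.Stream;

// Illustrative helper: remove a finished task's execute path unless developer mode is enabled.
public final class ExecPathCleaner {

    private ExecPathCleaner() {
    }

    public static void clearTaskExecPath(String execPath, boolean developmentMode) {
        if (developmentMode || execPath == null || execPath.isEmpty()) {
            return; // keep the files around for debugging in developer mode
        }
        try (Stream<Path> paths = Files.walk(Paths.get(execPath))) {
            // delete children before their parent directories
            paths.sorted(Comparator.reverseOrder())
                 .forEach(p -> p.toFile().delete());
        } catch (IOException e) {
            System.err.println("failed to delete exec path " + execPath + ": " + e.getMessage());
        }
    }
}
```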
 | https://github.com/apache/dolphinscheduler/issues/4114 | https://github.com/apache/dolphinscheduler/pull/5123 | a8b47e1d4f4ebb9795239ef69b57f2928a81f44c | 492b318bd321d35247488e1f181e3ea9d1259963 | "2020-11-27T02:11:34Z" | java | "2021-03-26T02:11:56Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.runner;
import org.apache.dolphinscheduler.common.enums.Event;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.RetryerUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.worker.cache.ResponceCache;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskManager;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.commons.collections.MapUtils;
import java.io.File;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Delayed;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.RetryException;
/**
* task scheduler thread
*/
public class TaskExecuteThread implements Runnable, Delayed {
/**
* logger
*/
private final Logger logger = LoggerFactory.getLogger(TaskExecuteThread.class);
/**
* task instance
*/
private TaskExecutionContext taskExecutionContext;
/**
* abstract task
*/
private AbstractTask task;
/**
* task callback service
*/
private TaskCallbackService taskCallbackService;
/**
* taskExecutionContextCacheManager
*/
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
/**
* task logger
*/
private Logger taskLogger;
/**
* alert client server
*/
private AlertClientService alertClientService;
/**
* constructor
* @param taskExecutionContext taskExecutionContext
* @param taskCallbackService taskCallbackService
*/
public TaskExecuteThread(TaskExecutionContext taskExecutionContext
, TaskCallbackService taskCallbackService
, Logger taskLogger, AlertClientService alertClientService) {
this.taskExecutionContext = taskExecutionContext;
this.taskCallbackService = taskCallbackService;
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
this.taskLogger = taskLogger;
this.alertClientService = alertClientService;
}
@Override
public void run() {
TaskExecuteResponseCommand responseCommand = new TaskExecuteResponseCommand(taskExecutionContext.getTaskInstanceId());
try {
logger.info("script path : {}", taskExecutionContext.getExecutePath());
// check if the OS user exists
if (!OSUtils.getUserList().contains(taskExecutionContext.getTenantCode())) {
String errorLog = String.format("tenantCode: %s does not exist", taskExecutionContext.getTenantCode());
taskLogger.error(errorLog);
responseCommand.setStatus(ExecutionStatus.FAILURE.getCode());
responseCommand.setEndTime(new Date());
return;
}
// task node
TaskNode taskNode = JSONUtils.parseObject(taskExecutionContext.getTaskJson(), TaskNode.class);
if (taskExecutionContext.getStartTime() == null) {
taskExecutionContext.setStartTime(new Date());
}
if (taskExecutionContext.getCurrentExecutionStatus() != ExecutionStatus.RUNNING_EXECUTION) {
changeTaskExecutionStatusToRunning();
}
logger.info("the task begins to execute. task instance id: {}", taskExecutionContext.getTaskInstanceId());
// copy hdfs/minio file to local
downloadResource(taskExecutionContext.getExecutePath(),
taskExecutionContext.getResources(),
logger);
taskExecutionContext.setTaskParams(taskNode.getParams());
taskExecutionContext.setEnvFile(CommonUtils.getSystemEnvPath());
taskExecutionContext.setDefinedParams(getGlobalParamsMap());
// set task timeout
setTaskTimeout(taskExecutionContext, taskNode);
taskExecutionContext.setTaskAppId(String.format("%s_%s_%s",
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
task = TaskManager.newTask(taskExecutionContext, taskLogger, alertClientService);
// task init
task.init();
// task handle
task.handle();
// task result process
task.after();
responseCommand.setStatus(task.getExitStatus().getCode());
responseCommand.setEndTime(new Date());
responseCommand.setProcessId(task.getProcessId());
responseCommand.setAppIds(task.getAppIds());
responseCommand.setVarPool(task.getVarPool());
responseCommand.setResult(task.getResultString());
logger.info("task instance id : {},task final status : {}", taskExecutionContext.getTaskInstanceId(), task.getExitStatus());
} catch (Exception e) {
logger.error("task scheduler failure", e);
kill();
responseCommand.setStatus(ExecutionStatus.FAILURE.getCode());
responseCommand.setEndTime(new Date());
responseCommand.setProcessId(task.getProcessId());
responseCommand.setAppIds(task.getAppIds());
} finally {
taskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(),responseCommand.convert2Command(),Event.RESULT);
taskCallbackService.sendResult(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command());
}
}
/**
     * get global params map
     * @return global params map
*/
private Map<String, String> getGlobalParamsMap() {
Map<String,String> globalParamsMap = new HashMap<>(16);
// global params string
String globalParamsStr = taskExecutionContext.getGlobalParams();
if (globalParamsStr != null) {
List<Property> globalParamsList = JSONUtils.toList(globalParamsStr, Property.class);
globalParamsMap.putAll(globalParamsList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)));
}
return globalParamsMap;
}
/**
* set task timeout
* @param taskExecutionContext TaskExecutionContext
     * @param taskNode task node
*/
private void setTaskTimeout(TaskExecutionContext taskExecutionContext, TaskNode taskNode) {
// the default timeout is the maximum value of the integer
taskExecutionContext.setTaskTimeout(Integer.MAX_VALUE);
TaskTimeoutParameter taskTimeoutParameter = taskNode.getTaskTimeoutParameter();
if (taskTimeoutParameter.getEnable()) {
// get timeout strategy
taskExecutionContext.setTaskTimeoutStrategy(taskTimeoutParameter.getStrategy().getCode());
switch (taskTimeoutParameter.getStrategy()) {
case WARN:
break;
case FAILED:
if (Integer.MAX_VALUE > taskTimeoutParameter.getInterval() * 60) {
taskExecutionContext.setTaskTimeout(taskTimeoutParameter.getInterval() * 60);
}
break;
case WARNFAILED:
if (Integer.MAX_VALUE > taskTimeoutParameter.getInterval() * 60) {
taskExecutionContext.setTaskTimeout(taskTimeoutParameter.getInterval() * 60);
}
break;
default:
logger.error("not support task timeout strategy: {}", taskTimeoutParameter.getStrategy());
throw new IllegalArgumentException("not support task timeout strategy");
}
}
}
/**
* kill task
*/
public void kill() {
if (task != null) {
try {
task.cancelApplication(true);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
}
}
/**
* download resource file
*
     * @param execLocalPath local execute path
     * @param projectRes map of resource full name to tenant code
     * @param logger logger
*/
private void downloadResource(String execLocalPath,
Map<String,String> projectRes,
Logger logger) throws Exception {
if (MapUtils.isEmpty(projectRes)) {
return;
}
Set<Map.Entry<String, String>> resEntries = projectRes.entrySet();
for (Map.Entry<String,String> resource : resEntries) {
String fullName = resource.getKey();
String tenantCode = resource.getValue();
File resFile = new File(execLocalPath, fullName);
if (!resFile.exists()) {
try {
// query the tenant code of the resource according to the name of the resource
String resHdfsPath = HadoopUtils.getHdfsResourceFileName(tenantCode, fullName);
logger.info("get resource file from hdfs :{}", resHdfsPath);
HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + fullName, false, true);
} catch (Exception e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage());
}
} else {
logger.info("file : {} exists ", resFile.getName());
}
}
}
/**
* send an ack to change the status of the task.
*/
private void changeTaskExecutionStatusToRunning() {
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
Command ackCommand = buildAckCommand().convert2Command();
try {
RetryerUtils.retryCall(() -> {
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand);
return Boolean.TRUE;
});
} catch (ExecutionException | RetryException e) {
logger.error(e.getMessage(), e);
}
}
/**
* build ack command.
*
* @return TaskExecuteAckCommand
*/
private TaskExecuteAckCommand buildAckCommand() {
TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode());
ackCommand.setStartTime(taskExecutionContext.getStartTime());
ackCommand.setLogPath(taskExecutionContext.getLogPath());
ackCommand.setHost(taskExecutionContext.getHost());
if (taskExecutionContext.getTaskType().equals(TaskType.SQL.name())
|| taskExecutionContext.getTaskType().equals(TaskType.PROCEDURE.name())) {
ackCommand.setExecutePath(null);
} else {
ackCommand.setExecutePath(taskExecutionContext.getExecutePath());
}
return ackCommand;
}
/**
* get current TaskExecutionContext
* @return TaskExecutionContext
*/
public TaskExecutionContext getTaskExecutionContext() {
return this.taskExecutionContext;
}
@Override
public long getDelay(TimeUnit unit) {
return unit.convert(DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(),
taskExecutionContext.getDelayTime() * 60L), TimeUnit.SECONDS);
}
@Override
public int compareTo(Delayed o) {
if (o == null) {
return 1;
}
return Long.compare(this.getDelay(TimeUnit.MILLISECONDS), o.getDelay(TimeUnit.MILLISECONDS));
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,114 | [Question] data.basedir.path cleanup problem. | 1. I changed data.basedir.path from /tmp/dolphinscheduler to the data disk /data/dolphinscheduler/tmp.
2. Developer mode is false.
3. The /data/dolphinscheduler/tmp folder has normal read/write permissions, and DolphinScheduler works completely normally.
4. After running for a while, I found that the files under /data/dolphinscheduler/tmp are never deleted automatically, so the disk fills up.
Q: Can the temporary files in this directory only be removed manually? Can they not be deleted automatically?
 | https://github.com/apache/dolphinscheduler/issues/4114 | https://github.com/apache/dolphinscheduler/pull/5123 | a8b47e1d4f4ebb9795239ef69b57f2928a81f44c | 492b318bd321d35247488e1f181e3ea9d1259963 | "2020-11-27T02:11:34Z" | java | "2021-03-26T02:11:56Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/MasterExecThreadTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.powermock.api.mockito.PowerMockito.mock;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.runner.MasterExecThread;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.text.ParseException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.springframework.context.ApplicationContext;
/**
* test for MasterExecThread
*/
@RunWith(PowerMockRunner.class)
@PrepareForTest({MasterExecThread.class})
public class MasterExecThreadTest {
private MasterExecThread masterExecThread;
private ProcessInstance processInstance;
private ProcessService processService;
private int processDefinitionId = 1;
private MasterConfig config;
private ApplicationContext applicationContext;
@Before
public void init() throws Exception {
processService = mock(ProcessService.class);
applicationContext = mock(ApplicationContext.class);
config = new MasterConfig();
config.setMasterExecTaskNum(1);
Mockito.when(applicationContext.getBean(MasterConfig.class)).thenReturn(config);
processInstance = mock(ProcessInstance.class);
Mockito.when(processInstance.getProcessDefinitionId()).thenReturn(processDefinitionId);
Mockito.when(processInstance.getState()).thenReturn(ExecutionStatus.SUCCESS);
Mockito.when(processInstance.getHistoryCmd()).thenReturn(CommandType.COMPLEMENT_DATA.toString());
Mockito.when(processInstance.getIsSubProcess()).thenReturn(Flag.NO);
Mockito.when(processInstance.getScheduleTime()).thenReturn(DateUtils.stringToDate("2020-01-01 00:00:00"));
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, "2020-01-01 00:00:00");
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, "2020-01-20 23:00:00");
Mockito.when(processInstance.getCommandParam()).thenReturn(JSONUtils.toJsonString(cmdParam));
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setGlobalParamMap(Collections.EMPTY_MAP);
processDefinition.setGlobalParamList(Collections.EMPTY_LIST);
Mockito.when(processInstance.getProcessDefinition()).thenReturn(processDefinition);
masterExecThread = PowerMockito.spy(new MasterExecThread(
processInstance
, processService
, null, null, config));
// prepareProcess init dag
Field dag = MasterExecThread.class.getDeclaredField("dag");
dag.setAccessible(true);
dag.set(masterExecThread, new DAG());
PowerMockito.doNothing().when(masterExecThread, "executeProcess");
PowerMockito.doNothing().when(masterExecThread, "postHandle");
PowerMockito.doNothing().when(masterExecThread, "prepareProcess");
PowerMockito.doNothing().when(masterExecThread, "runProcess");
PowerMockito.doNothing().when(masterExecThread, "endProcess");
}
/**
* without schedule
*/
@Test
public void testParallelWithOutSchedule() throws ParseException {
try {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(zeroSchedulerList());
Method method = MasterExecThread.class.getDeclaredMethod("executeComplementProcess");
method.setAccessible(true);
method.invoke(masterExecThread);
            // one save when the instance is created, one save for each following schedule date, and no save after the last day (20), giving 20 saves in total
verify(processService, times(20)).saveProcessInstance(processInstance);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
}
}
/**
* with schedule
*/
@Test
public void testParallelWithSchedule() {
try {
Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(oneSchedulerList());
Method method = MasterExecThread.class.getDeclaredMethod("executeComplementProcess");
method.setAccessible(true);
method.invoke(masterExecThread);
            // one save on creation plus saves for the next schedule dates (day 1 to 20, step 2); the last date is not saved, giving 9 saves in total
verify(processService, times(9)).saveProcessInstance(processInstance);
} catch (Exception e) {
Assert.fail();
}
}
@Test
public void testParseStartNodeName() throws ParseException {
try {
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(CMD_PARAM_START_NODE_NAMES, "t1,t2,t3");
Mockito.when(processInstance.getCommandParam()).thenReturn(JSONUtils.toJsonString(cmdParam));
Class<MasterExecThread> masterExecThreadClass = MasterExecThread.class;
Method method = masterExecThreadClass.getDeclaredMethod("parseStartNodeName", String.class);
method.setAccessible(true);
List<String> nodeNames = (List<String>) method.invoke(masterExecThread, JSONUtils.toJsonString(cmdParam));
Assert.assertEquals(3, nodeNames.size());
} catch (Exception e) {
Assert.fail();
}
}
@Test
public void testRetryTaskIntervalOverTime() {
try {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setId(0);
taskInstance.setMaxRetryTimes(0);
taskInstance.setRetryInterval(0);
taskInstance.setState(ExecutionStatus.FAILURE);
Class<MasterExecThread> masterExecThreadClass = MasterExecThread.class;
Method method = masterExecThreadClass.getDeclaredMethod("retryTaskIntervalOverTime", TaskInstance.class);
method.setAccessible(true);
Assert.assertTrue((Boolean) method.invoke(masterExecThread, taskInstance));
} catch (Exception e) {
Assert.fail();
}
}
@Test
public void testGetStartTaskInstanceList() {
try {
TaskInstance taskInstance1 = new TaskInstance();
taskInstance1.setId(1);
TaskInstance taskInstance2 = new TaskInstance();
taskInstance2.setId(2);
TaskInstance taskInstance3 = new TaskInstance();
taskInstance3.setId(3);
TaskInstance taskInstance4 = new TaskInstance();
taskInstance4.setId(4);
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(CMD_PARAM_RECOVERY_START_NODE_STRING, "1,2,3,4");
Mockito.when(processService.findTaskInstanceById(1)).thenReturn(taskInstance1);
Mockito.when(processService.findTaskInstanceById(2)).thenReturn(taskInstance2);
Mockito.when(processService.findTaskInstanceById(3)).thenReturn(taskInstance3);
Mockito.when(processService.findTaskInstanceById(4)).thenReturn(taskInstance4);
Class<MasterExecThread> masterExecThreadClass = MasterExecThread.class;
Method method = masterExecThreadClass.getDeclaredMethod("getStartTaskInstanceList", String.class);
method.setAccessible(true);
List<TaskInstance> taskInstances = (List<TaskInstance>) method.invoke(masterExecThread, JSONUtils.toJsonString(cmdParam));
Assert.assertEquals(4, taskInstances.size());
} catch (Exception e) {
Assert.fail();
}
}
private List<Schedule> zeroSchedulerList() {
return Collections.EMPTY_LIST;
}
private List<Schedule> oneSchedulerList() {
List<Schedule> schedulerList = new LinkedList<>();
Schedule schedule = new Schedule();
schedule.setCrontab("0 0 0 1/2 * ?");
schedulerList.add(schedule);
return schedulerList;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,144 | [Improvement][UI] error v-show directives cannot be put on template tags | **Describe the question**
ESLint reports: error 'v-show' directives cannot be put on `<template>` tags (vue/valid-v-show). Since a `<template>` tag renders no element of its own, there is no DOM node whose display `v-show` could toggle; the fix is to use `v-if` on the `<template>` or to move `v-show` onto a real wrapper element.
**Which version of DolphinScheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5144 | https://github.com/apache/dolphinscheduler/pull/5145 | 492b318bd321d35247488e1f181e3ea9d1259963 | f6d62a4924417fbcdea35ba2896a652eb9bc735c | "2021-03-25T06:13:05Z" | java | "2021-03-26T08:42:13Z" | dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/gantt/index.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-list-construction :title="$t('Gantt')">
<template slot="content">
<div class="gantt-model">
<div class="gantt-state">
<div class="state-tasks-color-sp">
<a href="javascript:">
<span>{{$t('Task Status')}}</span>
</a>
<a href="javascript:" v-for="(item) in tasksState" :key="item.id">
<em class="ri-checkbox-blank-fill" :style="{color:item.color}"></em>
<span>{{item.desc}}</span>
</a>
</div>
</div>
<template v-show="!isNodata">
<div class="gantt"></div>
</template>
<template v-if="isNodata">
<m-no-data></m-no-data>
</template>
<m-spin :is-spin="isLoading">
</m-spin>
</div>
</template>
</m-list-construction>
</template>
<script>
import { mapActions } from 'vuex'
import Gantt from './_source/gantt'
import mSpin from '@/module/components/spin/spin'
import mNoData from '@/module/components/noData/noData'
import { tasksState } from '@/conf/home/pages/dag/_source/config'
import mListConstruction from '@/module/components/listConstruction/listConstruction'
export default {
name: 'instance-gantt-index',
data () {
return {
// Node state
tasksState: tasksState,
// loading
isLoading: true,
// gantt data
ganttData: {
taskNames: []
},
// Data available
isNodata: false
}
},
props: {},
methods: {
...mapActions('dag', ['getViewGantt']),
/**
* get data
*/
_getViewGantt () {
this.isLoading = true
this.getViewGantt({
processInstanceId: this.$route.params.id
}).then(res => {
this.ganttData = res
        if (!res || !res.taskNames.length) {
this.isLoading = false
this.isNodata = true
return
}
// Gantt
Gantt.init({
el: '.gantt',
tasks: res.tasks
})
setTimeout(() => {
this.isLoading = false
}, 200)
}).catch(e => {
this.isLoading = false
})
}
},
watch: {},
created () {
},
mounted () {
this._getViewGantt()
},
updated () {
},
beforeDestroy () {
},
destroyed () {
},
computed: {},
components: { mListConstruction, mSpin, mNoData }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.d3-toottip {
text-align: left;
ul {
li {
overflow: hidden;
span {
&.sp1 {
width: 70px;
text-align: right;
display: inline-block;
padding-right: 6px;
}
}
}
}
}
.gantt-model {
background: url('img/dag_bg.png');
height: calc(100vh - 148px);
.gantt-state {
background: #fff;
height: 48px;
line-height: 48px;
padding-left: 20px;
}
.gantt {
height: calc(100vh - 220px);
overflow-y: scroll;
}
rect {
cursor: pointer;
}
path {
&.link{
fill: none;
stroke: #666;
stroke-width: 2px;
}
}
g.tick line{
shape-rendering: crispEdges;
}
.axis {
path,line {
fill: none;
stroke: #000;
shape-rendering: crispEdges;
}
text {
font: 11px sans-serif;
}
}
circle {
stroke: #666;
fill: #0097e0;
stroke-width: 1.5px;
}
g.axis path {
shape-rendering: crispEdges;
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,144 | [Improvement][UI] error v-show directives cannot be put on template tags | **Describe the question**
ESLint reports: error 'v-show' directives cannot be put on `<template>` tags (vue/valid-v-show). Since a `<template>` tag renders no element of its own, there is no DOM node whose display `v-show` could toggle; the fix is to use `v-if` on the `<template>` or to move `v-show` onto a real wrapper element.
**Which version of DolphinScheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5144 | https://github.com/apache/dolphinscheduler/pull/5145 | 492b318bd321d35247488e1f181e3ea9d1259963 | f6d62a4924417fbcdea35ba2896a652eb9bc735c | "2021-03-25T06:13:05Z" | java | "2021-03-26T08:42:13Z" | dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/edit/index.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-list-construction :title="$t('File Details')">
<div slot="content" style="margin: 20px">
<div class="file-edit-content">
<h2>
<span>{{name}}</span>
</h2>
<template v-show="isViewType">
<template v-if="!msg">
<div class="code-mirror-model">
<textarea id="code-edit-mirror" name="code-edit-mirror"></textarea>
</div>
<div class="submit-c">
<el-button type="text" @click="close()" :disabled="disabled" size="small"> {{$t('Return')}} </el-button>
<el-button type="primary" :loading="spinnerLoading" @click="ok()" round size="small">{{spinnerLoading ? 'Loading...' : $t('Save')}} </el-button>
</div>
</template>
<m-no-data :msg="msg" v-if="msg"></m-no-data>
</template>
<template v-if="!isViewType">
<m-no-type></m-no-type>
</template>
</div>
<m-spin :is-spin="isLoading">
</m-spin>
</div>
</m-list-construction>
</template>
<script>
import i18n from '@/module/i18n'
import _ from 'lodash'
import { mapActions } from 'vuex'
import { filtTypeArr } from '../_source/common'
import mNoType from '../details/_source/noType'
import { bytesToSize } from '@/module/util/util'
import codemirror from '../_source/codemirror'
import mSpin from '@/module/components/spin/spin'
import localStore from '@/module/util/localStorage'
import mNoData from '@/module/components/noData/noData'
import { handlerSuffix } from '../details/_source/utils'
import mListConstruction from '@/module/components/listConstruction/listConstruction'
let editor
export default {
name: 'file-details',
data () {
return {
name: '',
isViewType: true,
isLoading: false,
filtTypeArr: filtTypeArr,
loadingIndex: 0,
mode: 'python',
isData: true,
size: null,
spinnerLoading: false,
msg: ''
}
},
props: {},
methods: {
...mapActions('resource', ['getViewResources', 'updateContent']),
ok () {
if (this._validation()) {
this.spinnerLoading = true
this.updateContent({
id: this.$route.params.id,
content: editor.getValue()
}).then(res => {
this.$message.success(res.msg)
setTimeout(() => {
this.spinnerLoading = false
this.close()
}, 800)
}).catch(e => {
this.$message.error(e.msg || '')
this.spinnerLoading = false
})
}
},
_validation () {
if (editor.doc.size > 3000) {
this.$message.warning(`${i18n.$t('Resource content cannot exceed 3000 lines')}`)
return false
}
return true
},
close () {
this.$router.go(-1)
},
_getViewResources () {
this.isLoading = true
this.getViewResources({
id: this.$route.params.id,
skipLineNum: 0,
limit: 3000
}).then(res => {
        if (!res.data) {
          this.isData = false
        } else {
          this.isData = true
          this.name = res.data.alias.split('.')[0]
let content = res.data.content ? res.data.content + '\n' : ''
this._handlerEditor().setValue(content)
setTimeout(() => {
$('.code-mirror-model').scrollTop(12).scrollLeft(0)
}, 200)
}
this.isLoading = false
}).catch(e => {
this.msg = e.msg || 'error'
this.$message.error(e.msg || '')
this.isLoading = false
})
},
/**
* Processing code highlighting
*/
_handlerEditor () {
// editor
editor = codemirror('code-edit-mirror', {
mode: this.mode,
readOnly: false
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
return editor
}
},
watch: {
},
created () {
let file = _.split(localStore.getItem('file'), '|', 2)
let fileName = file[0]
let fileSize = file[1]
let i = fileName.lastIndexOf('.')
let a = fileName.substring(i, fileName.length)
this.mode = handlerSuffix[a]
this.size = bytesToSize(parseInt(fileSize))
this.isViewType = _.includes(this.filtTypeArr, _.trimStart(a, '.'))
},
mounted () {
if (this.isViewType) {
// get data
this._getViewResources()
}
},
destroyed () {
if (editor) {
editor.toTextArea()
editor.off($('.code-edit-mirror'), 'keypress', this.keypress)
}
},
computed: {
},
components: { mListConstruction, mNoType, mSpin, mNoData }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.file-edit-content {
width: 100%;
background: #fff;
padding-bottom: 20px;
>h2 {
line-height: 60px;
text-align: center;
padding-bottom: 6px;
position: relative;
.down {
position: absolute;
right: 0;
top: 0;
>i {
font-size: 20px;
color: #2d8cf0;
cursor: pointer;
vertical-align: middle;
}
em {
font-size: 12px;
font-style: normal;
vertical-align: middle;
color: #777;
margin-left: -2px;
}
}
}
.code-mirror-model {
height: calc(100vh - 300px);
.cm-s-mdn-like.CodeMirror {
height: calc(100vh - 310px);
}
}
.submit-c {
text-align: center;
padding-top: 12px;
}
}
.file-operation {
padding: 30px 0;
text-align: center;
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,132 | [Bug][server] when an exception occurs in the taskExecuteThread, task cannot stop immediately |
**Describe the bug**
When an exception occurs in the TaskExecuteThread, the task's `cancelApplication()` method is called to stop it immediately. However, a task whose `cancelApplication()` relies on `AbstractCommandExecutor.cancelApplication()` cannot be killed immediately, because that method does not call `hardKill()` when the soft kill of the process fails.
**To Reproduce**
**Expected behavior**
To make the failing task stop immediately, the `hardKill()` method should be called when the soft kill of the process fails.
**Screenshots**
![image](https://user-images.githubusercontent.com/68894048/112130011-f600c180-8c02-11eb-934c-e490f63c0d73.png)
**Which version of Dolphin Scheduler:**
-[dev]
**Additional context**
It is a very simple bug: the `softKill()` method returns the opposite state, so simply correcting the return value fixes it. I will fix this bug.
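Concretely, `softKill()` returns `process.isAlive()` while its caller interprets the return value as "killed", so `hardKill()` is skipped exactly when the soft kill failed. A minimal sketch of the corrected contract, based on the method in the file below (the timing of the liveness check is simplified here, not the exact patch):

```java
/**
 * Soft kill: ask the process to terminate, then report whether it was killed,
 * so that the caller's "if (!killed) hardKill(processId)" fallback can fire.
 */
private boolean softKill(int processId) {
    if (processId != 0 && process.isAlive()) {
        try {
            String cmd = String.format("kill %d", processId);
            cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
            Runtime.getRuntime().exec(cmd);
        } catch (IOException e) {
            logger.info("kill attempt failed", e);
        }
    }
    // return "killed" (not alive) instead of isAlive(), inverting the buggy return value
    return !process.isAlive();
}
```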
**Requirement or improvement**
| https://github.com/apache/dolphinscheduler/issues/5132 | https://github.com/apache/dolphinscheduler/pull/5133 | 573252f3e1baba5e133b6c8f608c22e3c2267a03 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | "2021-03-23T10:23:38Z" | java | "2021-03-29T02:41:51Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_FAILURE;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_KILL;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_SUCCESS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
/**
* abstract command executor
*/
public abstract class AbstractCommandExecutor {
/**
* rules for extracting application ID
*/
protected static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX);
protected StringBuilder varPool = new StringBuilder();
/**
* process
*/
private Process process;
/**
* log handler
*/
protected Consumer<List<String>> logHandler;
/**
* logger
*/
protected Logger logger;
/**
* log list
*/
protected final List<String> logBuffer;
protected boolean logOutputIsScuccess = false;
/**
* SHELL result string
*/
protected String taskResultString;
/**
* taskExecutionContext
*/
protected TaskExecutionContext taskExecutionContext;
/**
* taskExecutionContextCacheManager
*/
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
public AbstractCommandExecutor(Consumer<List<String>> logHandler,
TaskExecutionContext taskExecutionContext,
Logger logger) {
this.logHandler = logHandler;
this.taskExecutionContext = taskExecutionContext;
this.logger = logger;
this.logBuffer = Collections.synchronizedList(new ArrayList<>());
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
}
protected AbstractCommandExecutor(List<String> logBuffer) {
this.logBuffer = logBuffer;
}
/**
* build process
*
* @param commandFile command file
* @throws IOException IO Exception
*/
private void buildProcess(String commandFile) throws IOException {
// setting up user to run commands
List<String> command = new LinkedList<>();
//init process builder
ProcessBuilder processBuilder = new ProcessBuilder();
// setting up a working directory
processBuilder.directory(new File(taskExecutionContext.getExecutePath()));
// merge error information to standard output stream
processBuilder.redirectErrorStream(true);
// setting up user to run commands
command.add("sudo");
command.add("-u");
command.add(taskExecutionContext.getTenantCode());
command.add(commandInterpreter());
command.addAll(commandOptions());
command.add(commandFile);
// setting commands
processBuilder.command(command);
process = processBuilder.start();
// print command
printCommand(command);
}
/**
* task specific execution logic
*
* @param execCommand execCommand
* @return CommandExecuteResult
* @throws Exception if error throws Exception
*/
public CommandExecuteResult run(String execCommand) throws Exception {
CommandExecuteResult result = new CommandExecuteResult();
int taskInstanceId = taskExecutionContext.getTaskInstanceId();
// If the task has been killed, then the task in the cache is null
if (null == taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) {
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
if (StringUtils.isEmpty(execCommand)) {
taskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
return result;
}
String commandFilePath = buildCommandFilePath();
// create command file if not exists
createCommandFileIfNotExists(execCommand, commandFilePath);
//build process
buildProcess(commandFilePath);
// parse process output
parseProcessOutput(process);
Integer processId = getProcessId(process);
result.setProcessId(processId);
// cache processId
taskExecutionContext.setProcessId(processId);
boolean updateTaskExecutionContextStatus = taskExecutionContextCacheManager.updateTaskExecutionContext(taskExecutionContext);
if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) {
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
// print process id
logger.info("process start, process id is: {}", processId);
// if timeout occurs, exit directly
long remainTime = getRemaintime();
// waiting for the run to finish
boolean status = process.waitFor(remainTime, TimeUnit.SECONDS);
logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{}",
taskExecutionContext.getExecutePath(),
processId
, result.getExitStatusCode());
        // if the task process exited before the timeout
if (status) {
// set appIds
List<String> appIds = getAppIds(taskExecutionContext.getLogPath());
result.setAppIds(String.join(Constants.COMMA, appIds));
// SHELL task state
result.setExitStatusCode(process.exitValue());
            // for yarn tasks, the yarn application state is the final state
if (process.exitValue() == 0) {
result.setExitStatusCode(isSuccessOfYarnState(appIds) ? EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE);
}
} else {
logger.error("process has failure , exitStatusCode : {} , ready to kill ...", result.getExitStatusCode());
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_FAILURE);
}
return result;
}
public String getVarPool() {
return varPool.toString();
}
/**
* cancel application
*
* @throws Exception exception
*/
public void cancelApplication() throws Exception {
if (process == null) {
return;
}
// clear log
clear();
int processId = getProcessId(process);
logger.info("cancel process: {}", processId);
// kill , waiting for completion
boolean killed = softKill(processId);
if (!killed) {
// hard kill
hardKill(processId);
            // destroy the process
process.destroy();
process = null;
}
}
/**
* soft kill
*
* @param processId process id
* @return process is alive
*/
private boolean softKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
                // run the kill command as the tenant user via sudo -u
String cmd = String.format("kill %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.info("kill attempt failed", e);
}
}
return process.isAlive();
}
/**
* hard kill
*
* @param processId process id
*/
private void hardKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
String cmd = String.format("kill -9 %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.error("kill attempt failed ", e);
}
}
}
/**
* print command
*
* @param commands process builder
*/
private void printCommand(List<String> commands) {
String cmdStr;
try {
cmdStr = ProcessUtils.buildCommandStr(commands);
logger.info("task run command:\n{}", cmdStr);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* clear
*/
private void clear() {
List<String> markerList = new ArrayList<>();
markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString());
if (!logBuffer.isEmpty()) {
// log handle
logHandler.accept(logBuffer);
logBuffer.clear();
}
logHandler.accept(markerList);
}
/**
* get the standard output of the process
*
* @param process process
*/
private void parseProcessOutput(Process process) {
String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskExecutionContext.getTaskAppId());
ExecutorService getOutputLogService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService");
getOutputLogService.submit(() -> {
BufferedReader inReader = null;
try {
inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
String line;
logBuffer.add("welcome to use bigdata scheduling system...");
while ((line = inReader.readLine()) != null) {
if (line.startsWith("${setValue(")) {
varPool.append(line.substring("${setValue(".length(), line.length() - 2));
varPool.append("$VarPool$");
} else {
logBuffer.add(line);
taskResultString = line;
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
logOutputIsScuccess = true;
close(inReader);
}
});
getOutputLogService.shutdown();
ExecutorService parseProcessOutputExecutorService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName);
parseProcessOutputExecutorService.submit(() -> {
try {
long lastFlushTime = System.currentTimeMillis();
while (logBuffer.size() > 0 || !logOutputIsScuccess) {
if (logBuffer.size() > 0) {
lastFlushTime = flush(lastFlushTime);
} else {
Thread.sleep(Constants.DEFAULT_LOG_FLUSH_INTERVAL);
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
clear();
}
});
parseProcessOutputExecutorService.shutdown();
}
/**
* check yarn state
*
* @param appIds application id list
* @return is success of yarn task state
*/
public boolean isSuccessOfYarnState(List<String> appIds) {
boolean result = true;
try {
for (String appId : appIds) {
while (Stopper.isRunning()) {
ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
logger.info("appId:{}, final state:{}", appId, applicationStatus.name());
if (applicationStatus.equals(ExecutionStatus.FAILURE)
|| applicationStatus.equals(ExecutionStatus.KILL)) {
return false;
}
if (applicationStatus.equals(ExecutionStatus.SUCCESS)) {
break;
}
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
}
}
} catch (Exception e) {
logger.error(String.format("yarn applications: %s status failed ", appIds.toString()), e);
result = false;
}
return result;
}
public int getProcessId() {
return getProcessId(process);
}
/**
     * get yarn application ids from the task log
*
* @param logPath log path
* @return app id list
*/
private List<String> getAppIds(String logPath) {
List<String> logs = convertFile2List(logPath);
List<String> appIds = new ArrayList<>();
/**
         * parse the log to get the submitted yarn application ids
*/
for (String log : logs) {
String appId = findAppId(log);
if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) {
logger.info("find app id: {}", appId);
appIds.add(appId);
}
}
return appIds;
}
/**
* convert file to list
*
* @param filename file name
* @return line list
*/
private List<String> convertFile2List(String filename) {
        List<String> lineList = new ArrayList<>(100);
File file = new File(filename);
if (!file.exists()) {
return lineList;
}
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8));
String line = null;
while ((line = br.readLine()) != null) {
lineList.add(line);
}
} catch (Exception e) {
logger.error(String.format("read file: %s failed : ", filename), e);
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
return lineList;
}
/**
* find app id
*
* @param line line
* @return appid
*/
private String findAppId(String line) {
Matcher matcher = APPLICATION_REGEX.matcher(line);
if (matcher.find()) {
return matcher.group();
}
return null;
}
/**
* get remain time(s)
*
* @return remain time
*/
private long getRemaintime() {
long usedTime = (System.currentTimeMillis() - taskExecutionContext.getStartTime().getTime()) / 1000;
long remainTime = taskExecutionContext.getTaskTimeout() - usedTime;
if (remainTime < 0) {
throw new RuntimeException("task execution time out");
}
return remainTime;
}
/**
* get process id
*
* @param process process
* @return process id
*/
private int getProcessId(Process process) {
int processId = 0;
try {
Field f = process.getClass().getDeclaredField(Constants.PID);
f.setAccessible(true);
processId = f.getInt(process);
} catch (Throwable e) {
logger.error(e.getMessage(), e);
}
return processId;
}
/**
     * when the log buffer size or the flush interval reaches its threshold, flush
*
* @param lastFlushTime last flush time
* @return last flush time
*/
private long flush(long lastFlushTime) {
long now = System.currentTimeMillis();
/**
         * when the log buffer size or the flush interval reaches its threshold, flush
*/
if (logBuffer.size() >= Constants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > Constants.DEFAULT_LOG_FLUSH_INTERVAL) {
lastFlushTime = now;
/** log handle */
logHandler.accept(logBuffer);
logBuffer.clear();
}
return lastFlushTime;
}
/**
* close buffer reader
*
* @param inReader in reader
*/
private void close(BufferedReader inReader) {
if (inReader != null) {
try {
inReader.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
protected List<String> commandOptions() {
return Collections.emptyList();
}
protected abstract String buildCommandFilePath();
protected abstract String commandInterpreter();
protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException;
public String getTaskResultString() {
return taskResultString;
}
public void setTaskResultString(String taskResultString) {
this.taskResultString = taskResultString;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,128 | [Feature][Worker] Support turning off sudo permissions |
**Describe the feature**
The current multi-tenant implementation requires sudo permission. In many deployments the execution user does not have sudo permission and tasks do not need to run under tenant accounts. In that case it should be possible to turn off the sudo dependency and have tasks executed by the deployment user.
sudo stands for privilege escalation. If privilege escalation is not configured, multi-tenancy is only used to isolate resources and users; once it is configured, tasks run as the tenant user, and without it they run as the deployment user. In addition, when privilege escalation is not configured, tenants must not be created automatically.
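A minimal sketch of what such a switch could look like where the worker assembles the command line; the `sudoEnabled` flag and the helper class are illustrative names, not the implementation in the linked PR:

```java
import java.util.LinkedList;
import java.util.List;

// Illustrative: build the command prefix depending on whether sudo (privilege escalation) is enabled.
public final class CommandPrefixBuilder {

    private CommandPrefixBuilder() {
    }

    public static List<String> buildPrefix(boolean sudoEnabled, String tenantCode, String interpreter) {
        List<String> command = new LinkedList<>();
        if (sudoEnabled) {
            // multi-tenant mode: escalate and run the task as the tenant user
            command.add("sudo");
            command.add("-u");
            command.add(tenantCode);
        }
        // with sudo disabled, the task simply runs as the deployment user
        command.add(interpreter);
        return command;
    }
}
```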
sudo 代表提权,如果不配置提权,多租户仅用来隔离资源与用户,配置提权以后,任务执行使用租户执行,不配置使用部署用户执行;并且不配置提权不允许自动创建租户 | https://github.com/apache/dolphinscheduler/issues/5128 | https://github.com/apache/dolphinscheduler/pull/5129 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | a6a1b94df4f5c4d22845fe99f2f778ba53d60e5c | "2021-03-23T04:00:15Z" | java | "2021-03-29T03:07:39Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import java.util.regex.Pattern;
/**
* Constants
*/
public final class Constants {
private Constants() {
throw new UnsupportedOperationException("Construct Constants");
}
/**
* quartz config
*/
public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId";
public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon";
public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties";
public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class";
public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount";
public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons";
public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority";
public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class";
public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix";
public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered";
public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold";
public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval";
public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock";
public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource";
public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class";
/**
* quartz config default value
*/
public static final String QUARTZ_TABLE_PREFIX = "QRTZ_";
public static final String QUARTZ_MISFIRETHRESHOLD = "60000";
public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000";
public static final String QUARTZ_DATASOURCE = "myDs";
public static final String QUARTZ_THREADCOUNT = "25";
public static final String QUARTZ_THREADPRIORITY = "5";
public static final String QUARTZ_INSTANCENAME = "DolphinScheduler";
public static final String QUARTZ_INSTANCEID = "AUTO";
public static final String QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true";
/**
* common properties path
*/
public static final String COMMON_PROPERTIES_PATH = "/common.properties";
/**
* fs.defaultFS
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
* yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
public static final String YARN_RESOURCEMANAGER_HA_XX = "xx";
/**
* yarn.application.status.address
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* yarn.job.history.status.address
*/
public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs/s3 configuration
* resource.upload.path
*/
public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";
/**
* data basedir path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* dolphinscheduler.env.path
*/
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
/**
* environment properties default path
*/
public static final String ENV_PATH = "env/dolphinscheduler_env.sh";
/**
* python home
*/
public static final String PYTHON_HOME = "PYTHON_HOME";
/**
* resource.view.suffixs
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true";
/**
* string true
*/
public static final String STRING_TRUE = "true";
/**
* string false
*/
public static final String STRING_FALSE = "false";
/**
* resource storage type
*/
public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
/**
* all servers directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers";
/**
* MasterServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
/**
* MasterServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters";
/**
* WorkerServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers";
/**
     * MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* slash /
*/
public static final String SLASH = "/";
/**
* COLON :
*/
public static final String COLON = ":";
/**
* SPACE " "
*/
public static final String SPACE = " ";
/**
* SINGLE_SLASH /
*/
public static final String SINGLE_SLASH = "/";
/**
* DOUBLE_SLASH //
*/
public static final String DOUBLE_SLASH = "//";
/**
* SINGLE_QUOTES "'"
*/
public static final String SINGLE_QUOTES = "'";
/**
* DOUBLE_QUOTES "\""
*/
public static final String DOUBLE_QUOTES = "\"";
/**
* SEMICOLON ;
*/
public static final String SEMICOLON = ";";
/**
* EQUAL SIGN
*/
public static final String EQUAL_SIGN = "=";
/**
* AT SIGN
*/
public static final String AT_SIGN = "@";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory";
public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg";
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMddHHmmss
*/
public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
/**
* date format of yyyyMMddHHmmssSSS
*/
public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS";
/**
* http connect time out
*/
public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;
/**
* http connect request time out
*/
public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;
/**
     * httpclient socket timeout
*/
public static final int SOCKET_TIMEOUT = 60 * 1000;
/**
* http header
*/
public static final String HTTP_HEADER_UNKNOWN = "unKnown";
/**
* http X-Forwarded-For
*/
public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
/**
* http X-Real-IP
*/
public static final String HTTP_X_REAL_IP = "X-Real-IP";
/**
* UTF-8
*/
public static final String UTF_8 = "UTF-8";
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
/**
* default display rows
*/
public static final int DEFAULT_DISPLAY_ROWS = 10;
/**
* read permission
*/
public static final int READ_PERMISSION = 2 * 1;
/**
* write permission
*/
public static final int WRITE_PERMISSION = 2 * 2;
/**
* execute permission
*/
public static final int EXECUTE_PERMISSION = 1;
/**
* default admin permission
*/
public static final int DEFAULT_ADMIN_PERMISSION = 7;
/**
* all permissions
*/
public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;
/**
* max task timeout
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* master cpu load
*/
public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* master reserved memory
*/
public static final double DEFAULT_MASTER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker cpu load
*/
public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* worker reserved memory
*/
public static final double DEFAULT_WORKER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker host weight
*/
public static final int DEFAULT_WORKER_HOST_WEIGHT = 100;
/**
     * default log cache row count; output is flushed when this number is reached
*/
public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;
/**
     * log flush interval; output is flushed when this interval elapses
*/
public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;
/**
     * time unit: seconds to minutes
*/
public static final int SEC_2_MINUTES_TIME_UNIT = 60;
/***
*
* rpc port
*/
public static final int RPC_PORT = 50051;
/***
* alert rpc port
*/
public static final int ALERT_RPC_PORT = 50052;
/**
* forbid running task
*/
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* datasource configuration path
*/
public static final String DATASOURCE_PROPERTIES = "/datasource.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
public static final String TASK_RECORD_FLAG = "task.record.flag";
public static final String TASK_RECORD_USER = "task.record.datasource.username";
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static final String NULL = "NULL";
public static final String THREAD_NAME_MASTER_SERVER = "Master-Server";
public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server";
public static final String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static final String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
/**
* command parameter keys
*/
public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId";
public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId";
public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0";
public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList";
public static final String CMD_PARAM_START_PARAMS = "StartParams";
public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams";
/**
* complement data start date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";
/**
* complement data end date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";
/**
* hadoop configuration
*/
public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";
/**
* data source config
*/
public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";
public static final String DEVELOPMENT = "development";
public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";
/**
* sleep time
*/
public static final int SLEEP_TIME_MILLIS = 1000;
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10;
public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11;
/**
* jar
*/
public static final String JAR = "jar";
/**
* hadoop
*/
public static final String HADOOP = "hadoop";
/**
* -D <property>=<value>
*/
public static final String D = "-D";
/**
* -D mapreduce.job.name=name
*/
public static final String MR_NAME = "mapreduce.job.name";
/**
* -D mapreduce.job.queuename=queuename
*/
public static final String MR_QUEUE = "mapreduce.job.queuename";
/**
* spark params constant
*/
public static final String MASTER = "--master";
public static final String DEPLOY_MODE = "--deploy-mode";
/**
* --class CLASS_NAME
*/
public static final String MAIN_CLASS = "--class";
/**
* --driver-cores NUM
*/
public static final String DRIVER_CORES = "--driver-cores";
/**
* --driver-memory MEM
*/
public static final String DRIVER_MEMORY = "--driver-memory";
/**
* --num-executors NUM
*/
public static final String NUM_EXECUTORS = "--num-executors";
/**
* --executor-cores NUM
*/
public static final String EXECUTOR_CORES = "--executor-cores";
/**
* --executor-memory MEM
*/
public static final String EXECUTOR_MEMORY = "--executor-memory";
/**
* --name NAME
*/
public static final String SPARK_NAME = "--name";
/**
* --queue QUEUE
*/
public static final String SPARK_QUEUE = "--queue";
/**
* exit code success
*/
public static final int EXIT_CODE_SUCCESS = 0;
/**
* exit code kill
*/
public static final int EXIT_CODE_KILL = 137;
/**
* exit code failure
*/
public static final int EXIT_CODE_FAILURE = -1;
/**
* date format of yyyyMMdd
*/
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/**
* system date(yyyyMMddHHmmss)
*/
public static final String PARAMETER_DATETIME = "system.datetime";
/**
* system date(yyyymmdd) today
*/
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/**
* system date(yyyymmdd) yesterday
*/
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/**
* ACCEPTED
*/
public static final String ACCEPTED = "ACCEPTED";
/**
* SUCCEEDED
*/
public static final String SUCCEEDED = "SUCCEEDED";
/**
* NEW
*/
public static final String NEW = "NEW";
/**
* NEW_SAVING
*/
public static final String NEW_SAVING = "NEW_SAVING";
/**
* SUBMITTED
*/
public static final String SUBMITTED = "SUBMITTED";
/**
* FAILED
*/
public static final String FAILED = "FAILED";
/**
* KILLED
*/
public static final String KILLED = "KILLED";
/**
* RUNNING
*/
public static final String RUNNING = "RUNNING";
/**
* underline "_"
*/
public static final String UNDERLINE = "_";
/**
     * quartz job prefix
*/
public static final String QUARTZ_JOB_PRIFIX = "job";
/**
     * quartz job group prefix
*/
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/**
* projectId
*/
public static final String PROJECT_ID = "projectId";
/**
* processId
*/
public static final String SCHEDULE_ID = "scheduleId";
/**
* schedule
*/
public static final String SCHEDULE = "schedule";
/**
* application regex
*/
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = OSUtils.isWindows() ? "handle" : "pid";
/**
* month_begin
*/
public static final String MONTH_BEGIN = "month_begin";
/**
* add_months
*/
public static final String ADD_MONTHS = "add_months";
/**
* month_end
*/
public static final String MONTH_END = "month_end";
/**
* week_begin
*/
public static final String WEEK_BEGIN = "week_begin";
/**
* week_end
*/
public static final String WEEK_END = "week_end";
/**
* timestamp
*/
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String LOCAL_PARAMS_LIST = "localParamsList";
public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance";
public static final String TASK_TYPE = "taskType";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
public static final String QUEUE = "queue";
public static final String QUEUE_NAME = "queueName";
public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0;
public static final int LOG_QUERY_LIMIT = 4096;
/**
* master/worker server use for zk
*/
public static final String MASTER_TYPE = "master";
public static final String WORKER_TYPE = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
/**
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* kerberos
*/
public static final String KERBEROS = "kerberos";
/**
* kerberos expire time
*/
public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
     * hadoop.security.authentication.startup.state
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* com.amazonaws.services.s3.enableV4
*/
public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
* default worker group id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
/**
* task log info format
*/
public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";
/**
* hive conf
*/
public static final String HIVE_CONF = "hiveconf:";
/**
* flink
*/
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
public static final String FLINK_APP_NAME = "-ynm";
public static final String FLINK_QUEUE = "-yqu";
public static final String FLINK_TASK_MANAGE = "-yn";
public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
public static final String FLINK_MAIN_CLASS = "-c";
public static final String FLINK_PARALLELISM = "-p";
public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae";
public static final int[] NOT_TERMINATED_STATES = new int[] {
ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.WAITTING_THREAD.ordinal(),
ExecutionStatus.WAITTING_DEPEND.ordinal()
};
/**
* status
*/
public static final String STATUS = "status";
/**
* message
*/
public static final String MSG = "msg";
/**
* data total
*/
public static final String COUNT = "count";
/**
* page size
*/
public static final String PAGE_SIZE = "pageSize";
/**
* current page no
*/
public static final String PAGE_NUMBER = "pageNo";
/**
*
*/
public static final String DATA_LIST = "data";
public static final String TOTAL_LIST = "totalList";
public static final String CURRENT_PAGE = "currentPage";
public static final String TOTAL_PAGE = "totalPage";
public static final String TOTAL = "total";
/**
* workflow
*/
public static final String WORKFLOW_LIST = "workFlowList";
public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList";
/**
* session user
*/
public static final String SESSION_USER = "session.user";
public static final String SESSION_ID = "sessionId";
public static final String PASSWORD_DEFAULT = "******";
/**
* locale
*/
public static final String LOCALE_LANGUAGE = "language";
/**
* driver
*/
public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver";
/**
* database type
*/
public static final String MYSQL = "MYSQL";
public static final String POSTGRESQL = "POSTGRESQL";
public static final String HIVE = "HIVE";
public static final String SPARK = "SPARK";
public static final String CLICKHOUSE = "CLICKHOUSE";
public static final String ORACLE = "ORACLE";
public static final String SQLSERVER = "SQLSERVER";
public static final String DB2 = "DB2";
public static final String PRESTO = "PRESTO";
/**
* jdbc url
*/
public static final String JDBC_MYSQL = "jdbc:mysql://";
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
public static final String JDBC_PRESTO = "jdbc:presto://";
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf";
public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername";
public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath";
/**
* session timeout
*/
public static final int SESSION_TIME_OUT = 7200;
public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
public static final String UDF = "UDF";
public static final String CLASS = "class";
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
/**
* dataSource sensitive param
*/
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
/**
* default worker group
*/
public static final String DEFAULT_WORKER_GROUP = "default";
public static final Integer TASK_INFO_LENGTH = 5;
/**
* new
* schedule time
*/
public static final String PARAMETER_SHECDULE_TIME = "schedule.time";
/**
* authorize writable perm
*/
public static final int AUTHORIZE_WRITABLE_PERM = 7;
/**
* authorize readable perm
*/
public static final int AUTHORIZE_READABLE_PERM = 4;
/**
* plugin configurations
*/
public static final String PLUGIN_JAR_SUFFIX = ".jar";
public static final int NORMAL_NODE_STATUS = 0;
public static final int ABNORMAL_NODE_STATUS = 1;
public static final String START_TIME = "start time";
public static final String END_TIME = "end time";
public static final String START_END_DATE = "startDate,endDate";
/**
* system line separator
*/
public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");
/**
* net system properties
*/
public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred";
public static final String EXCEL_SUFFIX_XLS = ".xls";
/**
* datasource encryption salt
*/
public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";
/**
     * network IP priority strategy: default, inner or outer
*/
public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";
/**
* exec shell scripts
*/
public static final String SH = "sh";
/**
     * pstree, get pid and sub pids
*/
public static final String PSTREE = "pstree";
/**
* docker & kubernetes
*/
public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER"));
public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,128 | [Feature][Worker] Support turning off sudo permissions | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.*
**Describe the feature**
The current version of the multi-tenant implementation requires sudo permission. In most deployments the executing user does not have sudo permission and tasks do not need to run under a tenant account; in that case the sudo dependency can be turned off and tasks are executed by the deployment user.
sudo stands for privilege escalation. If privilege escalation is not configured, multi-tenancy is only used to isolate resources and users. Once privilege escalation is configured, tasks are executed by the tenant; if it is not configured, tasks are executed by the deployment user, and tenants cannot be created automatically. | https://github.com/apache/dolphinscheduler/issues/5128 | https://github.com/apache/dolphinscheduler/pull/5129 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | a6a1b94df4f5c4d22845fe99f2f778ba53d60e5c | "2021-03-23T04:00:15Z" | java | "2021-03-29T03:07:39Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* common utils
*/
public class CommonUtils {
private static final Logger logger = LoggerFactory.getLogger(CommonUtils.class);
private static final Base64 BASE64 = new Base64();
private CommonUtils() {
throw new UnsupportedOperationException("Construct CommonUtils");
}
/**
* @return get the path of system environment variables
*/
public static String getSystemEnvPath() {
String envPath = PropertyUtils.getString(Constants.DOLPHINSCHEDULER_ENV_PATH);
if (StringUtils.isEmpty(envPath)) {
URL envDefaultPath = CommonUtils.class.getClassLoader().getResource(Constants.ENV_PATH);
if (envDefaultPath != null) {
envPath = envDefaultPath.getPath();
logger.debug("env path :{}", envPath);
} else {
envPath = "/etc/profile";
}
}
return envPath;
}
/**
* @return is develop mode
*/
public static boolean isDevelopMode() {
return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE, true);
}
/**
     * whether the resource upload type is HDFS and kerberos startup state is true
     *
     * @return true if the resource upload type is HDFS and kerberos startup is enabled
*/
public static boolean getKerberosStartupState() {
String resUploadStartupType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false);
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
}
/**
* load kerberos configuration
*
* @throws Exception errors
*/
public static void loadKerberosConf() throws Exception {
loadKerberosConf(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
/**
* load kerberos configuration
* @param javaSecurityKrb5Conf javaSecurityKrb5Conf
* @param loginUserKeytabUsername loginUserKeytabUsername
* @param loginUserKeytabPath loginUserKeytabPath
* @throws Exception errors
*/
public static void loadKerberosConf(String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) throws Exception {
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, StringUtils.defaultIfBlank(javaSecurityKrb5Conf, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH)));
Configuration configuration = new Configuration();
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, Constants.KERBEROS);
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(StringUtils.defaultIfBlank(loginUserKeytabUsername, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)),
StringUtils.defaultIfBlank(loginUserKeytabPath, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH)));
}
}
/**
* encode password
*/
public static String encodePassword(String password) {
if (StringUtils.isEmpty(password)) {
return StringUtils.EMPTY;
}
//if encryption is not turned on, return directly
boolean encryptionEnable = PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE, false);
if (!encryptionEnable) {
return password;
}
// Using Base64 + salt to process password
String salt = PropertyUtils.getString(Constants.DATASOURCE_ENCRYPTION_SALT, Constants.DATASOURCE_ENCRYPTION_SALT_DEFAULT);
String passwordWithSalt = salt + new String(BASE64.encode(password.getBytes(StandardCharsets.UTF_8)));
return new String(BASE64.encode(passwordWithSalt.getBytes(StandardCharsets.UTF_8)));
}
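    // Round-trip sketch of the scheme above (values illustrative, assuming
    // encryption is enabled and the default salt "!@#$%^&*" is in use):
    //   String enc = CommonUtils.encodePassword("root"); // Base64(salt + Base64("root"))
    //   String dec = CommonUtils.decodePassword(enc);    // -> "root"
    // decodePassword below reverses both steps and returns the raw input when
    // the decoded value does not start with the configured salt.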
/**
* decode password
*/
public static String decodePassword(String password) {
if (StringUtils.isEmpty(password)) {
return StringUtils.EMPTY;
}
//if encryption is not turned on, return directly
boolean encryptionEnable = PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE, false);
if (!encryptionEnable) {
return password;
}
// Using Base64 + salt to process password
String salt = PropertyUtils.getString(Constants.DATASOURCE_ENCRYPTION_SALT, Constants.DATASOURCE_ENCRYPTION_SALT_DEFAULT);
String passwordWithSalt = new String(BASE64.decode(password), StandardCharsets.UTF_8);
if (!passwordWithSalt.startsWith(salt)) {
logger.warn("There is a password and salt mismatch: {} ", password);
return password;
}
return new String(BASE64.decode(passwordWithSalt.substring(salt.length())), StandardCharsets.UTF_8);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,128 | [Feature][Worker] Support turning off sudo permissions | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.*
**Describe the feature**
The current version of the multi-tenant implementation requires sudo permission. In most deployments the executing user does not have sudo permission and tasks do not need to run under a tenant account; in that case the sudo dependency can be turned off and tasks are executed by the deployment user.
sudo stands for privilege escalation. If privilege escalation is not configured, multi-tenancy is only used to isolate resources and users. Once privilege escalation is configured, tasks are executed by the tenant; if it is not configured, tasks are executed by the deployment user, and tenants cannot be created automatically. | https://github.com/apache/dolphinscheduler/issues/5128 | https://github.com/apache/dolphinscheduler/pull/5129 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | a6a1b94df4f5c4d22845fe99f2f778ba53d60e5c | "2021-03-23T04:00:15Z" | java | "2021-03-29T03:07:39Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.shell.ShellExecutor;
import org.apache.commons.configuration.Configuration;
import java.lang.management.OperatingSystemMXBean;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.math.RoundingMode;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.StringTokenizer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
/**
* os utils
*/
public class OSUtils {
private static final Logger logger = LoggerFactory.getLogger(OSUtils.class);
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private static final SystemInfo SI = new SystemInfo();
public static final String TWO_DECIMAL = "0.00";
/**
* return -1 when the function can not get hardware env info
* e.g {@link OSUtils#loadAverage()} {@link OSUtils#cpuUsage()}
*/
public static final double NEGATIVE_ONE = -1;
private static HardwareAbstractionLayer hal = SI.getHardware();
private OSUtils() {
throw new UnsupportedOperationException("Construct OSUtils");
}
/**
* Initialization regularization, solve the problem of pre-compilation performance,
* avoid the thread safety problem of multi-thread operation
*/
private static final Pattern PATTERN = Pattern.compile("\\s+");
/**
* get memory usage
* Keep 2 decimal
*
     * @return memory usage ratio, 0.00 - 1.00
*/
public static double memoryUsage() {
GlobalMemory memory = hal.getMemory();
double memoryUsage = (memory.getTotal() - memory.getAvailable() - memory.getSwapUsed()) * 0.1 / memory.getTotal() * 10;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(memoryUsage));
}
/**
* get available physical memory size
* <p>
* Keep 2 decimal
*
* @return available Physical Memory Size, unit: G
*/
public static double availablePhysicalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = (memory.getAvailable() + memory.getSwapUsed()) / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* get total physical memory size
* <p>
* Keep 2 decimal
*
     * @return total physical memory size, unit: G
*/
    public static double totalMemorySize() {
        GlobalMemory memory = hal.getMemory();
        double totalMemorySize = memory.getTotal() / 1024.0 / 1024 / 1024;
        DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
        df.setRoundingMode(RoundingMode.HALF_UP);
        return Double.parseDouble(df.format(totalMemorySize));
    }
/**
* load average
*
* @return load average
*/
public static double loadAverage() {
double loadAverage;
try {
OperatingSystemMXBean osBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
loadAverage = osBean.getSystemLoadAverage();
} catch (Exception e) {
logger.error("get operation system load average exception, try another method ", e);
loadAverage = hal.getProcessor().getSystemLoadAverage();
if (Double.isNaN(loadAverage)) {
return NEGATIVE_ONE;
}
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(loadAverage));
}
/**
* get cpu usage
*
* @return cpu usage
*/
public static double cpuUsage() {
CentralProcessor processor = hal.getProcessor();
double cpuUsage = processor.getSystemCpuLoad();
if (Double.isNaN(cpuUsage)) {
return NEGATIVE_ONE;
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(cpuUsage));
}
public static List<String> getUserList() {
try {
if (isMacOS()) {
return getUserListFromMac();
} else if (isWindows()) {
return getUserListFromWindows();
} else {
return getUserListFromLinux();
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return Collections.emptyList();
}
/**
* get user list from linux
*
* @return user list
*/
private static List<String> getUserListFromLinux() throws IOException {
List<String> userList = new ArrayList<>();
try (BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(new FileInputStream("/etc/passwd")))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
if (line.contains(":")) {
String[] userInfo = line.split(":");
userList.add(userInfo[0]);
}
}
}
return userList;
}
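    // Each /etc/passwd line has the form "name:x:uid:gid:gecos:home:shell",
    // e.g. "root:x:0:0:root:/root:/bin/bash", so userInfo[0] yields "root".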
/**
* get user list from mac
*
* @return user list
*/
private static List<String> getUserListFromMac() throws IOException {
String result = exeCmd("dscl . list /users");
if (StringUtils.isNotEmpty(result)) {
return Arrays.asList(result.split("\n"));
}
return Collections.emptyList();
}
/**
* get user list from windows
*
* @return user list
*/
private static List<String> getUserListFromWindows() throws IOException {
String result = exeCmd("net user");
String[] lines = result.split("\n");
int startPos = 0;
int endPos = lines.length - 2;
for (int i = 0; i < lines.length; i++) {
if (lines[i].isEmpty()) {
continue;
}
int count = 0;
if (lines[i].charAt(0) == '-') {
for (int j = 0; j < lines[i].length(); j++) {
                    if (lines[i].charAt(j) == '-') {
count++;
}
}
}
if (count == lines[i].length()) {
startPos = i + 1;
break;
}
}
List<String> users = new ArrayList<>();
while (startPos <= endPos) {
users.addAll(Arrays.asList(PATTERN.split(lines[startPos])));
startPos++;
}
return users;
}
/**
* create user
*
* @param userName user name
*/
public static void createUserIfAbsent(String userName) {
// if not exists this user, then create
taskLoggerThreadLocal.set(taskLoggerThreadLocal.get());
if (!getUserList().contains(userName)) {
boolean isSuccess = createUser(userName);
String infoLog = String.format("create user %s %s", userName, isSuccess ? "success" : "fail");
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog);
}
taskLoggerThreadLocal.remove();
}
/**
* create user
*
* @param userName user name
* @return true if creation was successful, otherwise false
*/
public static boolean createUser(String userName) {
try {
String userGroup = getGroup();
if (StringUtils.isEmpty(userGroup)) {
                String errorLog = "user group does not exist for this operating system.";
LoggerUtils.logError(Optional.ofNullable(logger), errorLog);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), errorLog);
return false;
}
if (isMacOS()) {
createMacUser(userName, userGroup);
} else if (isWindows()) {
createWindowsUser(userName, userGroup);
} else {
createLinuxUser(userName, userGroup);
}
return true;
} catch (Exception e) {
LoggerUtils.logError(Optional.ofNullable(logger), e);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), e);
}
return false;
}
/**
* create linux user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createLinuxUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create linux os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String cmd = String.format("sudo useradd -g %s %s", userGroup, userName);
String infoLog2 = String.format("execute cmd : %s", cmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
exeCmd(cmd);
}
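    // For example, createLinuxUser("tenant1", "hadoop") runs
    // "sudo useradd -g hadoop tenant1" (tenant name illustrative) -- exactly
    // the sudo dependency that issue #5128 proposes to make optional.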
/**
* create mac user (Supports Mac OSX 10.10+)
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createMacUser(String userName, String userGroup) throws IOException {
Optional<Logger> optionalLogger = Optional.ofNullable(logger);
Optional<Logger> optionalTaskLogger = Optional.ofNullable(taskLoggerThreadLocal.get());
String infoLog1 = String.format("create mac os user : %s", userName);
LoggerUtils.logInfo(optionalLogger, infoLog1);
LoggerUtils.logInfo(optionalTaskLogger, infoLog1);
String createUserCmd = String.format("sudo sysadminctl -addUser %s -password %s", userName, userName);
String infoLog2 = String.format("create user command : %s", createUserCmd);
LoggerUtils.logInfo(optionalLogger, infoLog2);
LoggerUtils.logInfo(optionalTaskLogger, infoLog2);
exeCmd(createUserCmd);
String appendGroupCmd = String.format("sudo dseditgroup -o edit -a %s -t user %s", userName, userGroup);
String infoLog3 = String.format("append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(optionalLogger, infoLog3);
LoggerUtils.logInfo(optionalTaskLogger, infoLog3);
exeCmd(appendGroupCmd);
}
/**
* create windows user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createWindowsUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create windows os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String userCreateCmd = String.format("net user \"%s\" /add", userName);
String infoLog2 = String.format("execute create user command : %s", userCreateCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
exeCmd(userCreateCmd);
String appendGroupCmd = String.format("net localgroup \"%s\" \"%s\" /add", userGroup, userName);
String infoLog3 = String.format("execute append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog3);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog3);
exeCmd(appendGroupCmd);
}
/**
* get system group information
*
* @return system group info
* @throws IOException errors
*/
public static String getGroup() throws IOException {
if (isWindows()) {
String currentProcUserName = System.getProperty("user.name");
String result = exeCmd(String.format("net user \"%s\"", currentProcUserName));
String line = result.split("\n")[22];
String group = PATTERN.split(line)[1];
if (group.charAt(0) == '*') {
return group.substring(1);
} else {
return group;
}
} else {
String result = exeCmd("groups");
if (StringUtils.isNotEmpty(result)) {
String[] groupInfo = result.split(" ");
return groupInfo[0];
}
}
return null;
}
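    // On Linux/macOS this takes the first token of `groups` output, e.g.
    // "deploy docker wheel" -> "deploy"; on Windows it reads a fixed line of
    // `net user <name>` output, which is locale- and format-sensitive.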
/**
* get sudo command
* @param tenantCode tenantCode
* @param command command
* @return result of sudo execute command
*/
public static String getSudoCmd(String tenantCode, String command) {
return StringUtils.isEmpty(tenantCode) ? command : "sudo -u " + tenantCode + " " + command;
}
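    // Usage sketch: getSudoCmd("tenant1", "sh task.command") yields
    // "sudo -u tenant1 sh task.command", while an empty tenantCode returns the
    // raw command so the task runs as the deployment user. One possible gate
    // for issue #5128 (the flag name is an assumption, not the actual fix):
    //   boolean sudoEnable = PropertyUtils.getBoolean("sudo.enable", true); // hypothetical key
    //   return (!sudoEnable || StringUtils.isEmpty(tenantCode)) ? command
    //           : "sudo -u " + tenantCode + " " + command;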
/**
* Execute the corresponding command of Linux or Windows
*
* @param command command
* @return result of execute command
* @throws IOException errors
*/
public static String exeCmd(String command) throws IOException {
StringTokenizer st = new StringTokenizer(command);
String[] cmdArray = new String[st.countTokens()];
for (int i = 0; st.hasMoreTokens(); i++) {
cmdArray[i] = st.nextToken();
}
return exeShell(cmdArray);
}
/**
* Execute the shell
*
* @param command command
* @return result of execute the shell
* @throws IOException errors
*/
public static String exeShell(String[] command) throws IOException {
return ShellExecutor.execCommand(command);
}
/**
* get process id
*
* @return process id
*/
public static int getProcessID() {
RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
return Integer.parseInt(runtimeMXBean.getName().split("@")[0]);
}
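    // RuntimeMXBean.getName() conventionally returns "<pid>@<hostname>" on
    // HotSpot, e.g. "12345@worker-01", so splitting on '@' yields the pid;
    // that format is a JVM convention rather than a JMX guarantee.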
/**
* whether is macOS
*
* @return true if mac
*/
public static boolean isMacOS() {
return getOSName().startsWith("Mac");
}
/**
* whether is windows
*
* @return true if windows
*/
public static boolean isWindows() {
return getOSName().startsWith("Windows");
}
/**
* get current OS name
*
* @return current OS name
*/
public static String getOSName() {
return System.getProperty("os.name");
}
/**
* check memory and cpu usage
*
* @param systemCpuLoad systemCpuLoad
* @param systemReservedMemory systemReservedMemory
* @return check memory and cpu usage
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory) {
// system load average
double loadAverage = loadAverage();
// system available physical memory
double availablePhysicalMemorySize = availablePhysicalMemorySize();
if (loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory) {
logger.warn("load is too high or availablePhysicalMemorySize(G) is too low, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize, loadAverage);
return false;
} else {
return true;
}
}
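    // Example with the fallback defaults from Constants on an 8-core, 16 GB
    // host: systemCpuLoad defaults to cores * 2 = 16 and systemReservedMemory
    // to totalMemorySize() / 10 = 1.6 G, so the node is flagged busy once the
    // load average exceeds 16 or available memory drops below 1.6 G.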
/**
* check memory and cpu usage
*
* @param conf conf
* @param isMaster is master
* @return check memory and cpu usage
*/
public static Boolean checkResource(Configuration conf, Boolean isMaster) {
double systemCpuLoad;
double systemReservedMemory;
if (Boolean.TRUE.equals(isMaster)) {
systemCpuLoad = conf.getDouble(Constants.MASTER_MAX_CPULOAD_AVG, Constants.DEFAULT_MASTER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.MASTER_RESERVED_MEMORY, Constants.DEFAULT_MASTER_RESERVED_MEMORY);
} else {
systemCpuLoad = conf.getDouble(Constants.WORKER_MAX_CPULOAD_AVG, Constants.DEFAULT_WORKER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.WORKER_RESERVED_MEMORY, Constants.DEFAULT_WORKER_RESERVED_MEMORY);
}
return checkResource(systemCpuLoad, systemReservedMemory);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,128 | [Feature][Worker] Support turning off sudo permissions | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.*
**Describe the feature**
The current version of the multi-tenant implementation requires sudo permission. In most deployments the executing user does not have sudo permission and tasks do not need to run under a tenant account; in that case the sudo dependency can be turned off and tasks are executed by the deployment user.
sudo stands for privilege escalation. If privilege escalation is not configured, multi-tenancy is only used to isolate resources and users. Once privilege escalation is configured, tasks are executed by the tenant; if it is not configured, tasks are executed by the deployment user, and tenants cannot be created automatically. | https://github.com/apache/dolphinscheduler/issues/5128 | https://github.com/apache/dolphinscheduler/pull/5129 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | a6a1b94df4f5c4d22845fe99f2f778ba53d60e5c | "2021-03-23T04:00:15Z" | java | "2021-03-29T03:07:39Z" | dolphinscheduler-common/src/main/resources/common.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# resource storage type : HDFS, S3, NONE
resource.storage.type=NONE
# resource storage path on HDFS/S3: resource files will be stored under this path; please make sure the directory exists on HDFS and has read/write permissions. "/dolphinscheduler" is recommended
resource.upload.path=/dolphinscheduler
# user data local directory path, please make sure the directory exists and has read/write permissions
#data.basedir.path=/tmp/dolphinscheduler
# whether kerberos starts
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
# login user from keytab username
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# login user from keytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
#resource.view.suffixs
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
# if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
fs.defaultFS=hdfs://mycluster:8020
# if resource.storage.type=S3, s3 endpoint
fs.s3a.endpoint=http://192.168.xx.xx:9010
# if resource.storage.type=S3, s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# if resource.storage.type=S3, s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
# if resourcemanager HA is enabled, please set the HA ips; if resourcemanager is single, make this value empty
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# if resourcemanager HA is enabled or resourcemanager is not used, please keep the default value; if resourcemanager is single, you only need to replace ds1 with the actual resourcemanager hostname.
yarn.application.status.address=http://ds1:8088/ws/v1/cluster/apps/%s
# job history status url when the application number threshold is reached (default 10000, may be set to 1000)
yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
# system env path. If you want to set your own path, you need to set this env file to an absolute path
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh
development.state=false
# kerberos tgt expire time, unit is hours
kerberos.expire.time=2
# datasource encryption salt
datasource.encryption.enable=false
datasource.encryption.salt=!@#$%^&*
# Network IP gets priority, default inner outer
#dolphin.scheduler.network.priority.strategy=default
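# Illustrative sketch for issue #5128 (turning off sudo permissions).
# The property name below is an assumption, not necessarily what the merged PR uses:
# when set to false, the worker would skip the "sudo -u <tenant>" prefix and
# automatic tenant creation, and tasks would run as the deploy user.
#sudo.enable=true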
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,128 | [Feature][Worker] Support turning off sudo permissions | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.*
**Describe the feature**
The current multi-tenant implementation requires sudo permission. Most of the time the deploy user does not have sudo permission and does not need tasks to be executed under tenant accounts. In that case the sudo dependency can be turned off and tasks are executed by the deploy user.
Here sudo stands for privilege escalation. If privilege escalation is not configured, multi-tenancy is only used to isolate resources and users; once it is configured, tasks are executed as the tenant user, otherwise as the deploy user. In addition, when privilege escalation is not configured, tenants cannot be created automatically.
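A minimal sketch of how such a switch could look on the worker side, assuming a hypothetical `sudo.enable` boolean property read through the existing `PropertyUtils.getBoolean(key, defaultValue)` helper (class and property names here are illustrative assumptions, not the implementation merged in the linked PR):

```java
import java.util.LinkedList;
import java.util.List;

import org.apache.dolphinscheduler.common.utils.PropertyUtils;

// Hypothetical helper: wrap a raw command with "sudo -u <tenant>" only when
// privilege escalation is enabled; otherwise run it as the deploy user.
public final class SudoMode {

    private SudoMode() {
    }

    // assumed property name; defaults to true to keep the current behavior
    public static boolean sudoEnabled() {
        return PropertyUtils.getBoolean("sudo.enable", true);
    }

    public static List<String> wrap(String tenantCode, List<String> rawCommand) {
        if (!sudoEnabled()) {
            // no privilege escalation: the deploy user executes the command directly
            return rawCommand;
        }
        List<String> command = new LinkedList<>();
        command.add("sudo");
        command.add("-u");
        command.add(tenantCode);
        command.addAll(rawCommand);
        return command;
    }
}
```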
| https://github.com/apache/dolphinscheduler/issues/5128 | https://github.com/apache/dolphinscheduler/pull/5129 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | a6a1b94df4f5c4d22845fe99f2f778ba53d60e5c | "2021-03-23T04:00:15Z" | java | "2021-03-29T03:07:39Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.enums.Event;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.Preconditions;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.LogUtils;
import org.apache.dolphinscheduler.server.worker.cache.ResponceCache;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.util.Date;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.Channel;
/**
* worker request processor
*/
public class TaskExecuteProcessor implements NettyRequestProcessor {
private static final Logger logger = LoggerFactory.getLogger(TaskExecuteProcessor.class);
/**
* worker config
*/
private final WorkerConfig workerConfig;
/**
* task callback service
*/
private final TaskCallbackService taskCallbackService;
/**
* alert client service
*/
private AlertClientService alertClientService;
/**
* taskExecutionContextCacheManager
*/
private final TaskExecutionContextCacheManager taskExecutionContextCacheManager;
    /**
     * task execute manager
     */
private final WorkerManagerThread workerManager;
public TaskExecuteProcessor() {
this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class);
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
this.workerManager = SpringApplicationContext.getBean(WorkerManagerThread.class);
}
    /**
     * Pre-cache the task to avoid the extreme case where a kill request arrives
     * before the task has been put into the cache
     *
     * @param taskExecutionContext task
     */
private void setTaskCache(TaskExecutionContext taskExecutionContext) {
TaskExecutionContext preTaskCache = new TaskExecutionContext();
preTaskCache.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
taskExecutionContextCacheManager.cacheTaskExecutionContext(preTaskCache);
}
public TaskExecuteProcessor(AlertClientService alertClientService) {
this();
this.alertClientService = alertClientService;
}
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.TASK_EXECUTE_REQUEST == command.getType(),
String.format("invalid command type : %s", command.getType()));
TaskExecuteRequestCommand taskRequestCommand = JSONUtils.parseObject(
command.getBody(), TaskExecuteRequestCommand.class);
logger.info("received command : {}", taskRequestCommand);
if (taskRequestCommand == null) {
logger.error("task execute request command is null");
return;
}
String contextJson = taskRequestCommand.getTaskExecutionContext();
TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(contextJson, TaskExecutionContext.class);
if (taskExecutionContext == null) {
logger.error("task execution context is null");
return;
}
setTaskCache(taskExecutionContext);
// custom logger
Logger taskLogger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort()));
taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
// local execute path
String execLocalPath = getExecLocalPath(taskExecutionContext);
logger.info("task instance local execute path : {}", execLocalPath);
taskExecutionContext.setExecutePath(execLocalPath);
FileUtils.taskLoggerThreadLocal.set(taskLogger);
try {
FileUtils.createWorkDirIfAbsent(execLocalPath);
if (workerConfig.getWorkerTenantAutoCreate()) {
OSUtils.createUserIfAbsent(taskExecutionContext.getTenantCode());
}
} catch (Throwable ex) {
String errorLog = String.format("create execLocalPath : %s", execLocalPath);
LoggerUtils.logError(Optional.of(logger), errorLog, ex);
LoggerUtils.logError(Optional.ofNullable(taskLogger), errorLog, ex);
taskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
}
FileUtils.taskLoggerThreadLocal.remove();
taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(),
new NettyRemoteChannel(channel, command.getOpaque()));
// delay task process
long remainTime = DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(), taskExecutionContext.getDelayTime() * 60L);
if (remainTime > 0) {
logger.info("delay the execution of task instance {}, delay time: {} s", taskExecutionContext.getTaskInstanceId(), remainTime);
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.DELAY_EXECUTION);
taskExecutionContext.setStartTime(null);
} else {
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
taskExecutionContext.setStartTime(new Date());
}
this.doAck(taskExecutionContext);
// submit task to manager
if (!workerManager.offer(new TaskExecuteThread(taskExecutionContext, taskCallbackService, taskLogger, alertClientService))) {
logger.info("submit task to manager error, queue is full, queue size is {}", workerManager.getQueueSize());
}
}
private void doAck(TaskExecutionContext taskExecutionContext) {
// tell master that task is in executing
TaskExecuteAckCommand ackCommand = buildAckCommand(taskExecutionContext);
ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command(), Event.ACK);
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command());
}
/**
* build ack command
*
* @param taskExecutionContext taskExecutionContext
* @return TaskExecuteAckCommand
*/
private TaskExecuteAckCommand buildAckCommand(TaskExecutionContext taskExecutionContext) {
TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode());
ackCommand.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
ackCommand.setHost(taskExecutionContext.getHost());
ackCommand.setStartTime(taskExecutionContext.getStartTime());
if (taskExecutionContext.getTaskType().equals(TaskType.SQL.name()) || taskExecutionContext.getTaskType().equals(TaskType.PROCEDURE.name())) {
ackCommand.setExecutePath(null);
} else {
ackCommand.setExecutePath(taskExecutionContext.getExecutePath());
}
taskExecutionContext.setLogPath(ackCommand.getLogPath());
return ackCommand;
}
/**
* get execute local path
*
* @param taskExecutionContext taskExecutionContext
* @return execute local path
*/
private String getExecLocalPath(TaskExecutionContext taskExecutionContext) {
return FileUtils.getProcessExecDir(taskExecutionContext.getProjectId(),
taskExecutionContext.getProcessDefineId(),
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,128 | [Feature][Worker] Support turning off sudo permissions | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.*
**Describe the feature**
The current multi-tenant implementation requires sudo permission. Most of the time the deploy user does not have sudo permission and does not need tasks to be executed under tenant accounts. In that case the sudo dependency can be turned off and tasks are executed by the deploy user.
Here sudo stands for privilege escalation. If privilege escalation is not configured, multi-tenancy is only used to isolate resources and users; once it is configured, tasks are executed as the tenant user, otherwise as the deploy user. In addition, when privilege escalation is not configured, tenants cannot be created automatically.
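In `AbstractCommandExecutor.buildProcess` (the file in this row), the change could look roughly like the sketch below; `sudoEnabled()` is a stand-in for whatever switch the merged PR actually introduced:

```java
// sketch only: add the "sudo -u <tenant>" prefix only when privilege escalation is enabled
List<String> command = new LinkedList<>();
if (sudoEnabled()) { // hypothetical switch, e.g. backed by a "sudo.enable" property
    command.add("sudo");
    command.add("-u");
    command.add(taskExecutionContext.getTenantCode());
}
command.add(commandInterpreter());
command.addAll(commandOptions());
command.add(commandFile);
processBuilder.command(command);
```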
| https://github.com/apache/dolphinscheduler/issues/5128 | https://github.com/apache/dolphinscheduler/pull/5129 | 9c0439621836b5134b8a7da4e671cb74edc7c31b | a6a1b94df4f5c4d22845fe99f2f778ba53d60e5c | "2021-03-23T04:00:15Z" | java | "2021-03-29T03:07:39Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_FAILURE;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_KILL;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_SUCCESS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
/**
* abstract command executor
*/
public abstract class AbstractCommandExecutor {
/**
* rules for extracting application ID
*/
protected static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX);
protected StringBuilder varPool = new StringBuilder();
/**
* process
*/
private Process process;
/**
* log handler
*/
protected Consumer<List<String>> logHandler;
/**
* logger
*/
protected Logger logger;
/**
* log list
*/
protected final List<String> logBuffer;
    protected boolean logOutputIsSuccess = false;
/**
* SHELL result string
*/
protected String taskResultString;
/**
* taskExecutionContext
*/
protected TaskExecutionContext taskExecutionContext;
/**
* taskExecutionContextCacheManager
*/
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
public AbstractCommandExecutor(Consumer<List<String>> logHandler,
TaskExecutionContext taskExecutionContext,
Logger logger) {
this.logHandler = logHandler;
this.taskExecutionContext = taskExecutionContext;
this.logger = logger;
this.logBuffer = Collections.synchronizedList(new ArrayList<>());
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
}
protected AbstractCommandExecutor(List<String> logBuffer) {
this.logBuffer = logBuffer;
}
/**
* build process
*
* @param commandFile command file
* @throws IOException IO Exception
*/
private void buildProcess(String commandFile) throws IOException {
// setting up user to run commands
List<String> command = new LinkedList<>();
//init process builder
ProcessBuilder processBuilder = new ProcessBuilder();
// setting up a working directory
processBuilder.directory(new File(taskExecutionContext.getExecutePath()));
// merge error information to standard output stream
processBuilder.redirectErrorStream(true);
// setting up user to run commands
command.add("sudo");
command.add("-u");
command.add(taskExecutionContext.getTenantCode());
command.add(commandInterpreter());
command.addAll(commandOptions());
command.add(commandFile);
// setting commands
processBuilder.command(command);
process = processBuilder.start();
// print command
printCommand(command);
}
/**
* task specific execution logic
*
* @param execCommand execCommand
* @return CommandExecuteResult
* @throws Exception if error throws Exception
*/
public CommandExecuteResult run(String execCommand) throws Exception {
CommandExecuteResult result = new CommandExecuteResult();
int taskInstanceId = taskExecutionContext.getTaskInstanceId();
// If the task has been killed, then the task in the cache is null
if (null == taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) {
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
if (StringUtils.isEmpty(execCommand)) {
taskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
return result;
}
String commandFilePath = buildCommandFilePath();
// create command file if not exists
createCommandFileIfNotExists(execCommand, commandFilePath);
//build process
buildProcess(commandFilePath);
// parse process output
parseProcessOutput(process);
Integer processId = getProcessId(process);
result.setProcessId(processId);
// cache processId
taskExecutionContext.setProcessId(processId);
boolean updateTaskExecutionContextStatus = taskExecutionContextCacheManager.updateTaskExecutionContext(taskExecutionContext);
if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) {
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
// print process id
logger.info("process start, process id is: {}", processId);
// if timeout occurs, exit directly
        long remainTime = getRemainTime();
// waiting for the run to finish
boolean status = process.waitFor(remainTime, TimeUnit.SECONDS);
logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{}",
taskExecutionContext.getExecutePath(),
processId
, result.getExitStatusCode());
// if SHELL task exit
if (status) {
// set appIds
List<String> appIds = getAppIds(taskExecutionContext.getLogPath());
result.setAppIds(String.join(Constants.COMMA, appIds));
// SHELL task state
result.setExitStatusCode(process.exitValue());
// if yarn task , yarn state is final state
if (process.exitValue() == 0) {
result.setExitStatusCode(isSuccessOfYarnState(appIds) ? EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE);
}
} else {
logger.error("process has failure , exitStatusCode : {} , ready to kill ...", result.getExitStatusCode());
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_FAILURE);
}
return result;
}
public String getVarPool() {
return varPool.toString();
}
/**
* cancel application
*
* @throws Exception exception
*/
public void cancelApplication() throws Exception {
if (process == null) {
return;
}
// clear log
clear();
int processId = getProcessId(process);
logger.info("cancel process: {}", processId);
// kill , waiting for completion
boolean killed = softKill(processId);
if (!killed) {
// hard kill
hardKill(processId);
            // destroy
process.destroy();
process = null;
}
}
    /**
     * soft kill
     *
     * @param processId process id
     * @return true if the process is no longer alive after the kill attempt
     */
private boolean softKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
                // build the kill command and wrap it with "sudo -u <tenant>" via OSUtils.getSudoCmd
String cmd = String.format("kill %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.info("kill attempt failed", e);
}
}
return !process.isAlive();
}
/**
* hard kill
*
* @param processId process id
*/
private void hardKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
String cmd = String.format("kill -9 %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.error("kill attempt failed ", e);
}
}
}
/**
* print command
*
* @param commands process builder
*/
private void printCommand(List<String> commands) {
String cmdStr;
try {
cmdStr = ProcessUtils.buildCommandStr(commands);
logger.info("task run command:\n{}", cmdStr);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* clear
*/
private void clear() {
List<String> markerList = new ArrayList<>();
markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString());
if (!logBuffer.isEmpty()) {
// log handle
logHandler.accept(logBuffer);
logBuffer.clear();
}
logHandler.accept(markerList);
}
/**
* get the standard output of the process
*
* @param process process
*/
private void parseProcessOutput(Process process) {
String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskExecutionContext.getTaskAppId());
ExecutorService getOutputLogService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService");
getOutputLogService.submit(() -> {
BufferedReader inReader = null;
try {
inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
String line;
logBuffer.add("welcome to use bigdata scheduling system...");
while ((line = inReader.readLine()) != null) {
if (line.startsWith("${setValue(")) {
varPool.append(line.substring("${setValue(".length(), line.length() - 2));
varPool.append("$VarPool$");
} else {
logBuffer.add(line);
taskResultString = line;
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
                logOutputIsSuccess = true;
close(inReader);
}
});
getOutputLogService.shutdown();
ExecutorService parseProcessOutputExecutorService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName);
parseProcessOutputExecutorService.submit(() -> {
try {
long lastFlushTime = System.currentTimeMillis();
                while (logBuffer.size() > 0 || !logOutputIsSuccess) {
if (logBuffer.size() > 0) {
lastFlushTime = flush(lastFlushTime);
} else {
Thread.sleep(Constants.DEFAULT_LOG_FLUSH_INTERVAL);
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
clear();
}
});
parseProcessOutputExecutorService.shutdown();
}
/**
* check yarn state
*
* @param appIds application id list
* @return is success of yarn task state
*/
public boolean isSuccessOfYarnState(List<String> appIds) {
boolean result = true;
try {
for (String appId : appIds) {
while (Stopper.isRunning()) {
ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
logger.info("appId:{}, final state:{}", appId, applicationStatus.name());
if (applicationStatus.equals(ExecutionStatus.FAILURE)
|| applicationStatus.equals(ExecutionStatus.KILL)) {
return false;
}
if (applicationStatus.equals(ExecutionStatus.SUCCESS)) {
break;
}
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
}
}
} catch (Exception e) {
logger.error(String.format("yarn applications: %s status failed ", appIds.toString()), e);
result = false;
}
return result;
}
public int getProcessId() {
return getProcessId(process);
}
/**
* get app links
*
* @param logPath log path
* @return app id list
*/
private List<String> getAppIds(String logPath) {
List<String> logs = convertFile2List(logPath);
List<String> appIds = new ArrayList<>();
        /**
         * parse the log to get the submitted yarn application ids
         */
for (String log : logs) {
String appId = findAppId(log);
if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) {
logger.info("find app id: {}", appId);
appIds.add(appId);
}
}
return appIds;
}
/**
* convert file to list
*
* @param filename file name
* @return line list
*/
private List<String> convertFile2List(String filename) {
        List<String> lineList = new ArrayList<>(100);
File file = new File(filename);
if (!file.exists()) {
return lineList;
}
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8));
String line = null;
while ((line = br.readLine()) != null) {
lineList.add(line);
}
} catch (Exception e) {
logger.error(String.format("read file: %s failed : ", filename), e);
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
return lineList;
}
/**
* find app id
*
* @param line line
* @return appid
*/
private String findAppId(String line) {
Matcher matcher = APPLICATION_REGEX.matcher(line);
if (matcher.find()) {
return matcher.group();
}
return null;
}
/**
* get remain time(s)
*
* @return remain time
*/
    private long getRemainTime() {
long usedTime = (System.currentTimeMillis() - taskExecutionContext.getStartTime().getTime()) / 1000;
long remainTime = taskExecutionContext.getTaskTimeout() - usedTime;
if (remainTime < 0) {
throw new RuntimeException("task execution time out");
}
return remainTime;
}
/**
* get process id
*
* @param process process
* @return process id
*/
private int getProcessId(Process process) {
int processId = 0;
try {
Field f = process.getClass().getDeclaredField(Constants.PID);
f.setAccessible(true);
processId = f.getInt(process);
} catch (Throwable e) {
logger.error(e.getMessage(), e);
}
return processId;
}
/**
     * when the log buffer size or the flush time reaches the condition, then flush
*
* @param lastFlushTime last flush time
* @return last flush time
*/
private long flush(long lastFlushTime) {
long now = System.currentTimeMillis();
        /**
         * when the log buffer size or the flush time reaches the condition, then flush
         */
if (logBuffer.size() >= Constants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > Constants.DEFAULT_LOG_FLUSH_INTERVAL) {
lastFlushTime = now;
/** log handle */
logHandler.accept(logBuffer);
logBuffer.clear();
}
return lastFlushTime;
}
/**
* close buffer reader
*
* @param inReader in reader
*/
private void close(BufferedReader inReader) {
if (inReader != null) {
try {
inReader.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
protected List<String> commandOptions() {
return Collections.emptyList();
}
protected abstract String buildCommandFilePath();
protected abstract String commandInterpreter();
protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException;
public String getTaskResultString() {
return taskResultString;
}
public void setTaskResultString(String taskResultString) {
this.taskResultString = taskResultString;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
---
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
Abnormal behavior:
After clicking kill on the page, the workflow instance stays in the "ready to stop" state
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker logs
Log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
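The symptom is consistent with the worker's `kill <pid>` reaching only the wrapper shell process while the backgrounded YARN submitter keeps running. A hedged sketch of one possible fix direction, killing the whole pid tree instead of a single pid, is shown below (class name and approach are illustrative assumptions, not the merged patch):

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative sketch: collect and kill the whole process tree so that
// children started with "&" inside the shell task are also terminated.
public class ProcessTreeKiller {

    // pstree -p prints pids in parentheses, e.g. "bash(123)---java(456)"
    private static final Pattern PID_PATTERN = Pattern.compile("\\((\\d+)\\)");

    public static List<String> collectPidTree(int rootPid) throws Exception {
        List<String> pids = new ArrayList<>();
        Process pstree = Runtime.getRuntime().exec(new String[]{"pstree", "-p", String.valueOf(rootPid)});
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(pstree.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                Matcher matcher = PID_PATTERN.matcher(line);
                while (matcher.find()) {
                    pids.add(matcher.group(1));
                }
            }
        }
        return pids;
    }

    public static void killTree(int rootPid) throws Exception {
        // send SIGKILL to every pid found in the tree
        for (String pid : collectPidTree(rootPid)) {
            Runtime.getRuntime().exec(new String[]{"kill", "-9", pid});
        }
    }
}
```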
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.exception.BaseException;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
* hadoop utils
* single instance
*/
public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
.newBuilder()
.expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
.build(new CacheLoader<String, HadoopUtils>() {
@Override
public HadoopUtils load(String key) throws Exception {
return new HadoopUtils();
}
});
private static volatile boolean yarnEnabled = false;
private Configuration configuration;
private FileSystem fs;
private HadoopUtils() {
init();
initHdfsPath();
}
public static HadoopUtils getInstance() {
return cache.getUnchecked(HADOOP_UTILS_KEY);
}
/**
* init dolphinscheduler root path in hdfs
*/
private void initHdfsPath() {
Path path = new Path(resourceUploadPath);
try {
if (!fs.exists(path)) {
fs.mkdirs(path);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* init hadoop configuration
*/
private void init() {
try {
configuration = new HdfsConfiguration();
String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);
if (resUploadType == ResUploadType.HDFS) {
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
hdfsUser = "";
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
//first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
// the default is the local file system
if (defaultFS.startsWith("file")) {
String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULTFS);
if (StringUtils.isNotBlank(defaultFSProp)) {
Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
configuration.set(Constants.FS_DEFAULTFS, defaultFSProp);
fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
} else {
logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULTFS);
throw new RuntimeException(
String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULTFS)
);
}
} else {
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
if (StringUtils.isNotEmpty(hdfsUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws Exception {
fs = FileSystem.get(configuration);
return true;
}
});
} else {
logger.warn("hdfs.root.user is not set value!");
fs = FileSystem.get(configuration);
}
}
} else if (resUploadType == ResUploadType.S3) {
System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
configuration.set(Constants.FS_S3A_SECRET_KEY, PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
fs = FileSystem.get(configuration);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* @return Configuration
*/
public Configuration getConfiguration() {
return configuration;
}
/**
* get application url
*
* @param applicationId application id
* @return url of application
*/
public String getApplicationUrl(String applicationId) throws Exception {
        /**
         * if rmHaIds contains xx, it means the resourcemanager is not used
         * otherwise:
         * if rmHaIds is empty, a single resourcemanager is enabled
         * if rmHaIds is not empty, resourcemanager HA is enabled
         */
yarnEnabled = true;
String appUrl = StringUtils.isEmpty(rmHaIds) ? appAddress : getAppAddress(appAddress, rmHaIds);
if (StringUtils.isBlank(appUrl)) {
throw new BaseException("yarn application url generation failed");
}
if (logger.isDebugEnabled()) {
logger.debug("yarn application url:{}, applicationId:{}", appUrl, applicationId);
}
return String.format(appUrl, applicationId);
}
public String getJobHistoryUrl(String applicationId) {
//eg:application_1587475402360_712719 -> job_1587475402360_712719
String jobId = applicationId.replace("application", "job");
return String.format(jobHistoryAddress, jobId);
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @return byte[] byte array
* @throws IOException errors
*/
public byte[] catFile(String hdfsFilePath) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return new byte[0];
}
try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) {
return IOUtils.toByteArray(fsDataInputStream);
}
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @param skipLineNums skip line numbers
* @param limit read how many lines
* @return content of file
* @throws IOException errors
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return Collections.emptyList();
}
try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
Stream<String> stream = br.lines().skip(skipLineNums).limit(limit);
return stream.collect(Collectors.toList());
}
}
/**
* make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
*
* @param hdfsPath path to create
* @return mkdir result
* @throws IOException errors
*/
public boolean mkdir(String hdfsPath) throws IOException {
return fs.mkdirs(new Path(hdfsPath));
}
/**
* copy files between FileSystems
*
* @param srcPath source hdfs path
* @param dstPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf());
}
/**
* the src file is on the local disk. Add it to FS at
* the given dst name.
*
* @param srcFile local file
* @param dstHdfsPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcFile);
Path dstPath = new Path(dstHdfsPath);
fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath);
return true;
}
/**
* copy hdfs file to local
*
* @param srcHdfsFilePath source hdfs file path
* @param dstFile destination file
* @param deleteSource delete source
* @param overwrite overwrite
* @return result of copy hdfs file to local
* @throws IOException errors
*/
public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcHdfsFilePath);
File dstPath = new File(dstFile);
if (dstPath.exists()) {
if (dstPath.isFile()) {
if (overwrite) {
Files.delete(dstPath.toPath());
}
} else {
logger.error("destination file must be a file");
}
}
if (!dstPath.getParentFile().exists()) {
dstPath.getParentFile().mkdirs();
}
return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
}
/**
* delete a file
*
* @param hdfsFilePath the path to delete.
* @param recursive if path is a directory and set to
* true, the directory is deleted else throws an exception. In
* case of a file the recursive can be set to either true or false.
* @return true if delete is successful else false.
* @throws IOException errors
*/
public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
return fs.delete(new Path(hdfsFilePath), recursive);
}
/**
* check if exists
*
* @param hdfsFilePath source file path
* @return result of exists or not
* @throws IOException errors
*/
public boolean exists(String hdfsFilePath) throws IOException {
return fs.exists(new Path(hdfsFilePath));
}
/**
* Gets a list of files in the directory
*
* @param filePath file path
* @return {@link FileStatus} file status
* @throws Exception errors
*/
public FileStatus[] listFileStatus(String filePath) throws Exception {
try {
return fs.listStatus(new Path(filePath));
} catch (IOException e) {
logger.error("Get file list exception", e);
throw new Exception("Get file list exception", e);
}
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*
* @param src path to be renamed
* @param dst new path after rename
* @return true if rename is successful
* @throws IOException on failure
*/
public boolean rename(String src, String dst) throws IOException {
return fs.rename(new Path(src), new Path(dst));
}
/**
* hadoop resourcemanager enabled or not
*
* @return result
*/
public boolean isYarnEnabled() {
return yarnEnabled;
}
/**
* get the state of an application
*
* @param applicationId application id
* @return the return may be null or there may be other parse exceptions
*/
public ExecutionStatus getApplicationStatus(String applicationId) throws Exception {
if (StringUtils.isEmpty(applicationId)) {
return null;
}
String result = Constants.FAILED;
String applicationUrl = getApplicationUrl(applicationId);
logger.info("applicationUrl={}", applicationUrl);
String responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(applicationUrl) : HttpUtils.get(applicationUrl);
if (responseContent != null) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("app")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("app").path("finalStatus").asText();
} else {
//may be in job history
String jobHistoryUrl = getJobHistoryUrl(applicationId);
logger.info("jobHistoryUrl={}", jobHistoryUrl);
responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(jobHistoryUrl) : HttpUtils.get(jobHistoryUrl);
if (null != responseContent) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("job")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("job").path("state").asText();
} else {
return ExecutionStatus.FAILURE;
}
}
switch (result) {
case Constants.ACCEPTED:
return ExecutionStatus.SUBMITTED_SUCCESS;
case Constants.SUCCEEDED:
return ExecutionStatus.SUCCESS;
case Constants.NEW:
case Constants.NEW_SAVING:
case Constants.SUBMITTED:
case Constants.FAILED:
return ExecutionStatus.FAILURE;
case Constants.KILLED:
return ExecutionStatus.KILL;
case Constants.RUNNING:
default:
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
* get data hdfs path
*
* @return data hdfs path
*/
public static String getHdfsDataBasePath() {
if ("/".equals(resourceUploadPath)) {
// if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
return "";
} else {
return resourceUploadPath;
}
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @param resourceType resource type
* @return hdfs resource dir
*/
public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
String hdfsDir = "";
if (resourceType.equals(ResourceType.FILE)) {
hdfsDir = getHdfsResDir(tenantCode);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsDir = getHdfsUdfDir(tenantCode);
}
return hdfsDir;
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @return hdfs resource dir
*/
public static String getHdfsResDir(String tenantCode) {
return String.format("%s/resources", getHdfsTenantDir(tenantCode));
}
/**
* hdfs user dir
*
* @param tenantCode tenant code
* @param userId user id
* @return hdfs resource dir
*/
public static String getHdfsUserDir(String tenantCode, int userId) {
return String.format("%s/home/%d", getHdfsTenantDir(tenantCode), userId);
}
/**
* hdfs udf dir
*
* @param tenantCode tenant code
* @return get udf dir on hdfs
*/
public static String getHdfsUdfDir(String tenantCode) {
return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
}
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param fileName file name
* @return hdfs file name
*/
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsDir(resourceType, tenantCode), fileName);
}
/**
* get absolute path and name for resource file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}
/**
* @param tenantCode tenant code
* @return file directory of tenants on hdfs
*/
public static String getHdfsTenantDir(String tenantCode) {
return String.format("%s/%s", getHdfsDataBasePath(), tenantCode);
}
/**
* getAppAddress
*
* @param appAddress app address
* @param rmHa resource manager ha
* @return app address
*/
public static String getAppAddress(String appAddress, String rmHa) {
//get active ResourceManager
        String activeRM = YarnHAAdminUtils.getActiveRMName(rmHa);
if (StringUtils.isEmpty(activeRM)) {
return null;
}
String[] split1 = appAddress.split(Constants.DOUBLE_SLASH);
if (split1.length != 2) {
return null;
}
String start = split1[0] + Constants.DOUBLE_SLASH;
String[] split2 = split1[1].split(Constants.COLON);
if (split2.length != 2) {
return null;
}
String end = Constants.COLON + split2[1];
return start + activeRM + end;
}
@Override
public void close() throws IOException {
if (fs != null) {
try {
fs.close();
} catch (IOException e) {
logger.error("Close HadoopUtils instance failed", e);
throw new IOException("Close HadoopUtils instance failed", e);
}
}
}
/**
* yarn ha admin utils
*/
private static final class YarnHAAdminUtils extends RMAdminCLI {
        /**
         * get active resourcemanager
         *
         * @param rmIds resourcemanager ids, comma separated
         * @return the active resourcemanager id, or null if none is active
         */
        public static String getActiveRMName(String rmIds) {
String[] rmIdArr = rmIds.split(Constants.COMMA);
int activeResourceManagerPort = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);
String yarnUrl = "http://%s:" + activeResourceManagerPort + "/ws/v1/cluster/info";
try {
/**
* send http get request to rm
*/
for (String rmId : rmIdArr) {
String state = getRMState(String.format(yarnUrl, rmId));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmId;
}
}
} catch (Exception e) {
logger.error("yarn ha application url generation failed, message:{}", e.getMessage());
}
return null;
}
        /**
         * get ResourceManager state
         *
         * @param url resourcemanager cluster info url
         * @return the HA state of the resourcemanager, or null if it cannot be determined
         */
        public static String getRMState(String url) {
String retStr = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(url) : HttpUtils.get(url);
if (StringUtils.isEmpty(retStr)) {
return null;
}
//to json
ObjectNode jsonObject = JSONUtils.parseObject(retStr);
//get ResourceManager state
if (!jsonObject.has("clusterInfo")) {
return null;
}
return jsonObject.get("clusterInfo").path("haState").asText();
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
---
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
Abnormal behavior:
After clicking kill on the page, the workflow instance stays in the "ready to stop" state
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker logs
Log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
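Complementary to killing the local process tree, the worker can also cancel any YARN applications the task submitted; their ids are already parsed from the task log, as `AbstractCommandExecutor.getAppIds` in the previous row shows. A minimal sketch, assuming the standard `yarn application -kill` CLI is available on the worker host:

```java
import java.util.List;

// Sketch: cancel submitted YARN applications so a backgrounded job does not
// keep running on the cluster after the local shell process has been killed.
public class YarnAppCanceller {

    public static void cancelYarnApplications(List<String> appIds) {
        for (String appId : appIds) {
            try {
                // "yarn application -kill <appId>" is the standard YARN CLI command
                Runtime.getRuntime().exec(new String[]{"yarn", "application", "-kill", appId});
            } catch (Exception e) {
                // keep going: one failed cancellation should not skip the rest
                e.printStackTrace();
            }
        }
    }
}
```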
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
public class StringUtils {
public static final String EMPTY = "";
private StringUtils() {
throw new UnsupportedOperationException("Construct StringUtils");
}
public static boolean isEmpty(final CharSequence cs) {
return cs == null || cs.length() == 0;
}
public static boolean isNotEmpty(final CharSequence cs) {
return !isEmpty(cs);
}
    public static boolean isBlank(String str) {
        if (str == null || str.isEmpty()) {
            return true;
        }
        for (int i = 0; i < str.length(); i++) {
            if (!Character.isWhitespace(str.charAt(i))) {
                return false;
            }
        }
        return true;
    }
public static boolean isNotBlank(String s) {
return !isBlank(s);
}
public static String trim(String str) {
return str == null ? null : str.trim();
}
public static String defaultIfBlank(String str, String defaultStr) {
return isBlank(str) ? defaultStr : str;
}
public static boolean equalsIgnoreCase(String str1, String str2) {
return str1 == null ? str2 == null : str1.equalsIgnoreCase(str2);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
---
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
Abnormal behavior:
After clicking kill on the page, the workflow instance stays in the "ready to stop" state
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker logs
Log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.junit.Assert;
import org.junit.Test;
public class StringUtilsTest {
@Test
public void testIsNotEmpty() {
//null string
boolean b = StringUtils.isNotEmpty(null);
Assert.assertFalse(b);
//"" string
b = StringUtils.isNotEmpty("");
Assert.assertFalse(b);
//" " string
b = StringUtils.isNotEmpty(" ");
Assert.assertTrue(b);
//"test" string
b = StringUtils.isNotEmpty("test");
Assert.assertTrue(b);
}
@Test
public void testIsNotBlank() {
//null string
boolean b = StringUtils.isNotBlank(null);
Assert.assertFalse(b);
//"" string
b = StringUtils.isNotBlank("");
Assert.assertFalse(b);
//" " string
b = StringUtils.isNotBlank(" ");
Assert.assertFalse(b);
//" test " string
b = StringUtils.isNotBlank(" test ");
Assert.assertTrue(b);
//"test" string
b = StringUtils.isNotBlank("test");
Assert.assertTrue(b);
}
@Test
public void testTrim() {
String trim = StringUtils.trim(null);
Assert.assertNull(trim);
trim = StringUtils.trim(" test ");
Assert.assertEquals("test", trim);
}
@Test
public void testDefaultIfBlank() {
String defaultStr = StringUtils.defaultIfBlank("", "defaultStr");
Assert.assertEquals("defaultStr", defaultStr);
defaultStr = StringUtils.defaultIfBlank("test", "defaultStr");
Assert.assertEquals("test", defaultStr);
}
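
    // Added sketch: the original class does not cover equalsIgnoreCase; this case
    // follows the existing style of the tests above.
    @Test
    public void testEqualsIgnoreCase() {
        Assert.assertTrue(StringUtils.equalsIgnoreCase(null, null));
        Assert.assertFalse(StringUtils.equalsIgnoreCase(null, "test"));
        Assert.assertFalse(StringUtils.equalsIgnoreCase("test", null));
        Assert.assertTrue(StringUtils.equalsIgnoreCase("TEST", "test"));
        Assert.assertFalse(StringUtils.equalsIgnoreCase("test", "tset"));
    }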
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function is abnormal
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background
2. While the YARN job is running, click the kill function for this task on the page
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
**Abnormal behavior**
After clicking kill on the page, the workflow instance status stays at "ready to stop".
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Worker log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.dao.entity.AlertGroup;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.ibatis.annotations.Param;
import java.util.List;
/**
* alertgroup mapper interface
*/
public interface AlertGroupMapper extends BaseMapper<AlertGroup> {
/**
* alertgroup page
* @param page page
* @param groupName groupName
* @return alertgroup Ipage
*/
IPage<AlertGroup> queryAlertGroupPage(Page page,
@Param("groupName") String groupName);
/**
* query by group name
* @param groupName groupName
* @return alertgroup list
*/
List<AlertGroup> queryByGroupName(@Param("groupName") String groupName);
/**
* Judge whether the alert group exist
* @param groupName groupName
* @return if exist return true else return null
*/
Boolean existGroupName(@Param("groupName") String groupName);
/**
* query by userId
* @param userId userId
* @return alertgroup list
*/
List<AlertGroup> queryByUserId(@Param("userId") int userId);
/**
* query all group list
* @return alertgroup list
*/
List<AlertGroup> queryAllGroupList();
/**
* query instance ids All
* @return list
*/
List<String> queryInstanceIdsList();
/**
     * query alert instance ids by alert group id
     *
     * @param alertGroupId alert group id
     * @return alert instance ids string
*/
String queryAlertGroupInstanceIdsById(@Param("alertGroupId") int alertGroupId);
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function behaves abnormally.
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background.
2. While the YARN job is running, click the kill function for this task on the page.
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
**Abnormal behavior**
After clicking kill on the page, the workflow instance status stays at "ready to stop".
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Worker log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
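For illustration only (not part of the original report), here is a minimal Java sketch, in the same spirit as `ProcessUtils.getPidsStr` in the file below, of collecting child pids via `pstree`. A process detached with `nohup ... &` is re-parented away from this tree, which is one way a backgrounded yarn client can survive this kill path:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PstreeDemo {
    // same shape as the pattern ProcessUtils uses on Linux: name(pid)
    private static final Pattern PID_PATTERN = Pattern.compile("\\w+\\((\\d+)\\)");

    public static void main(String[] args) throws Exception {
        int rootPid = Integer.parseInt(args[0]); // pass the root pid to inspect
        // run "pstree -p <pid>" and collect every pid printed for the subtree
        Process p = new ProcessBuilder("pstree", "-p", String.valueOf(rootPid))
                .redirectErrorStream(true).start();
        List<String> pids = new ArrayList<>();
        try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
            String line;
            while ((line = r.readLine()) != null) {
                Matcher m = PID_PATTERN.matcher(line);
                while (m.find()) {
                    pids.add(m.group(1));
                }
            }
        }
        // a child started with "nohup yarn jar ... &" that has been re-parented
        // to init is absent from this subtree, so "kill -9 <pids>" never reaches it
        System.out.println("pids in subtree: " + pids);
    }
}
```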
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/ProcessUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.service.log.LogClientService;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* mainly used to get the start command line of a process.
*/
public class ProcessUtils {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(ProcessUtils.class);
/**
* Initialization regularization, solve the problem of pre-compilation performance,
* avoid the thread safety problem of multi-thread operation
*/
private static final Pattern MACPATTERN = Pattern.compile("-[+|-]-\\s(\\d+)");
/**
     * Expression for PID recognition in pstree output on non-macOS platforms, e.g. name(pid)
     */
    private static final Pattern WINDOWSPATTERN = Pattern.compile("\\w+\\((\\d+)\\)");
private static final String LOCAL_PROCESS_EXEC = "jdk.lang.Process.allowAmbiguousCommands";
/**
* build command line characters.
*
* @param commandList command list
* @return command
*/
public static String buildCommandStr(List<String> commandList) {
String cmdstr;
String[] cmd = commandList.toArray(new String[0]);
SecurityManager security = System.getSecurityManager();
boolean allowAmbiguousCommands = isAllowAmbiguousCommands(security);
if (allowAmbiguousCommands) {
String executablePath = new File(cmd[0]).getPath();
if (needsEscaping(VERIFICATION_LEGACY, executablePath)) {
executablePath = quoteString(executablePath);
}
cmdstr = createCommandLine(
VERIFICATION_LEGACY, executablePath, cmd);
} else {
String executablePath;
try {
executablePath = getExecutablePath(cmd[0]);
} catch (IllegalArgumentException e) {
StringBuilder join = new StringBuilder();
for (String s : cmd) {
join.append(s).append(' ');
}
cmd = getTokensFromCommand(join.toString());
executablePath = getExecutablePath(cmd[0]);
// Check new executable name once more
if (security != null) {
security.checkExec(executablePath);
}
}
cmdstr = createCommandLine(
isShellFile(executablePath) ? VERIFICATION_CMD_BAT : VERIFICATION_WIN32, quoteString(executablePath), cmd);
}
return cmdstr;
}
/**
* check is allow ambiguous commands
*
* @param security security manager
* @return allow ambiguous command flag
*/
private static boolean isAllowAmbiguousCommands(SecurityManager security) {
boolean allowAmbiguousCommands = false;
if (security == null) {
allowAmbiguousCommands = true;
String value = System.getProperty(LOCAL_PROCESS_EXEC);
if (value != null) {
allowAmbiguousCommands = !Constants.STRING_FALSE.equalsIgnoreCase(value);
}
}
return allowAmbiguousCommands;
}
/**
* get executable path.
*
* @param path path
* @return executable path
*/
private static String getExecutablePath(String path) {
boolean pathIsQuoted = isQuoted(true, path, "Executable name has embedded quote, split the arguments");
File fileToRun = new File(pathIsQuoted ? path.substring(1, path.length() - 1) : path);
return fileToRun.getPath();
}
/**
* whether is shell file.
*
* @param executablePath executable path
* @return true if endsWith .CMD or .BAT
*/
private static boolean isShellFile(String executablePath) {
String upPath = executablePath.toUpperCase();
return (upPath.endsWith(".CMD") || upPath.endsWith(".BAT"));
}
/**
* quote string.
*
* @param arg argument
* @return format arg
*/
private static String quoteString(String arg) {
return '"' + arg + '"';
}
/**
* get tokens from command.
*
* @param command command
* @return token string array
*/
private static String[] getTokensFromCommand(String command) {
ArrayList<String> matchList = new ArrayList<>(8);
Matcher regexMatcher = LazyPattern.PATTERN.matcher(command);
while (regexMatcher.find()) {
matchList.add(regexMatcher.group());
}
return matchList.toArray(new String[0]);
}
/**
* Lazy Pattern.
*/
private static class LazyPattern {
/**
* Escape-support version:
* "(\")((?:\\\\\\1|.)+?)\\1|([^\\s\"]+)";
*/
private static final Pattern PATTERN = Pattern.compile("[^\\s\"]+|\"[^\"]*\"");
}
/**
* verification cmd bat.
*/
private static final int VERIFICATION_CMD_BAT = 0;
/**
* verification win32.
*/
private static final int VERIFICATION_WIN32 = 1;
/**
* verification legacy.
*/
private static final int VERIFICATION_LEGACY = 2;
/**
* escape verification.
*/
private static final char[][] ESCAPE_VERIFICATION = {{' ', '\t', '<', '>', '&', '|', '^'},
{' ', '\t', '<', '>'}, {' ', '\t'}};
/**
* create command line.
*
* @param verificationType verification type
* @param executablePath executable path
* @param cmd cmd
* @return command line
*/
private static String createCommandLine(int verificationType, final String executablePath, final String[] cmd) {
StringBuilder cmdbuf = new StringBuilder(80);
cmdbuf.append(executablePath);
for (int i = 1; i < cmd.length; ++i) {
cmdbuf.append(' ');
String s = cmd[i];
if (needsEscaping(verificationType, s)) {
cmdbuf.append('"').append(s);
if ((verificationType != VERIFICATION_CMD_BAT) && s.endsWith("\\")) {
cmdbuf.append('\\');
}
cmdbuf.append('"');
} else {
cmdbuf.append(s);
}
}
return cmdbuf.toString();
}
/**
* whether is quoted.
*
* @param noQuotesInside no quotes inside
* @param arg arg
* @param errorMessage error message
* @return boolean
*/
private static boolean isQuoted(boolean noQuotesInside, String arg, String errorMessage) {
int lastPos = arg.length() - 1;
if (lastPos >= 1 && arg.charAt(0) == '"' && arg.charAt(lastPos) == '"') {
// The argument has already been quoted.
if (noQuotesInside && arg.indexOf('"', 1) != lastPos) {
// There is ["] inside.
throw new IllegalArgumentException(errorMessage);
}
return true;
}
if (noQuotesInside && arg.indexOf('"') >= 0) {
// There is ["] inside.
throw new IllegalArgumentException(errorMessage);
}
return false;
}
/**
* whether needs escaping.
*
* @param verificationType verification type
* @param arg arg
* @return boolean
*/
private static boolean needsEscaping(int verificationType, String arg) {
boolean argIsQuoted = isQuoted((verificationType == VERIFICATION_CMD_BAT), arg, "Argument has embedded quote, use the explicit CMD.EXE call.");
if (!argIsQuoted) {
char[] testEscape = ESCAPE_VERIFICATION[verificationType];
for (char c : testEscape) {
if (arg.indexOf(c) >= 0) {
return true;
}
}
}
return false;
}
/**
* kill yarn application.
*
* @param appIds app id list
* @param logger logger
* @param tenantCode tenant code
* @param executePath execute path
*/
public static void cancelApplication(List<String> appIds, Logger logger, String tenantCode, String executePath) {
if (CollectionUtils.isNotEmpty(appIds)) {
for (String appId : appIds) {
try {
ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
if (!applicationStatus.typeIsFinished()) {
String commandFile = String
.format("%s/%s.kill", executePath, appId);
String cmd = getKerberosInitCommand() + "yarn application -kill " + appId;
execYarnKillCommand(logger, tenantCode, appId, commandFile, cmd);
}
} catch (Exception e) {
logger.error(String.format("Get yarn application app id [%s] status failed: [%s]", appId, e.getMessage()));
}
}
}
}
/**
* get kerberos init command
*/
public static String getKerberosInitCommand() {
logger.info("get kerberos init command");
StringBuilder kerberosCommandBuilder = new StringBuilder();
boolean hadoopKerberosState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false);
if (hadoopKerberosState) {
kerberosCommandBuilder.append("export KRB5_CONFIG=")
.append(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH))
.append("\n\n")
.append(String.format("kinit -k -t %s %s || true",PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH),PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)))
.append("\n\n");
logger.info("kerberos init command: {}", kerberosCommandBuilder);
}
return kerberosCommandBuilder.toString();
}
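    /*
     * Illustration (not part of the original source): with kerberos startup
     * enabled, the builder above produces a snippet of the form
     *
     *   export KRB5_CONFIG=/etc/krb5.conf
     *
     *   kinit -k -t /etc/security/keytabs/dolphin.keytab dolphin@EXAMPLE.COM || true
     *
     * where the config path, keytab path and principal are placeholders read
     * from the corresponding properties.
     */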
/**
* build kill command for yarn application
*
* @param logger logger
* @param tenantCode tenant code
* @param appId app id
* @param commandFile command file
* @param cmd cmd
*/
private static void execYarnKillCommand(Logger logger, String tenantCode, String appId, String commandFile, String cmd) {
try {
StringBuilder sb = new StringBuilder();
sb.append("#!/bin/sh\n");
sb.append("BASEDIR=$(cd `dirname $0`; pwd)\n");
sb.append("cd $BASEDIR\n");
if (CommonUtils.getSystemEnvPath() != null) {
sb.append("source ").append(CommonUtils.getSystemEnvPath()).append("\n");
}
sb.append("\n\n");
sb.append(cmd);
File f = new File(commandFile);
if (!f.exists()) {
FileUtils.writeStringToFile(new File(commandFile), sb.toString(), StandardCharsets.UTF_8);
}
String runCmd = String.format("%s %s", Constants.SH, commandFile);
runCmd = OSUtils.getSudoCmd(tenantCode, runCmd);
logger.info("kill cmd:{}", runCmd);
OSUtils.exeCmd(runCmd);
} catch (Exception e) {
logger.error(String.format("Kill yarn application app id [%s] failed: [%s]", appId, e.getMessage()));
}
}
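    /*
     * Sketch of the generated <appId>.kill file (paths and the application id
     * are placeholders, not taken from a real run; the kerberos kinit snippet
     * may be prefixed when security is enabled):
     *
     *   #!/bin/sh
     *   BASEDIR=$(cd `dirname $0`; pwd)
     *   cd $BASEDIR
     *   source /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
     *
     *   yarn application -kill application_1612345678901_0042
     *
     * The file is then executed via "sh <file>", optionally wrapped with sudo
     * for the tenant user.
     */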
/**
* kill tasks according to different task types.
*
* @param taskExecutionContext taskExecutionContext
*/
public static void kill(TaskExecutionContext taskExecutionContext) {
try {
int processId = taskExecutionContext.getProcessId();
if (processId == 0) {
logger.error("process kill failed, process id :{}, task id:{}",
processId, taskExecutionContext.getTaskInstanceId());
return;
}
String cmd = String.format("kill -9 %s", getPidsStr(processId));
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("process id:{}, cmd:{}", processId, cmd);
OSUtils.exeCmd(cmd);
} catch (Exception e) {
logger.error("kill task failed", e);
}
// find log and kill yarn job
killYarnJob(taskExecutionContext);
}
/**
* get pids str.
*
* @param processId process id
* @return pids pid String
* @throws Exception exception
*/
public static String getPidsStr(int processId) throws Exception {
StringBuilder sb = new StringBuilder();
Matcher mat = null;
// pstree pid get sub pids
if (OSUtils.isMacOS()) {
String pids = OSUtils.exeCmd(String.format("%s -sp %d", Constants.PSTREE, processId));
if (null != pids) {
mat = MACPATTERN.matcher(pids);
}
} else {
            String pids = OSUtils.exeCmd(String.format("%s -p %d", Constants.PSTREE, processId));
            if (null != pids) {
                mat = WINDOWSPATTERN.matcher(pids);
            }
}
if (null != mat) {
while (mat.find()) {
sb.append(mat.group(1)).append(" ");
}
}
return sb.toString().trim();
}
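    /*
     * For reference (assumed typical pstree output, not taken from the source):
     * on macOS "pstree -sp <pid>" prints lines like "-+- 1234 user cmd",
     * matched by MACPATTERN, while on Linux "pstree -p <pid>" prints
     * "sh(1234)---java(1240)", matched by the name(pid) pattern above.
     */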
/**
* find logs and kill yarn tasks.
*
* @param taskExecutionContext taskExecutionContext
*/
public static void killYarnJob(TaskExecutionContext taskExecutionContext) {
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
LogClientService logClient = null;
String log;
try {
logClient = new LogClientService();
log = logClient.viewLog(Host.of(taskExecutionContext.getHost()).getIp(),
Constants.RPC_PORT,
taskExecutionContext.getLogPath());
} finally {
if (logClient != null) {
logClient.close();
}
}
if (StringUtils.isNotEmpty(log)) {
List<String> appIds = LoggerUtils.getAppIds(log, logger);
String workerDir = taskExecutionContext.getExecutePath();
if (StringUtils.isEmpty(workerDir)) {
logger.error("task instance work dir is empty");
throw new RuntimeException("task instance work dir is empty");
}
if (CollectionUtils.isNotEmpty(appIds)) {
cancelApplication(appIds, logger, taskExecutionContext.getTenantCode(), taskExecutionContext.getExecutePath());
}
}
} catch (Exception e) {
logger.error("kill yarn job failure", e);
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function behaves abnormally.
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background.
2. While the YARN job is running, click the kill function for this task on the page.
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
**Abnormal behavior**
After clicking kill on the page, the workflow instance status stays at "ready to stop".
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Worker log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
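As a rough sketch (the regex value here is an assumption, since the actual `Constants.APPLICATION_REGEX` is not shown in this record), this is how application ids can be pulled out of a task log before issuing `yarn application -kill`, in the same spirit as the kill path in the file below:

```java
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AppIdExtractorDemo {
    // assumed shape of a YARN application id, e.g. application_1612345678901_0042
    private static final Pattern APP_ID = Pattern.compile("application_\\d+_\\d+");

    public static Set<String> extractAppIds(String log) {
        Set<String> appIds = new LinkedHashSet<>(); // keep order, drop duplicates
        Matcher m = APP_ID.matcher(log);
        while (m.find()) {
            appIds.add(m.group());
        }
        return appIds;
    }

    public static void main(String[] args) {
        String log = "Submitted application application_1612345678901_0042\n"
                + "tracking URL: http://rm:8088/proxy/application_1612345678901_0042/";
        System.out.println(extractAppIds(log)); // [application_1612345678901_0042]
    }
}
```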
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.Preconditions;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand;
import org.apache.dolphinscheduler.remote.command.TaskKillResponseCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.Pair;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.log.LogClientService;
import java.util.Collections;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.Channel;
/**
* task kill processor
*/
public class TaskKillProcessor implements NettyRequestProcessor {
private final Logger logger = LoggerFactory.getLogger(TaskKillProcessor.class);
/**
* worker config
*/
private final WorkerConfig workerConfig;
/**
* task callback service
*/
private final TaskCallbackService taskCallbackService;
/**
* taskExecutionContextCacheManager
*/
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
/*
* task execute manager
*/
private final WorkerManagerThread workerManager;
public TaskKillProcessor() {
this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class);
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
this.workerManager = SpringApplicationContext.getBean(WorkerManagerThread.class);
}
/**
* task kill process
*
     * @param channel the channel the command arrived on
     * @param command the received command
*/
@Override
public void process(Channel channel, Command command) {
Preconditions.checkArgument(CommandType.TASK_KILL_REQUEST == command.getType(), String.format("invalid command type : %s", command.getType()));
TaskKillRequestCommand killCommand = JSONUtils.parseObject(command.getBody(), TaskKillRequestCommand.class);
logger.info("received kill command : {}", killCommand);
Pair<Boolean, List<String>> result = doKill(killCommand);
taskCallbackService.addRemoteChannel(killCommand.getTaskInstanceId(),
new NettyRemoteChannel(channel, command.getOpaque()));
TaskKillResponseCommand taskKillResponseCommand = buildKillTaskResponseCommand(killCommand, result);
taskCallbackService.sendResult(taskKillResponseCommand.getTaskInstanceId(), taskKillResponseCommand.convert2Command());
taskExecutionContextCacheManager.removeByTaskInstanceId(taskKillResponseCommand.getTaskInstanceId());
}
/**
* do kill
*
     * @param killCommand task kill request command
* @return kill result
*/
private Pair<Boolean, List<String>> doKill(TaskKillRequestCommand killCommand) {
boolean processFlag = true;
List<String> appIds = Collections.emptyList();
int taskInstanceId = killCommand.getTaskInstanceId();
TaskExecutionContext taskExecutionContext = taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId);
try {
Integer processId = taskExecutionContext.getProcessId();
if (processId.equals(0)) {
workerManager.killTaskBeforeExecuteByInstanceId(taskInstanceId);
taskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
logger.info("the task has not been executed and has been cancelled, task id:{}", taskInstanceId);
return Pair.of(true, appIds);
}
String cmd = String.format("kill -9 %s", ProcessUtils.getPidsStr(taskExecutionContext.getProcessId()));
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("process id:{}, cmd:{}", taskExecutionContext.getProcessId(), cmd);
OSUtils.exeCmd(cmd);
} catch (Exception e) {
processFlag = false;
logger.error("kill task error", e);
}
// find log and kill yarn job
Pair<Boolean, List<String>> yarnResult = killYarnJob(Host.of(taskExecutionContext.getHost()).getIp(),
taskExecutionContext.getLogPath(),
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTenantCode());
return Pair.of(processFlag && yarnResult.getLeft(), yarnResult.getRight());
}
/**
* build TaskKillResponseCommand
*
* @param killCommand kill command
* @param result exe result
* @return build TaskKillResponseCommand
*/
private TaskKillResponseCommand buildKillTaskResponseCommand(TaskKillRequestCommand killCommand,
Pair<Boolean, List<String>> result) {
TaskKillResponseCommand taskKillResponseCommand = new TaskKillResponseCommand();
taskKillResponseCommand.setStatus(result.getLeft() ? ExecutionStatus.SUCCESS.getCode() : ExecutionStatus.FAILURE.getCode());
taskKillResponseCommand.setAppIds(result.getRight());
TaskExecutionContext taskExecutionContext = taskExecutionContextCacheManager.getByTaskInstanceId(killCommand.getTaskInstanceId());
if (taskExecutionContext != null) {
taskKillResponseCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
taskKillResponseCommand.setHost(taskExecutionContext.getHost());
taskKillResponseCommand.setProcessId(taskExecutionContext.getProcessId());
}
return taskKillResponseCommand;
}
/**
* kill yarn job
*
* @param host host
* @param logPath logPath
* @param executePath executePath
* @param tenantCode tenantCode
* @return Pair<Boolean, List<String>> yarn kill result
*/
private Pair<Boolean, List<String>> killYarnJob(String host, String logPath, String executePath, String tenantCode) {
LogClientService logClient = null;
try {
logClient = new LogClientService();
logger.info("view log host : {},logPath : {}", host, logPath);
String log = logClient.viewLog(host, Constants.RPC_PORT, logPath);
List<String> appIds = Collections.emptyList();
if (StringUtils.isNotEmpty(log)) {
appIds = LoggerUtils.getAppIds(log, logger);
if (StringUtils.isEmpty(executePath)) {
logger.error("task instance execute path is empty");
throw new RuntimeException("task instance execute path is empty");
}
if (appIds.size() > 0) {
ProcessUtils.cancelApplication(appIds, logger, tenantCode, executePath);
}
}
return Pair.of(true, appIds);
} catch (Exception e) {
logger.error("kill yarn job error", e);
} finally {
if (logClient != null) {
logClient.close();
}
}
return Pair.of(false, Collections.emptyList());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,721 | [Bug][worker] The shell background starts the YARN task scenario, and the kill function is abnormal |
**Describe the bug**
When a shell task starts a YARN job in the background, the kill function behaves abnormally.
**To Reproduce**
1. Create a new shell task that starts a YARN job in the background.
2. While the YARN job is running, click the kill function for this task on the page.
**Which version of Dolphin Scheduler:**
-[1.3.4-release,dev]
**Abnormal behavior**
After clicking kill on the page, the workflow instance status stays at "ready to stop".
![image](https://user-images.githubusercontent.com/37063904/107142822-606be400-696c-11eb-97a8-466dbdc1dd80.png)
![image](https://user-images.githubusercontent.com/37063904/107142827-6661c500-696c-11eb-99ed-2bcf204b3412.png)
Worker log while the task is running:
![image](https://user-images.githubusercontent.com/37063904/107142955-2cdd8980-696d-11eb-96e9-300a28b5e156.png)
Worker log when the task is killed:
![image](https://user-images.githubusercontent.com/37063904/107143000-86de4f00-696d-11eb-977a-e168b1b757c3.png)
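A minimal sketch (exit-code values are assumptions, and the flow is simplified from the executor in the file below) of the `waitFor`-with-timeout pattern that decides whether a task finished on its own or must be force-killed:

```java
import java.util.concurrent.TimeUnit;

public class WaitForDemo {
    // illustrative exit codes mirroring the constants used by the executor (values assumed)
    private static final int EXIT_CODE_SUCCESS = 0;
    private static final int EXIT_CODE_FAILURE = -1;

    public static int runWithTimeout(ProcessBuilder builder, long timeoutSeconds) throws Exception {
        Process process = builder.start();
        // waitFor(timeout) returns false when the deadline passes first,
        // which is the branch where the executor force-kills the task
        boolean finished = process.waitFor(timeoutSeconds, TimeUnit.SECONDS);
        if (!finished) {
            process.destroyForcibly();
            return EXIT_CODE_FAILURE;
        }
        return process.exitValue() == 0 ? EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE;
    }

    public static void main(String[] args) throws Exception {
        int code = runWithTimeout(new ProcessBuilder("sleep", "1"), 5);
        System.out.println("exit status: " + code);
    }
}
```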
| https://github.com/apache/dolphinscheduler/issues/4721 | https://github.com/apache/dolphinscheduler/pull/4722 | 9c3cec5bbdec2eea90e4525da9e4b915d4b5bb82 | e53369318bdf61f169dcbf2644caf8521b3dd536 | "2021-02-07T09:54:49Z" | java | "2021-03-30T14:33:49Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_FAILURE;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_KILL;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_SUCCESS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
/**
* abstract command executor
*/
public abstract class AbstractCommandExecutor {
/**
* rules for extracting application ID
*/
protected static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX);
protected StringBuilder varPool = new StringBuilder();
/**
* process
*/
private Process process;
/**
* log handler
*/
protected Consumer<List<String>> logHandler;
/**
* logger
*/
protected Logger logger;
/**
* log list
*/
protected final List<String> logBuffer;
    protected boolean logOutputIsSuccess = false;
/**
* SHELL result string
*/
protected String taskResultString;
/**
* taskExecutionContext
*/
protected TaskExecutionContext taskExecutionContext;
/**
* taskExecutionContextCacheManager
*/
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
public AbstractCommandExecutor(Consumer<List<String>> logHandler,
TaskExecutionContext taskExecutionContext,
Logger logger) {
this.logHandler = logHandler;
this.taskExecutionContext = taskExecutionContext;
this.logger = logger;
this.logBuffer = Collections.synchronizedList(new ArrayList<>());
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
}
protected AbstractCommandExecutor(List<String> logBuffer) {
this.logBuffer = logBuffer;
}
/**
* build process
*
* @param commandFile command file
* @throws IOException IO Exception
*/
private void buildProcess(String commandFile) throws IOException {
// setting up user to run commands
List<String> command = new LinkedList<>();
//init process builder
ProcessBuilder processBuilder = new ProcessBuilder();
// setting up a working directory
processBuilder.directory(new File(taskExecutionContext.getExecutePath()));
// merge error information to standard output stream
processBuilder.redirectErrorStream(true);
// setting up user to run commands
if (CommonUtils.isSudoEnable()) {
command.add("sudo");
command.add("-u");
command.add(taskExecutionContext.getTenantCode());
}
command.add(commandInterpreter());
command.addAll(commandOptions());
command.add(commandFile);
// setting commands
processBuilder.command(command);
process = processBuilder.start();
// print command
printCommand(command);
}
/**
* task specific execution logic
*
* @param execCommand execCommand
* @return CommandExecuteResult
* @throws Exception if error throws Exception
*/
public CommandExecuteResult run(String execCommand) throws Exception {
CommandExecuteResult result = new CommandExecuteResult();
int taskInstanceId = taskExecutionContext.getTaskInstanceId();
// If the task has been killed, then the task in the cache is null
if (null == taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) {
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
if (StringUtils.isEmpty(execCommand)) {
taskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
return result;
}
String commandFilePath = buildCommandFilePath();
// create command file if not exists
createCommandFileIfNotExists(execCommand, commandFilePath);
//build process
buildProcess(commandFilePath);
// parse process output
parseProcessOutput(process);
Integer processId = getProcessId(process);
result.setProcessId(processId);
// cache processId
taskExecutionContext.setProcessId(processId);
boolean updateTaskExecutionContextStatus = taskExecutionContextCacheManager.updateTaskExecutionContext(taskExecutionContext);
if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) {
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
// print process id
logger.info("process start, process id is: {}", processId);
// if timeout occurs, exit directly
        long remainTime = getRemainTime();
// waiting for the run to finish
boolean status = process.waitFor(remainTime, TimeUnit.SECONDS);
logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{}",
taskExecutionContext.getExecutePath(),
processId
, result.getExitStatusCode());
// if SHELL task exit
if (status) {
// set appIds
List<String> appIds = getAppIds(taskExecutionContext.getLogPath());
result.setAppIds(String.join(Constants.COMMA, appIds));
// SHELL task state
result.setExitStatusCode(process.exitValue());
// if yarn task , yarn state is final state
if (process.exitValue() == 0) {
result.setExitStatusCode(isSuccessOfYarnState(appIds) ? EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE);
}
} else {
logger.error("process has failure , exitStatusCode : {} , ready to kill ...", result.getExitStatusCode());
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_FAILURE);
}
return result;
}
public String getVarPool() {
return varPool.toString();
}
/**
* cancel application
*
* @throws Exception exception
*/
public void cancelApplication() throws Exception {
if (process == null) {
return;
}
// clear log
clear();
int processId = getProcessId(process);
logger.info("cancel process: {}", processId);
// kill , waiting for completion
boolean killed = softKill(processId);
if (!killed) {
// hard kill
hardKill(processId);
            // destroy
process.destroy();
process = null;
}
}
/**
* soft kill
*
* @param processId process id
     * @return true if the process is no longer alive
*/
private boolean softKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
// sudo -u user command to run command
String cmd = String.format("kill %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.info("kill attempt failed", e);
}
}
return !process.isAlive();
}
/**
* hard kill
*
* @param processId process id
*/
private void hardKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
String cmd = String.format("kill -9 %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.error("kill attempt failed ", e);
}
}
}
/**
* print command
*
* @param commands process builder
*/
private void printCommand(List<String> commands) {
String cmdStr;
try {
cmdStr = ProcessUtils.buildCommandStr(commands);
logger.info("task run command:\n{}", cmdStr);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* clear
*/
private void clear() {
List<String> markerList = new ArrayList<>();
markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString());
if (!logBuffer.isEmpty()) {
// log handle
logHandler.accept(logBuffer);
logBuffer.clear();
}
logHandler.accept(markerList);
}
/**
* get the standard output of the process
*
* @param process process
*/
private void parseProcessOutput(Process process) {
String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskExecutionContext.getTaskAppId());
ExecutorService getOutputLogService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService");
getOutputLogService.submit(() -> {
BufferedReader inReader = null;
try {
inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
String line;
logBuffer.add("welcome to use bigdata scheduling system...");
while ((line = inReader.readLine()) != null) {
if (line.startsWith("${setValue(")) {
varPool.append(line.substring("${setValue(".length(), line.length() - 2));
varPool.append("$VarPool$");
} else {
logBuffer.add(line);
taskResultString = line;
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
                logOutputIsSuccess = true;
close(inReader);
}
});
getOutputLogService.shutdown();
ExecutorService parseProcessOutputExecutorService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName);
parseProcessOutputExecutorService.submit(() -> {
try {
long lastFlushTime = System.currentTimeMillis();
                while (logBuffer.size() > 0 || !logOutputIsSuccess) {
if (logBuffer.size() > 0) {
lastFlushTime = flush(lastFlushTime);
} else {
Thread.sleep(Constants.DEFAULT_LOG_FLUSH_INTERVAL);
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
clear();
}
});
parseProcessOutputExecutorService.shutdown();
}
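    /*
     * Illustration (not part of the original source): a script output line
     *   ${setValue(output=hello)}
     * is captured by the branch above as "output=hello$VarPool$" in varPool
     * instead of being appended to the task log.
     */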
/**
* check yarn state
*
* @param appIds application id list
* @return is success of yarn task state
*/
public boolean isSuccessOfYarnState(List<String> appIds) {
boolean result = true;
try {
for (String appId : appIds) {
while (Stopper.isRunning()) {
ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
logger.info("appId:{}, final state:{}", appId, applicationStatus.name());
if (applicationStatus.equals(ExecutionStatus.FAILURE)
|| applicationStatus.equals(ExecutionStatus.KILL)) {
return false;
}
if (applicationStatus.equals(ExecutionStatus.SUCCESS)) {
break;
}
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
}
}
} catch (Exception e) {
logger.error(String.format("yarn applications: %s status failed ", appIds.toString()), e);
result = false;
}
return result;
}
public int getProcessId() {
return getProcessId(process);
}
/**
* get app links
*
* @param logPath log path
* @return app id list
*/
private List<String> getAppIds(String logPath) {
List<String> logs = convertFile2List(logPath);
List<String> appIds = new ArrayList<>();
/**
         * analyze the log to get the submitted yarn application ids
*/
for (String log : logs) {
String appId = findAppId(log);
if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) {
logger.info("find app id: {}", appId);
appIds.add(appId);
}
}
return appIds;
}
/**
* convert file to list
*
* @param filename file name
* @return line list
*/
private List<String> convertFile2List(String filename) {
        List<String> lineList = new ArrayList<>(100);
File file = new File(filename);
if (!file.exists()) {
return lineList;
}
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8));
String line = null;
while ((line = br.readLine()) != null) {
lineList.add(line);
}
} catch (Exception e) {
logger.error(String.format("read file: %s failed : ", filename), e);
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
return lineList;
}
/**
* find app id
*
* @param line line
* @return appid
*/
private String findAppId(String line) {
Matcher matcher = APPLICATION_REGEX.matcher(line);
if (matcher.find()) {
return matcher.group();
}
return null;
}
/**
* get remain time(s)
*
* @return remain time
*/
    private long getRemainTime() {
long usedTime = (System.currentTimeMillis() - taskExecutionContext.getStartTime().getTime()) / 1000;
long remainTime = taskExecutionContext.getTaskTimeout() - usedTime;
if (remainTime < 0) {
throw new RuntimeException("task execution time out");
}
return remainTime;
}
/**
* get process id
*
* @param process process
* @return process id
*/
private int getProcessId(Process process) {
int processId = 0;
try {
Field f = process.getClass().getDeclaredField(Constants.PID);
f.setAccessible(true);
processId = f.getInt(process);
} catch (Throwable e) {
logger.error(e.getMessage(), e);
}
return processId;
}
/**
     * flush when the log buffer size or the flush interval reaches its threshold
*
* @param lastFlushTime last flush time
* @return last flush time
*/
private long flush(long lastFlushTime) {
long now = System.currentTimeMillis();
/**
         * when the log buffer size or the flush interval reaches its threshold, then flush
*/
if (logBuffer.size() >= Constants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > Constants.DEFAULT_LOG_FLUSH_INTERVAL) {
lastFlushTime = now;
/** log handle */
logHandler.accept(logBuffer);
logBuffer.clear();
}
return lastFlushTime;
}
/**
* close buffer reader
*
* @param inReader in reader
*/
private void close(BufferedReader inReader) {
if (inReader != null) {
try {
inReader.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
protected List<String> commandOptions() {
return Collections.emptyList();
}
protected abstract String buildCommandFilePath();
protected abstract String commandInterpreter();
protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException;
public String getTaskResultString() {
return taskResultString;
}
public void setTaskResultString(String taskResultString) {
this.taskResultString = taskResultString;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,124 | [Bug][API] do not set file name when reupload |
**Describe the bug**
When an existing resource is re-uploaded, its stored file name is taken from the uploaded file's original name instead of the name we specify.
**To Reproduce**
1. upload file with name "ojdbc3.jar"
2. re-upload the file, keeping the name "ojdbc3.jar", while the real name of the uploaded file is "ojdbc6.jar"
![image](https://user-images.githubusercontent.com/11975398/111952083-88c83000-8b1f-11eb-8b76-70b72eb3372a.png)
**Expected behavior**
The fileName should be the same as the name we specify.
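A rough sketch, simplified from the service code below and not the actual fix, of why the stored file name ends up wrong on re-upload: the alias-derived name is overwritten with the upload's original name:

```java
// minimal model of the update flow (field names mirror the Resource entity;
// everything else here is illustrative)
public class ResourceRenameSketch {
    static class Resource {
        String alias;
        String fileName;
    }

    public static void main(String[] args) {
        Resource resource = new Resource();
        String name = "ojdbc3.jar";             // the name the user specifies
        String originalFilename = "ojdbc6.jar"; // real name of the uploaded file
        resource.alias = name;
        resource.fileName = name;               // fileName first follows the specified name
        // on re-upload the service overwrites fileName with the upload's
        // original name, which is the behavior reported in this issue
        resource.fileName = originalFilename;
        System.out.println(resource.alias + " -> " + resource.fileName); // ojdbc3.jar -> ojdbc6.jar
    }
}
```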
| https://github.com/apache/dolphinscheduler/issues/5124 | https://github.com/apache/dolphinscheduler/pull/5125 | f20bb54896a9ac7b65c20b0cc7b0a744d1741155 | 6b565f0aed07cd328702d9fc1bec99e682c7525f | "2021-03-22T07:14:07Z" | java | "2021-04-01T12:44:03Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ResourcesServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.ALIAS;
import static org.apache.dolphinscheduler.common.Constants.CONTENT;
import static org.apache.dolphinscheduler.common.Constants.JAR;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.RegexUtils;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.BooleanUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.ResourcesUser;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.apache.commons.beanutils.BeanMap;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.SerializationFeature;
/**
* resources service impl
*/
@Service
public class ResourcesServiceImpl extends BaseServiceImpl implements ResourcesService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceImpl.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
result = verifyResource(loginUser, type, fullName, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (DuplicateKeyException e) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new ServiceException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirectory(loginUser,fullName,type,result);
return result;
}
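    /*
     * Illustration (not part of the original source): with currentDir "/" and
     * name "dir1" the fullName above becomes "/dir1"; with currentDir "/a"
     * and name "b" it becomes "/a/b".
     */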
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
result = verifyPid(loginUser, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
result = verifyFile(name, type, file);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// check resource name exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", RegexUtils.escapeNRT(name));
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new ServiceException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", RegexUtils.escapeNRT(name), RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type) {
Boolean existResource = resourcesMapper.existResource(fullName, userId, type);
return BooleanUtils.isTrue(existResource);
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
//check resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/") + 1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
result = verifyFile(name, type, file);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFileName(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory()) {
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList;
Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
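// Editorial sketch of the child-path rewrite in updateResource (assumed values):
//   String matcherFullName = Matcher.quoteReplacement("/b"); // escapes '\' and '$'
//   "/a/x/y.sh".replaceFirst("/a", matcherFullName)          // -> "/b/x/y.sh"
// Note that replaceFirst treats originFullName as a regex pattern;
// quoteReplacement only protects the replacement side, not the pattern side.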
private Result<Object> verifyFile(String name, ResourceType type, MultipartFile file) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
// rename file suffix and original suffix must be consistent
logger.error("rename file suffix and original suffix must be consistent: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
return result;
}
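// Editorial example (assumed names): verifyFile("job.sh", FILE, upload named
// "job.sh") passes, while verifyFile("job.py", FILE, upload named "job.sh")
// fails with RESOURCE_SUFFIX_FORBID_CHANGE because the two suffixes differ.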
/**
* query resources list paging
*
* @param loginUser login user
* @param directoryId directory id
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@Override
public Map<String, Object> queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>();
Page<Resource> page = new Page<>(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
}
if (directoryId != -1) {
Resource directory = resourcesMapper.selectById(directoryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
List<Integer> resourcesIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 0);
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page, userId, directoryId, type.ordinal(), searchVal,resourcesIds);
PageInfo<Resource> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirectory(User loginUser,String fullName,ResourceType type,Result<Object> result) {
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
* @param type resource type
* @return true if the upload succeeds, otherwise false
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
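// Editorial sketch, hedged (the exact layout is owned by HadoopUtils): for
// tenant "dev", a FILE fullName "/dirA/app.jar" typically resolves to a path
// of the form <hdfs base>/dev/resources/dirA/app.jar (UDF resources live under
// a separate udfs directory); the upload is staged to a random local file and
// then copied into that path.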
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@Override
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>();
List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query resource list by program type
*
* @param loginUser login user
* @param type resource type
* @param programType program type
* @return resource list
*/
@Override
public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) {
Map<String, Object> result = new HashMap<>();
List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type);
String suffix = ".jar";
if (programType != null) {
switch (programType) {
case JAVA:
case SCALA:
break;
case PYTHON:
suffix = ".py";
break;
default:
}
}
List<Resource> resources = new ResourceFilter(suffix, new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws IOException exception
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> delete(User loginUser, int resourceId) throws IOException {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// get all resource ids of released process definitions
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
//if resource type is UDF, check whether it is bound by UDF functions
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}", udfFuncs);
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
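// Editorial walk-through of the guard above: resourceIdSet starts as every
// resource id referenced by released process definitions, and
// retainAll(allChildren) keeps only the ids in this delete request, e.g.
//   referenced = {3, 7}, toDelete = [5, 7] -> intersection {7}
// so a non-empty intersection rejects the delete with RESOURCE_IS_USED.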
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return verification result; RESOURCE_EXIST when the resource name already exists
*/
@Override
public Result<Object> verifyResourceName(String fullName, ResourceType type, User loginUser) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, RegexUtils.escapeNRT(fullName));
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if (tenant != null) {
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if (HadoopUtils.getInstance().exists(hdfsFilename)) {
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, RegexUtils.escapeNRT(fullName), hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
} else {
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* query resource by full name or id and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return the resource matched by full name, or its parent resource when queried by id
*/
@Override
public Result<Object> queryResource(String fullName, Integer id, ResourceType type) {
Result<Object> result = new Result<>();
if (StringUtils.isBlank(fullName) && id == null) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@Override
public Result<Object> readResource(int resourceId, int skipLineNum, int limit) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {}", hdfsFileName);
try {
if (HadoopUtils.getInstance().exists(hdfsFileName)) {
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
} else {
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @param pid pid
* @param currentDir current directory
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDir) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
result = verifyResource(loginUser, type, fullName, pid);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new ServiceException(result.getMsg());
}
return result;
}
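// Editorial example (assumed inputs): fileName "etl", fileSuffix "sh",
// currentDir "/jobs" -> name "etl.sh", fullName "/jobs/etl.sh"; with
// currentDir "/" the "%s%s" format yields "/etl.sh" with no double slash.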
private Result<Object> checkResourceUploadStartupState() {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
return result;
}
private Result<Object> verifyResource(User loginUser, ResourceType type, String fullName, int pid) {
Result<Object> result = verifyResourceName(fullName, type, loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
return verifyPid(loginUser, pid);
}
private Result<Object> verifyPid(User loginUser, int pid) {
Result<Object> result = new Result<>();
putMsg(result, Status.SUCCESS);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
return result;
}
/**
* update resource content
*
* @param resourceId resource id
* @param content content
* @return update result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Result<Object> updateResourceContent(int resourceId, String content) {
Result<Object> result = checkResourceUploadStartupState();
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)) {
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new ServiceException(result.getMsg());
}
return result;
}
/**
* upload content to hdfs
*
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result<Object> uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result<Object> result = new Result<>();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, RegexUtils.escapeNRT(content));
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {}, resource dir is {}", hdfsFileName, resourcePath);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws IOException exception
*/
@Override
public org.springframework.core.io.Resource downloadResource(int resourceId) throws IOException {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new ServiceException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new ServiceException("can't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user id {} not exists", userId);
throw new ServiceException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null) {
logger.error("tenant id {} not exists", user.getTenantId());
throw new ServiceException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {}, download local filename is {}", hdfsFileName, localFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* build the resource tree that can be authorized to the specified user
*
* @param loginUser login user
* @param userId user id
* @return authorizable resource tree
*/
@Override
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
} else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list;
if (resourceList != null && !resourceList.isEmpty()) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
} else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result, Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@Override
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
//only admin can operate
if (isNotAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@Override
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@Override
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Resource> authedResources = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM);
Visitor visitor = new ResourceTreeVisitor(authedResources);
String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(visit);
String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
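// Editorial example: resourceSet {r1, r2, r3} minus authedResourceList [r2]
// leaves {r1, r3}; removeAll relies on the elements' equals/hashCode.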
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return tenant code
*/
private String getTenantCode(int userId,Result<Object> result) {
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null) {
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf) {
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if (resource.isDirectory()) {
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList) {
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
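// Editorial recursion sketch (assumed ids): for a tree 10:/a -> 11:/a/b ->
// 12:/a/b/c.sh, listAllChildren(resource(10), true) collects [10, 11, 12];
// with containSelf == false it collects only [11, 12].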
/**
* query authored resource list (own and authorized)
* @param loginUser login user
* @param type ResourceType
* @return all authored resource list
*/
private List<Resource> queryAuthoredResourceList(User loginUser, ResourceType type) {
List<Resource> relationResources;
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
relationResources = new ArrayList<>();
} else {
// query resource relation
relationResources = queryResourceList(userId, 0);
}
List<Resource> ownResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
ownResourceList.addAll(relationResources);
return ownResourceList;
}
/**
* query resource list by userId and perm
* @param userId userId
* @param perm perm
* @return resource list
*/
private List<Resource> queryResourceList(Integer userId, int perm) {
List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, perm);
return CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourcesMapper.queryResourceListById(resIds);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,055 | [Bug][UI] Can not upload file to correct dir |
**Describe the bug**
Uploading a file from inside a resource directory saves it to the wrong directory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create two file resource directories on the UI, named dirA and dirB
2. Upload a file to dirA
3. Upload another file to dirB; the file appears in dirA instead of dirB
**Expected behavior**
The file is uploaded to the correct directory.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5055 | https://github.com/apache/dolphinscheduler/pull/5056 | 5d7dc1cb2aff2147b09b13ed002cbe54f3478a2c | e8c9c33d7e45b361c3c25085b01d3d78b6b11d90 | "2021-03-15T10:09:31Z" | java | "2021-04-01T12:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/list.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model">
<div class="table-box">
<el-table :data="list" size="mini" style="width: 100%">
<el-table-column type="index" :label="$t('#')" width="50"></el-table-column>
<el-table-column :label="$t('Name')">
<template slot-scope="scope">
<el-popover trigger="hover" placement="top">
<p>{{ scope.row.alias }}</p>
<div slot="reference" class="name-wrapper">
<a href="javascript:" class="links" @click="_go(scope.row)">{{ scope.row.alias }}</a>
</div>
</el-popover>
</template>
</el-table-column>
<el-table-column :label="$t('Whether directory')" width="100">
<template slot-scope="scope">
{{scope.row.directory? $t('Yes') : $t('No')}}
</template>
</el-table-column>
<el-table-column prop="fileName" :label="$t('File Name')"></el-table-column>
<el-table-column :label="$t('Description')" width="200">
<template slot-scope="scope">
<span>{{scope.row.description | filterNull}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Size')">
<template slot-scope="scope">
{{_rtSize(scope.row.size)}}
</template>
</el-table-column>
<el-table-column :label="$t('Update Time')" min-width="120">
<template slot-scope="scope">
<span>{{scope.row.updateTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Operation')" width="150">
<template slot-scope="scope">
<el-tooltip :content="$t('Edit')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-edit-outline" @click="_edit(scope.row)" :disabled="_rtDisb(scope.row)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Rename')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-edit" @click="_rename(scope.row,scope.$index)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Download')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-download" @click="_downloadFile(scope.row)" :disabled="scope.row.directory? true: false" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Delete')" placement="top" :enterable="false">
<el-popconfirm
:confirmButtonText="$t('Confirm')"
:cancelButtonText="$t('Cancel')"
icon="el-icon-info"
iconColor="red"
:title="$t('Delete?')"
@onConfirm="_delete(scope.row,scope.row.id)"
>
<el-button type="danger" size="mini" icon="el-icon-delete" circle slot="reference"></el-button>
</el-popconfirm>
</el-tooltip>
</template>
</el-table-column>
</el-table>
</div>
<el-dialog
:visible.sync="renameDialog"
width="auto">
<m-rename :item="item" @onUpDate="onUpDate" @close="close"></m-rename>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import mRename from './rename'
import { mapActions } from 'vuex'
import { filtTypeArr } from '../../_source/common'
import { bytesToSize } from '@/module/util/util'
import { downloadFile } from '@/module/download'
import localStore from '@/module/util/localStorage'
export default {
name: 'file-manage-list',
data () {
return {
list: [],
renameDialog: false
}
},
props: {
fileResourcesList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('resource', ['deleteResource']),
_edit (item) {
localStore.setItem('file', `${item.alias}|${item.size}`)
this.$router.push({ path: `/resource/file/edit/${item.id}` })
},
_go (item) {
localStore.setItem('file', `${item.alias}|${item.size}`)
if (item.directory) {
localStore.setItem('currentDir', `${item.fullName}`)
this.$router.push({ path: `/resource/file/subdirectory/${item.id}` })
} else {
this.$router.push({ path: `/resource/file/list/${item.id}` })
}
},
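// Editorial note (not in the original source): _go stores the clicked
// directory's fullName in localStorage under 'currentDir'; the subdirectory
// page later reads it back (e.g. localStore.getItem('currentDir').split('/'))
// to rebuild its breadcrumb.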
_downloadFile (item) {
downloadFile('resources/download', {
id: item.id
})
},
_rtSize (val) {
return bytesToSize(parseInt(val))
},
_delete (item, i) {
this.deleteResource({
id: item.id
}).then(res => {
this.$emit('on-update')
this.$message.success(res.msg)
}).catch(e => {
this.$message.error(e.msg || '')
})
},
_rename (item, i) {
this.item = item
this.index = i
this.renameDialog = true
},
onUpDate (item) {
this.$set(this.list, this.index, item)
this.renameDialog = false
},
close () {
this.renameDialog = false
},
_rtDisb ({ alias, size }) {
// editable only when the suffix is a supported text type and the file is under 1 MB
let i = alias.lastIndexOf('.')
let suffix = _.trimStart(alias.substring(i, alias.length), '.')
let editable = _.includes(filtTypeArr, suffix) && size < 1000000
return !editable
}
},
watch: {
fileResourcesList (a) {
this.list = []
setTimeout(() => {
this.list = a
})
}
// Listening for routing changes
// '$route': {
// deep: false,
// handler () {
// this.$emit('on-update',this.$route.params.id)
// }
// }
},
beforeRouteUpdate (to, from, next) {
next() // next() must always be called here
},
created () {
},
mounted () {
this.list = this.fileResourcesList
},
components: { mRename }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,055 | [Bug][UI] Can not upload file to correct dir |
**Describe the bug**
Uploading a file from inside a resource directory saves it to the wrong directory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create two file resource directories on the UI, named dirA and dirB
2. Upload a file to dirA
3. Upload another file to dirB; the file appears in dirA instead of dirB
**Expected behavior**
The file is uploaded to the correct directory.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5055 | https://github.com/apache/dolphinscheduler/pull/5056 | 5d7dc1cb2aff2147b09b13ed002cbe54f3478a2c | e8c9c33d7e45b361c3c25085b01d3d78b6b11d90 | "2021-03-15T10:09:31Z" | java | "2021-04-01T12:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model">
<div class="table-box">
<el-table :data="list" size="mini" style="width: 100%">
<el-table-column type="index" :label="$t('#')" width="50"></el-table-column>
<el-table-column :label="$t('UDF Resource Name')" min-width="100">
<template slot-scope="scope">
<el-popover trigger="hover" placement="top">
<p>{{ scope.row.alias }}</p>
<div slot="reference" class="name-wrapper">
<a href="javascript:" class="links" @click="_go(scope.row)">{{ scope.row.alias }}</a>
</div>
</el-popover>
</template>
</el-table-column>
<el-table-column :label="$t('Whether directory')" min-width="100">
<template slot-scope="scope">
{{scope.row.directory? $t('Yes') : $t('No')}}
</template>
</el-table-column>
<el-table-column prop="fileName" :label="$t('File Name')"></el-table-column>
<el-table-column :label="$t('File Size')">
<template slot-scope="scope">
{{_rtSize(scope.row.size)}}
</template>
</el-table-column>
<el-table-column :label="$t('Description')" width="200">
<template slot-scope="scope">
<span>{{scope.row.description | filterNull}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Create Time')" min-width="120">
<template slot-scope="scope">
<span>{{scope.row.createTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Update Time')" min-width="120">
<template slot-scope="scope">
<span>{{scope.row.updateTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Operation')" min-width="120">
<template slot-scope="scope">
<el-tooltip :content="$t('Rename')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-edit" @click="_rename(scope.row,scope.$index)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Download')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-download" @click="_downloadFile(scope.row)" :disabled="scope.row.directory? true: false" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Delete')" placement="top" :enterable="false">
<el-popconfirm
:confirmButtonText="$t('Confirm')"
:cancelButtonText="$t('Cancel')"
icon="el-icon-info"
iconColor="red"
:title="$t('Delete?')"
@onConfirm="_delete(scope.row,scope.row.id)"
>
<el-button type="danger" size="mini" icon="el-icon-delete" circle slot="reference"></el-button>
</el-popconfirm>
</el-tooltip>
</template>
</el-table-column>
</el-table>
</div>
<el-dialog
:visible.sync="renameDialog"
width="45%">
<m-rename :item="item" @onUpDate="onUpDate" @close="close"></m-rename>
</el-dialog>
</div>
</template>
<script>
import { mapActions } from 'vuex'
import mRename from './rename'
import { downloadFile } from '@/module/download'
import { bytesToSize } from '@/module/util/util'
import localStore from '@/module/util/localStorage'
export default {
name: 'udf-manage-list',
data () {
return {
list: [],
renameDialog: false,
index: null
}
},
props: {
udfResourcesList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('resource', ['deleteResource']),
_downloadFile (item) {
downloadFile('resources/download', {
id: item.id
})
},
_go (item) {
localStore.setItem('file', `${item.alias}|${item.size}`)
if (item.directory) {
localStore.setItem('currentDir', `${item.fullName}`)
this.$router.push({ path: `/resource/udf/subUdfDirectory/${item.id}` })
}
},
_rtSize (val) {
return bytesToSize(parseInt(val))
},
_delete (item, i) {
this.deleteResource({
id: item.id
}).then(res => {
this.$emit('on-update')
this.$message.success(res.msg)
}).catch(e => {
this.$message.error(e.msg || '')
})
},
_rename (item, i) {
this.item = item
this.index = i
this.renameDialog = true
},
onUpDate (item) {
this.$set(this.list, this.index, item)
this.renameDialog = false
},
close () {
this.renameDialog = false
}
},
watch: {
udfResourcesList (a) {
this.list = []
setTimeout(() => {
this.list = a
})
}
},
created () {
},
mounted () {
this.list = this.udfResourcesList
},
components: { mRename }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,055 | [Bug][UI] Can not upload file to correct dir |
**Describe the bug**
Uploading a file from inside a resource directory saves it to the wrong directory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create two file resource directories on the UI, named dirA and dirB
2. Upload a file to dirA
3. Upload another file to dirB; the file appears in dirA instead of dirB
**Expected behavior**
The file is uploaded to the correct directory.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5055 | https://github.com/apache/dolphinscheduler/pull/5056 | 5d7dc1cb2aff2147b09b13ed002cbe54f3478a2c | e8c9c33d7e45b361c3c25085b01d3d78b6b11d90 | "2021-03-15T10:09:31Z" | java | "2021-04-01T12:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/subUdfDirectory/_source/list.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model">
<div class="table-box">
<el-table :data="list" size="mini" style="width: 100%">
<el-table-column type="index" :label="$t('#')" width="50"></el-table-column>
<el-table-column :label="$t('UDF Resource Name')" min-width="150">
<template slot-scope="scope">
<el-popover trigger="hover" placement="top">
<p>{{ scope.row.alias }}</p>
<div slot="reference" class="name-wrapper">
<a href="javascript:" class="links" @click="_go(scope.row)">{{ scope.row.alias }}</a>
</div>
</el-popover>
</template>
</el-table-column>
<el-table-column :label="$t('Whether directory')" min-width="100">
<template slot-scope="scope">
{{scope.row.directory? $t('Yes') : $t('No')}}
</template>
</el-table-column>
<el-table-column prop="fileName" :label="$t('File Name')" min-width="150"></el-table-column>
<el-table-column :label="$t('File Size')">
<template slot-scope="scope">
{{_rtSize(scope.row.size)}}
</template>
</el-table-column>
<el-table-column prop="description" :label="$t('Description')" min-width="180"></el-table-column>
<el-table-column :label="$t('Create Time')" min-width="120">
<template slot-scope="scope">
<span>{{scope.row.createTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Update Time')" min-width="120">
<template slot-scope="scope">
<span>{{scope.row.updateTime | formatDate}}</span>
</template>
</el-table-column>
<el-table-column :label="$t('Operation')" min-width="120">
<template slot-scope="scope">
<el-tooltip :content="$t('Rename')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-edit" @click="_rename(scope.row,scope.$index)" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Download')" placement="top" :enterable="false">
<span><el-button type="primary" size="mini" icon="el-icon-download" @click="_downloadFile(scope.row)" :disabled="scope.row.directory? true: false" circle></el-button></span>
</el-tooltip>
<el-tooltip :content="$t('Delete')" placement="top" :enterable="false">
<el-popconfirm
:confirmButtonText="$t('Confirm')"
:cancelButtonText="$t('Cancel')"
icon="el-icon-info"
iconColor="red"
:title="$t('Delete?')"
@onConfirm="_delete(scope.row,scope.row.id)"
>
<el-button type="danger" size="mini" icon="el-icon-delete" circle slot="reference"></el-button>
</el-popconfirm>
</el-tooltip>
</template>
</el-table-column>
</el-table>
</div>
<el-dialog
:visible.sync="renameDialog"
width="auto">
<m-rename :item="item" @onUpDate="onUpDate" @close="close"></m-rename>
</el-dialog>
</div>
</template>
<script>
import { mapActions } from 'vuex'
import mRename from './rename'
import { downloadFile } from '@/module/download'
import { bytesToSize } from '@/module/util/util'
import localStore from '@/module/util/localStorage'
export default {
name: 'udf-manage-list',
data () {
return {
list: [],
renameDialog: false,
index: null
}
},
props: {
udfResourcesList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('resource', ['deleteResource']),
_downloadFile (item) {
downloadFile('resources/download', {
id: item.id
})
},
_go (item) {
localStore.setItem('file', `${item.alias}|${item.size}`)
if (item.directory) {
localStore.setItem('currentDir', `${item.fullName}`)
this.$router.push({ path: `/resource/udf/subUdfDirectory/${item.id}` })
}
},
_rtSize (val) {
return bytesToSize(parseInt(val))
},
_delete (item, i) {
this.deleteResource({
id: item.id
}).then(res => {
this.$emit('on-update')
this.$message.success(res.msg)
}).catch(e => {
this.$message.error(e.msg || '')
})
},
_rename (item, i) {
this.item = item
this.index = i
this.renameDialog = true
},
onUpDate (item) {
this.$set(this.list, this.index, item)
this.renameDialog = false
},
close () {
this.renameDialog = false
}
},
watch: {
udfResourcesList (a) {
this.list = []
setTimeout(() => {
this.list = a
})
}
},
created () {
},
mounted () {
this.list = this.udfResourcesList
},
components: { mRename }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,055 | [Bug][UI] Can not upload file to correct dir |
**Describe the bug**
Uploading a file from inside a resource directory saves it to the wrong directory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create two file resource directories on the UI, named dirA and dirB
2. Upload a file to dirA
3. Upload another file to dirB; the file appears in dirA instead of dirB
**Expected behavior**
The file is uploaded to the correct directory.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5055 | https://github.com/apache/dolphinscheduler/pull/5056 | 5d7dc1cb2aff2147b09b13ed002cbe54f3478a2c | e8c9c33d7e45b361c3c25085b01d3d78b6b11d90 | "2021-03-15T10:09:31Z" | java | "2021-04-01T12:57:46Z" | dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/subUdfDirectory/index.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="home-main list-construction-model">
<div class="content-title">
<a class="bread" style="padding-left: 15px;" @click="() => $router.push({path: `/resource/udf`})">{{$t('UDF Resources')}}</a>
<a class="bread" v-for="(item,$index) in breadList" :key="$index" @click="_ckOperation($index)">{{'>'+item}}</a>
</div>
<div class="conditions-box">
<m-conditions @on-conditions="_onConditions">
<template slot="button-group">
<el-button-group size="small" >
<el-button size="mini" @click="() => $router.push({name: 'resource-udf-subCreateUdfFolder'})">{{$t('Create folder')}}</el-button>
<el-button size="mini" @click="_uploading">{{$t('Upload UDF Resources')}}</el-button>
</el-button-group>
</template>
</m-conditions>
</div>
<div class="list-box">
<template v-if="udfResourcesList.length || total>0">
<m-list @on-update="_onUpdate" :udf-resources-list="udfResourcesList" :page-no="searchParams.pageNo" :page-size="searchParams.pageSize">
</m-list>
<div class="page-box">
<el-pagination
background
@current-change="_page"
@size-change="_pageSize"
:page-size="searchParams.pageSize"
:current-page.sync="searchParams.pageNo"
:page-sizes="[10, 30, 50]"
layout="sizes, prev, pager, next, jumper"
:total="total">
</el-pagination>
</div>
</template>
<template v-if="!udfResourcesList.length && total<=0">
<m-no-data></m-no-data>
</template>
<m-spin :is-spin="isLoading" :is-left="isLeft">
</m-spin>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import { mapActions } from 'vuex'
import mList from './_source/list'
import localStore from '@/module/util/localStorage'
import mSpin from '@/module/components/spin/spin'
import { findComponentDownward } from '@/module/util/'
import mNoData from '@/module/components/noData/noData'
import listUrlParamHandle from '@/module/mixin/listUrlParamHandle'
import mConditions from '@/module/components/conditions/conditions'
export default {
name: 'resource-list-index-UDF',
data () {
return {
total: null,
isLoading: false,
udfResourcesList: [],
searchParams: {
id: this.$route.params.id,
pageSize: 10,
pageNo: 1,
searchVal: '',
type: 'UDF'
},
isLeft: true,
breadList: []
}
},
mixins: [listUrlParamHandle],
props: {},
methods: {
...mapActions('resource', ['getResourcesListP', 'getResourceId']),
/**
* File Upload
*/
_uploading () {
findComponentDownward(this.$root, 'roof-nav')._resourceChildUpdate('UDF', this.searchParams.id)
},
_onConditions (o) {
this.searchParams = _.assign(this.searchParams, o)
this.searchParams.pageNo = 1
},
_page (val) {
this.searchParams.pageNo = val
},
_pageSize (val) {
this.searchParams.pageSize = val
},
_onUpdate () {
this.searchParams.id = this.$route.params.id
this._debounceGET()
},
_updateList (data) {
this.searchParams.id = data
this.searchParams.pageNo = 1
this.searchParams.searchVal = ''
this._debounceGET()
},
_getList (flag) {
if (sessionStorage.getItem('isLeft') === '0') {
this.isLeft = false
} else {
this.isLeft = true
}
this.isLoading = !flag
this.getResourcesListP(this.searchParams).then(res => {
if (this.searchParams.pageNo > 1 && res.totalList.length === 0) {
this.searchParams.pageNo = this.searchParams.pageNo - 1
} else {
this.udfResourcesList = []
this.udfResourcesList = res.totalList
this.total = res.total
this.isLoading = false
}
}).catch(e => {
this.isLoading = false
})
},
_ckOperation (index) {
let breadName = ''
this.breadList.forEach((item, i) => {
if (i <= index) {
breadName = breadName + '/' + item
}
})
this.transferApi(breadName)
},
transferApi (api) {
this.getResourceId({
type: 'UDF',
fullName: api
}).then(res => {
localStore.setItem('currentDir', `${res.fullName}`)
this.$router.push({ path: `/resource/udf/subUdfDirectory/${res.id}` })
}).catch(e => {
this.$message.error(e.msg || '')
})
}
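// Editorial sketch (not in the original source): transferApi resolves a
// breadcrumb path such as '/dirA/dirB' back to its resource id via the
// queryResource API (getResourceId), refreshes 'currentDir' in localStorage,
// then navigates, e.g. this.transferApi('/dirA') -> /resource/udf/subUdfDirectory/<id>.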
},
watch: {
// router
'$route' (a) {
// url no params get instance list
this.searchParams.pageNo = _.isEmpty(a.query) ? 1 : a.query.pageNo
this.searchParams.id = a.params.id
let dir = localStore.getItem('currentDir').split('/')
dir.shift()
this.breadList = dir
}
},
created () {
},
mounted () {
let dir = localStore.getItem('currentDir').split('/')
dir.shift()
this.breadList = dir
},
beforeDestroy () {
sessionStorage.setItem('isLeft', 1)
},
components: { mConditions, mList, mSpin, mNoData }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.bread {
font-size: 22px;
padding-top: 10px;
color: #2a455b;
display: inline-block;
cursor: pointer;
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,055 | [Bug][UI] Can not upload file to correct dir |
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Create two file resource directories on the UI, named dirA and dirB
2. Upload a file to dirA
3. Upload another file to dirB; the file appears in dirA instead of dirB
**Expected behavior**
The file is uploaded to the correct directory.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5055 | https://github.com/apache/dolphinscheduler/pull/5056 | 5d7dc1cb2aff2147b09b13ed002cbe54f3478a2c | e8c9c33d7e45b361c3c25085b01d3d78b6b11d90 | "2021-03-15T10:09:31Z" | java | "2021-04-01T12:57:46Z" | dolphinscheduler-ui/src/js/module/components/fileUpdate/fileChildUpdate.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-popup
ref="popup"
:ok-text="$t('Upload')"
:nameText="$t('File Upload')"
@ok="_ok"
@close="close"
:disabled="progress === 0 ? false : true">
<template slot="content">
<form name="files" enctype="multipart/form-data" method="post">
<div class="file-update-model"
@drop.prevent="_onDrop"
@dragover.prevent="dragOver = true"
@dragleave.prevent="dragOver = false"
id="file-update-model">
<div class="tooltip-info">
<em class="ans el-icon-warning"></em>
<span>{{$t('Drag the file into the current upload window')}}</span>
</div>
<div class="update-popup" v-if="dragOver">
<div class="icon-box">
<em class="ans el-icon-upload"></em>
</div>
<p class="p1">
<span>{{$t('Drag area upload')}}</span>
</p>
</div>
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('File Name')}}</template>
<template slot="content">
<el-input
type="input"
v-model="name"
:disabled="progress !== 0"
size="small"
:placeholder="$t('Please enter name')">
</el-input>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name">{{$t('Description')}}</template>
<template slot="content">
<el-input
type="textarea"
v-model="description"
:disabled="progress !== 0"
size="small"
:placeholder="$t('Please enter description')">
</el-input>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('Upload Files')}}</template>
<template slot="content">
<div class="file-update-box">
<template v-if="progress === 0">
<input ref="file" name="file" type="file" class="file-update" @change="_onChange">
<el-button size="mini">{{$t('Upload')}}<em class="el-icon-upload"></em></el-button>
</template>
<div class="progress-box" v-if="progress !== 0">
<m-progress-bar :value="progress" text-placement="left-right"></m-progress-bar>
</div>
</div>
</template>
</m-list-box-f>
</div>
</form>
</template>
</m-popup>
</template>
<script>
import io from '@/module/io'
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
import localStore from '@/module/util/localStorage'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
import mProgressBar from '@/module/components/progressBar/progressBar'
export default {
name: 'file-update',
data () {
return {
store,
// name
name: '',
// description
description: '',
// progress
progress: 0,
// file
file: '',
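// NOTE: currentDir and pid are snapshotted from localStorage when the component
// is created; if the dialog is reused for another directory without reset(),
// the upload posts against the stale directory (the symptom reported in issue #5055)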
currentDir: localStore.getItem('currentDir'),
pid: this.id,
// Whether to drag upload
dragOver: false
}
},
watch: {
},
props: {
type: String,
id: Number
},
methods: {
/**
* submit
*/
_ok () {
this.$refs.popup.spinnerLoading = true
if (this._validation()) {
this.store.dispatch('resource/resourceVerifyName', {
fullName: this.currentDir + '/' + this.name,
type: this.type
}).then(res => {
const isLt1024M = this.file.size / 1024 / 1024 < 1024
if (isLt1024M) {
this._formDataUpdate().then(res => {
setTimeout(() => {
this.$refs.popup.spinnerLoading = false
}, 800)
}).catch(e => {
this.$refs.popup.spinnerLoading = false
})
} else {
this.$message.warning(`${i18n.$t('Upload File Size')}`)
this.$refs.popup.spinnerLoading = false
}
}).catch(e => {
this.$message.error(e.msg || '')
this.$refs.popup.spinnerLoading = false
})
} else {
this.$refs.popup.spinnerLoading = false
}
},
/**
* validation
*/
_validation () {
if (!this.name) {
this.$message.warning(`${i18n.$t('Please enter file name')}`)
return false
}
if (!this.file) {
this.$message.warning(`${i18n.$t('Please select the file to upload')}`)
return false
}
return true
},
/**
* update file
*/
_formDataUpdate () {
return new Promise((resolve, reject) => {
let self = this
let formData = new FormData()
formData.append('file', this.file)
formData.append('type', this.type)
formData.append('name', this.name)
formData.append('pid', this.pid)
formData.append('currentDir', this.currentDir)
formData.append('description', this.description)
io.post('resources/create', res => {
this.$message.success(res.msg)
resolve()
self.$emit('onUpdateFileChildUpdate')
this.reset()
}, e => {
reject(e)
self.$emit('close')
this.$message.error(e.msg || '')
this.reset()
}, {
data: formData,
emulateJSON: false,
onUploadProgress (progressEvent) {
// Size has been uploaded
let loaded = progressEvent.loaded
// Total attachment size
let total = progressEvent.total
self.progress = Math.floor(100 * loaded / total)
self.$emit('onProgressFileChildUpdate', self.progress)
}
})
})
},
/**
* Archive to the top right corner Continue uploading
*/
_ckArchive () {
$('.update-file-modal').hide()
this.$emit('onArchiveFileChildUpdate')
},
/**
* Drag and drop upload
*/
_onDrop (e) {
let file = e.dataTransfer.files[0]
this.file = file
this.name = file.name
this.dragOver = false
},
close () {
this.$emit('closeFileChildUpdate')
},
reset () {
this.name = ''
this.description = ''
this.progress = 0
this.file = ''
this.currentDir = localStore.getItem('currentDir')
this.pid = this.id
this.dragOver = false
},
_onChange () {
let file = this.$refs.file.files[0]
this.file = file
this.name = file.name
this.$refs.file.value = null
}
},
components: { mPopup, mListBoxF, mProgressBar }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.file-update-model {
.tooltip-info {
position: absolute;
left: 20px;
bottom: 26px;
span {
font-size: 12px;
color: #666;
vertical-align: middle;
}
.fa,.ans {
color: #0097e0;
font-size: 14px;
vertical-align: middle;
}
}
.hide-archive {
position: absolute;
right: 22px;
top: 17px;
.fa,.ans{
font-size: 16px;
color: #333;
font-weight: normal;
cursor: pointer;
&:hover {
color: #0097e0;
}
}
}
.file-update-box {
padding-top: 4px;
position: relative;
.file-update {
width: 70px;
height: 40px;
position: absolute;
left: 0;
top: 0;
cursor: pointer;
filter: alpha(opacity=0);
-moz-opacity: 0;
opacity: 0;
}
&:hover {
.v-btn-dashed {
background-color: transparent;
border-color: #47c3ff;
color: #47c3ff;
cursor: pointer;
}
}
.progress-box {
width: 200px;
position: absolute;
left: 70px;
top: 14px;
}
}
.update-popup {
width: calc(100% - 20px);
height: calc(100% - 20px);
background: rgba(255,253,239,.7);
position: absolute;
top: 10px;
left: 10px;
border-radius: 3px;
z-index: 1;
border: .18rem dashed #cccccc;
.icon-box {
text-align: center;
margin-top: 96px;
.fa,.ans {
font-size: 50px;
color: #2d8cf0;
}
}
.p1 {
text-align: center;
font-size: 16px;
color: #333;
padding-top: 8px;
}
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,055 | [Bug][UI] Cannot upload file to correct dir | **Describe the bug**
A file uploaded into one resource directory ends up in a different, previously opened directory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create two file resource directories on the UI, named dirA and dirB
2. Upload a file to dirA
3. Upload another file to dirB; the file appears in dirA instead of dirB
**Expected behavior**
The file is uploaded to the correct directory.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5055 | https://github.com/apache/dolphinscheduler/pull/5056 | 5d7dc1cb2aff2147b09b13ed002cbe54f3478a2c | e8c9c33d7e45b361c3c25085b01d3d78b6b11d90 | "2021-03-15T10:09:31Z" | java | "2021-04-01T12:57:46Z" | dolphinscheduler-ui/src/js/module/components/fileUpdate/resourceChildUpdate.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-popup
ref="popup"
:ok-text="$t('Upload')"
:nameText="$t('File Upload')"
@ok="_ok"
@close="close"
:disabled="progress === 0 ? false : true">
<template slot="content">
<form name="files" enctype="multipart/form-data" method="post">
<div class="file-update-model"
@drop.prevent="_onDrop"
@dragover.prevent="dragOver = true"
@dragleave.prevent="dragOver = false"
id="file-update-model">
<div class="tooltip-info">
<em class="ans el-icon-warning"></em>
<span>{{$t('Drag the file into the current upload window')}}</span>
</div>
<div class="update-popup" v-if="dragOver">
<div class="icon-box">
<em class="ans el-icon-upload"></em>
</div>
<p class="p1">
<span>{{$t('Drag area upload')}}</span>
</p>
</div>
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('File Name')}}</template>
<template slot="content">
<el-input
type="input"
v-model="name"
:disabled="progress !== 0"
size="small"
:placeholder="$t('Please enter name')">
</el-input>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name">{{$t('Description')}}</template>
<template slot="content">
<el-input
type="textarea"
v-model="description"
:disabled="progress !== 0"
size="small"
:placeholder="$t('Please enter description')">
</el-input>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('Upload Files')}}</template>
<template slot="content">
<div class="file-update-box">
<template v-if="progress === 0">
<input ref="file" name="file" type="file" class="file-update" @change="_onChange">
<el-button size="mini">{{$t('Upload')}}<em class="el-icon-upload"></em></el-button>
</template>
<div class="progress-box" v-if="progress !== 0">
<m-progress-bar :value="progress" text-placement="left-right"></m-progress-bar>
</div>
</div>
</template>
</m-list-box-f>
</div>
</form>
</template>
</m-popup>
</template>
<script>
import io from '@/module/io'
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
import localStore from '@/module/util/localStorage'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
import mProgressBar from '@/module/components/progressBar/progressBar'
export default {
name: 'file-update',
data () {
return {
store,
// name
name: '',
// description
description: '',
// progress
progress: 0,
// file
file: '',
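// NOTE: currentDir and pid are snapshotted from localStorage when the component
// is created; if the dialog is reused for another directory without reset(),
// the upload posts against the stale directory (the symptom reported in issue #5055)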
currentDir: localStore.getItem('currentDir'),
pid: this.id,
// Whether to drag upload
dragOver: false
}
},
watch: {
},
props: {
type: String,
id: Number
},
methods: {
/**
* submit
*/
_ok () {
this.$refs.popup.spinnerLoading = true
if (this._validation()) {
this.store.dispatch('resource/resourceVerifyName', {
fullName: this.currentDir + '/' + this.name,
type: this.type
}).then(res => {
const isLt1024M = this.file.size / 1024 / 1024 < 1024
if (isLt1024M) {
this._formDataUpdate().then(res => {
setTimeout(() => {
this.$refs.popup.spinnerLoading = false
}, 800)
}).catch(e => {
this.$refs.popup.spinnerLoading = false
})
} else {
this.$message.warning(`${i18n.$t('Upload File Size')}`)
this.$refs.popup.spinnerLoading = false
}
}).catch(e => {
this.$message.error(e.msg || '')
this.$refs.popup.spinnerLoading = false
})
} else {
this.$refs.popup.spinnerLoading = false
}
},
/**
* validation
*/
_validation () {
if (!this.name) {
this.$message.warning(`${i18n.$t('Please enter file name')}`)
return false
}
if (!this.file) {
this.$message.warning(`${i18n.$t('Please select the file to upload')}`)
return false
}
return true
},
/**
* update file
*/
_formDataUpdate () {
return new Promise((resolve, reject) => {
let self = this
let formData = new FormData()
formData.append('file', this.file)
formData.append('type', this.type)
formData.append('name', this.name)
formData.append('pid', this.pid)
formData.append('currentDir', this.currentDir)
formData.append('description', this.description)
io.post('resources/create', res => {
this.$message.success(res.msg)
resolve()
self.$emit('onUpdateResourceChildUpdate')
this.reset()
}, e => {
reject(e)
self.$emit('close')
this.$message.error(e.msg || '')
this.reset()
}, {
data: formData,
emulateJSON: false,
onUploadProgress (progressEvent) {
// Size has been uploaded
let loaded = progressEvent.loaded
// Total attachment size
let total = progressEvent.total
self.progress = Math.floor(100 * loaded / total)
self.$emit('onProgressResourceChildUpdate', self.progress)
}
})
})
},
/**
* Archive to the top right corner Continue uploading
*/
_ckArchive () {
$('.update-file-modal').hide()
this.$emit('onArchiveResourceChildUpdate')
},
reset () {
this.name = ''
this.description = ''
this.progress = 0
this.file = ''
this.currentDir = localStore.getItem('currentDir')
this.pid = this.id
this.dragOver = false
},
/**
* Drag and drop upload
*/
_onDrop (e) {
let file = e.dataTransfer.files[0]
this.file = file
this.name = file.name
this.dragOver = false
},
_onChange () {
let file = this.$refs.file.files[0]
this.file = file
this.name = file.name
this.$refs.file.value = null
},
close () {
this.$emit('closeResourceChildUpdate')
}
},
components: { mPopup, mListBoxF, mProgressBar }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.file-update-model {
.tooltip-info {
position: absolute;
left: 20px;
bottom: 26px;
span {
font-size: 12px;
color: #666;
vertical-align: middle;
}
.fa,.ans {
color: #0097e0;
font-size: 14px;
vertical-align: middle;
}
}
.hide-archive {
position: absolute;
right: 22px;
top: 17px;
.fa,.ans{
font-size: 16px;
color: #333;
font-weight: normal;
cursor: pointer;
&:hover {
color: #0097e0;
}
}
}
.file-update-box {
padding-top: 4px;
position: relative;
.file-update {
width: 70px;
height: 40px;
position: absolute;
left: 0;
top: 0;
cursor: pointer;
filter: alpha(opacity=0);
-moz-opacity: 0;
opacity: 0;
}
&:hover {
.v-btn-dashed {
background-color: transparent;
border-color: #47c3ff;
color: #47c3ff;
cursor: pointer;
}
}
.progress-box {
width: 200px;
position: absolute;
left: 70px;
top: 14px;
}
}
.update-popup {
width: calc(100% - 20px);
height: calc(100% - 20px);
background: rgba(255,253,239,.7);
position: absolute;
top: 10px;
left: 10px;
border-radius: 3px;
z-index: 1;
border: .18rem dashed #cccccc;
.icon-box {
text-align: center;
margin-top: 96px;
.fa,.ans {
font-size: 50px;
color: #2d8cf0;
}
}
.p1 {
text-align: center;
font-size: 16px;
color: #333;
padding-top: 8px;
}
}
}
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,199 | [Bug][api] When sleep is included in a shell task, the task cannot be killed |
![image](https://user-images.githubusercontent.com/55787491/113400980-429f8600-93d5-11eb-8002-7684d36d6ed1.png)
![image](https://user-images.githubusercontent.com/55787491/113401140-81cdd700-93d5-11eb-89dd-a259284e5dae.png)
**Which version of Dolphin Scheduler:**
-[1.3.6-prepare]
| https://github.com/apache/dolphinscheduler/issues/5199 | https://github.com/apache/dolphinscheduler/pull/5212 | 97fb08f0ced20a4b9563725ed9e9600e5ea5722b | 4a1303b625f3425550636a522ad2e38bd8cae1b7 | "2021-04-02T09:04:44Z" | java | "2021-04-06T02:47:53Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/ProcessUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.service.log.LogClientService;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* mainly used to get the start command line of a process.
*/
public class ProcessUtils {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(ProcessUtils.class);
/**
* Pre-compiled pattern for parsing PIDs from macOS pstree output.
* Compiling once avoids per-call compilation cost and is thread-safe.
*/
private static final Pattern MACPATTERN = Pattern.compile("-[+|-]-\\s(\\d+)");
/**
* Pre-compiled pattern for extracting PIDs from pstree output on non-macOS systems
*/
private static final Pattern WINDOWSATTERN = Pattern.compile("\\w+\\((\\d+)\\)");
private static final String LOCAL_PROCESS_EXEC = "jdk.lang.Process.allowAmbiguousCommands";
/**
* build command line characters.
*
* @param commandList command list
* @return command
*/
public static String buildCommandStr(List<String> commandList) {
String cmdstr;
String[] cmd = commandList.toArray(new String[0]);
SecurityManager security = System.getSecurityManager();
boolean allowAmbiguousCommands = isAllowAmbiguousCommands(security);
if (allowAmbiguousCommands) {
String executablePath = new File(cmd[0]).getPath();
if (needsEscaping(VERIFICATION_LEGACY, executablePath)) {
executablePath = quoteString(executablePath);
}
cmdstr = createCommandLine(
VERIFICATION_LEGACY, executablePath, cmd);
} else {
String executablePath;
try {
executablePath = getExecutablePath(cmd[0]);
} catch (IllegalArgumentException e) {
StringBuilder join = new StringBuilder();
for (String s : cmd) {
join.append(s).append(' ');
}
cmd = getTokensFromCommand(join.toString());
executablePath = getExecutablePath(cmd[0]);
// Check new executable name once more
if (security != null) {
security.checkExec(executablePath);
}
}
cmdstr = createCommandLine(
isShellFile(executablePath) ? VERIFICATION_CMD_BAT : VERIFICATION_WIN32, quoteString(executablePath), cmd);
}
return cmdstr;
}
/**
* check is allow ambiguous commands
*
* @param security security manager
* @return allow ambiguous command flag
*/
private static boolean isAllowAmbiguousCommands(SecurityManager security) {
boolean allowAmbiguousCommands = false;
if (security == null) {
allowAmbiguousCommands = true;
String value = System.getProperty(LOCAL_PROCESS_EXEC);
if (value != null) {
allowAmbiguousCommands = !Constants.STRING_FALSE.equalsIgnoreCase(value);
}
}
return allowAmbiguousCommands;
}
/**
* get executable path.
*
* @param path path
* @return executable path
*/
private static String getExecutablePath(String path) {
boolean pathIsQuoted = isQuoted(true, path, "Executable name has embedded quote, split the arguments");
File fileToRun = new File(pathIsQuoted ? path.substring(1, path.length() - 1) : path);
return fileToRun.getPath();
}
/**
* whether is shell file.
*
* @param executablePath executable path
* @return true if endsWith .CMD or .BAT
*/
private static boolean isShellFile(String executablePath) {
String upPath = executablePath.toUpperCase();
return (upPath.endsWith(".CMD") || upPath.endsWith(".BAT"));
}
/**
* quote string.
*
* @param arg argument
* @return format arg
*/
private static String quoteString(String arg) {
return '"' + arg + '"';
}
/**
* get tokens from command.
*
* @param command command
* @return token string array
*/
private static String[] getTokensFromCommand(String command) {
ArrayList<String> matchList = new ArrayList<>(8);
Matcher regexMatcher = LazyPattern.PATTERN.matcher(command);
while (regexMatcher.find()) {
matchList.add(regexMatcher.group());
}
return matchList.toArray(new String[0]);
}
/**
* Lazy Pattern.
*/
private static class LazyPattern {
/**
* Escape-support version:
* "(\")((?:\\\\\\1|.)+?)\\1|([^\\s\"]+)";
*/
private static final Pattern PATTERN = Pattern.compile("[^\\s\"]+|\"[^\"]*\"");
}
/**
* verification cmd bat.
*/
private static final int VERIFICATION_CMD_BAT = 0;
/**
* verification win32.
*/
private static final int VERIFICATION_WIN32 = 1;
/**
* verification legacy.
*/
private static final int VERIFICATION_LEGACY = 2;
/**
* escape verification.
*/
private static final char[][] ESCAPE_VERIFICATION = {{' ', '\t', '<', '>', '&', '|', '^'},
{' ', '\t', '<', '>'}, {' ', '\t'}};
/**
* create command line.
*
* @param verificationType verification type
* @param executablePath executable path
* @param cmd cmd
* @return command line
*/
private static String createCommandLine(int verificationType, final String executablePath, final String[] cmd) {
StringBuilder cmdbuf = new StringBuilder(80);
cmdbuf.append(executablePath);
for (int i = 1; i < cmd.length; ++i) {
cmdbuf.append(' ');
String s = cmd[i];
if (needsEscaping(verificationType, s)) {
cmdbuf.append('"').append(s);
if ((verificationType != VERIFICATION_CMD_BAT) && s.endsWith("\\")) {
cmdbuf.append('\\');
}
cmdbuf.append('"');
} else {
cmdbuf.append(s);
}
}
return cmdbuf.toString();
}
/**
* whether is quoted.
*
* @param noQuotesInside no quotes inside
* @param arg arg
* @param errorMessage error message
* @return boolean
*/
private static boolean isQuoted(boolean noQuotesInside, String arg, String errorMessage) {
int lastPos = arg.length() - 1;
if (lastPos >= 1 && arg.charAt(0) == '"' && arg.charAt(lastPos) == '"') {
// The argument has already been quoted.
if (noQuotesInside && arg.indexOf('"', 1) != lastPos) {
// There is ["] inside.
throw new IllegalArgumentException(errorMessage);
}
return true;
}
if (noQuotesInside && arg.indexOf('"') >= 0) {
// There is ["] inside.
throw new IllegalArgumentException(errorMessage);
}
return false;
}
/**
* whether needs escaping.
*
* @param verificationType verification type
* @param arg arg
* @return boolean
*/
private static boolean needsEscaping(int verificationType, String arg) {
boolean argIsQuoted = isQuoted((verificationType == VERIFICATION_CMD_BAT), arg, "Argument has embedded quote, use the explicit CMD.EXE call.");
if (!argIsQuoted) {
char[] testEscape = ESCAPE_VERIFICATION[verificationType];
for (char c : testEscape) {
if (arg.indexOf(c) >= 0) {
return true;
}
}
}
return false;
}
/**
* kill yarn application.
*
* @param appIds app id list
* @param logger logger
* @param tenantCode tenant code
* @param executePath execute path
*/
public static void cancelApplication(List<String> appIds, Logger logger, String tenantCode, String executePath) {
if (CollectionUtils.isNotEmpty(appIds)) {
for (String appId : appIds) {
try {
ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
if (!applicationStatus.typeIsFinished()) {
String commandFile = String
.format("%s/%s.kill", executePath, appId);
String cmd = getKerberosInitCommand() + "yarn application -kill " + appId;
execYarnKillCommand(logger, tenantCode, appId, commandFile, cmd);
}
} catch (Exception e) {
logger.error(String.format("Get yarn application app id [%s] status failed: [%s]", appId, e.getMessage()));
}
}
}
}
/**
* get kerberos init command
*/
public static String getKerberosInitCommand() {
logger.info("get kerberos init command");
StringBuilder kerberosCommandBuilder = new StringBuilder();
boolean hadoopKerberosState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false);
if (hadoopKerberosState) {
kerberosCommandBuilder.append("export KRB5_CONFIG=")
.append(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH))
.append("\n\n")
.append(String.format("kinit -k -t %s %s || true",PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH),PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)))
.append("\n\n");
logger.info("kerberos init command: {}", kerberosCommandBuilder);
}
return kerberosCommandBuilder.toString();
}
/**
* build kill command for yarn application
*
* @param logger logger
* @param tenantCode tenant code
* @param appId app id
* @param commandFile command file
* @param cmd cmd
*/
private static void execYarnKillCommand(Logger logger, String tenantCode, String appId, String commandFile, String cmd) {
try {
StringBuilder sb = new StringBuilder();
sb.append("#!/bin/sh\n");
sb.append("BASEDIR=$(cd `dirname $0`; pwd)\n");
sb.append("cd $BASEDIR\n");
if (CommonUtils.getSystemEnvPath() != null) {
sb.append("source ").append(CommonUtils.getSystemEnvPath()).append("\n");
}
sb.append("\n\n");
sb.append(cmd);
File f = new File(commandFile);
if (!f.exists()) {
FileUtils.writeStringToFile(new File(commandFile), sb.toString(), StandardCharsets.UTF_8);
}
String runCmd = String.format("%s %s", Constants.SH, commandFile);
runCmd = OSUtils.getSudoCmd(tenantCode, runCmd);
logger.info("kill cmd:{}", runCmd);
OSUtils.exeCmd(runCmd);
} catch (Exception e) {
logger.error(String.format("Kill yarn application app id [%s] failed: [%s]", appId, e.getMessage()));
}
}
/**
* kill tasks according to different task types.
*
* @param taskExecutionContext taskExecutionContext
*/
public static void kill(TaskExecutionContext taskExecutionContext) {
try {
int processId = taskExecutionContext.getProcessId();
if (processId == 0) {
logger.error("process kill failed, process id :{}, task id:{}",
processId, taskExecutionContext.getTaskInstanceId());
return;
}
String pidsStr = getPidsStr(processId);
if (StringUtils.isNotEmpty(pidsStr)) {
String cmd = String.format("kill -9 %s", pidsStr);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("process id:{}, cmd:{}", processId, cmd);
OSUtils.exeCmd(cmd);
}
} catch (Exception e) {
logger.error("kill task failed", e);
}
// find log and kill yarn job
killYarnJob(taskExecutionContext);
}
/**
* get pids str.
*
* @param processId process id
* @return pids pid String
* @throws Exception exception
*/
public static String getPidsStr(int processId) throws Exception {
StringBuilder sb = new StringBuilder();
Matcher mat = null;
// pstree pid get sub pids
if (OSUtils.isMacOS()) {
String pids = OSUtils.exeCmd(String.format("%s -sp %d", Constants.PSTREE, processId));
if (null != pids) {
mat = MACPATTERN.matcher(pids);
}
} else {
String pids = OSUtils.exeCmd(String.format("%s -p %d", Constants.PSTREE, processId));
// exeCmd may return null; guard like the macOS branch to avoid an NPE
if (null != pids) {
mat = WINDOWSATTERN.matcher(pids);
}
}
if (null != mat) {
while (mat.find()) {
sb.append(mat.group(1)).append(" ");
}
}
return sb.toString().trim();
}
/**
* find logs and kill yarn tasks.
* @param taskExecutionContext taskExecutionContext
* @return yarn application ids
*/
public static List<String> killYarnJob(TaskExecutionContext taskExecutionContext) {
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
LogClientService logClient = null;
String log;
try {
logClient = new LogClientService();
log = logClient.viewLog(Host.of(taskExecutionContext.getHost()).getIp(),
Constants.RPC_PORT,
taskExecutionContext.getLogPath());
} finally {
if (logClient != null) {
logClient.close();
}
}
if (StringUtils.isNotEmpty(log)) {
List<String> appIds = LoggerUtils.getAppIds(log, logger);
String workerDir = taskExecutionContext.getExecutePath();
if (StringUtils.isEmpty(workerDir)) {
logger.error("task instance work dir is empty");
throw new RuntimeException("task instance work dir is empty");
}
if (CollectionUtils.isNotEmpty(appIds)) {
cancelApplication(appIds, logger, taskExecutionContext.getTenantCode(), taskExecutionContext.getExecutePath());
return appIds;
}
}
} catch (Exception e) {
logger.error("kill yarn job failure", e);
}
return Collections.emptyList();
}
}
|
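A minimal, self-contained sketch of the PID extraction that the kill path above depends on. The pstree output string and PIDs below are assumed for illustration, not taken from a real run; the expression mirrors the non-macOS pattern used by getPidsStr, so every PID in the tree, including a long-running sleep child, ends up in the single kill -9 command.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PidParseSketch {
    // same expression as ProcessUtils' non-macOS pstree pattern
    private static final Pattern PSTREE_PATTERN = Pattern.compile("\\w+\\((\\d+)\\)");

    public static void main(String[] args) {
        // hypothetical `pstree -p 28789` output for a shell task running `sleep`
        String pstreeOutput = "bash(28789)---sh(28790)---sleep(28791)";
        StringBuilder sb = new StringBuilder();
        Matcher mat = PSTREE_PATTERN.matcher(pstreeOutput);
        while (mat.find()) {
            sb.append(mat.group(1)).append(" ");
        }
        // prints "28789 28790 28791": all three PIDs feed `kill -9 <pids>`
        System.out.println(sb.toString().trim());
    }
}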
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,233 | [Bug] If commandMap doesn't contain StartParams, REPEAT RUNNING a process instance will throw an exception | [Bug] If commandMap doesn't contain CMD_PARAM_START_PARAMS, REPEAT RUNNING a process instance will throw an exception.
When the commandMap contains keys but not the StartParams key, REPEAT RUNNING a process instance will throw an exception. | https://github.com/apache/dolphinscheduler/issues/5233 | https://github.com/apache/dolphinscheduler/pull/5234 | 2f1f193ba85abb7d4738f4f10ad05e6d510132c5 | 5c898e38ed62b0f8d034d1e910be4b45692dae8e | "2021-04-08T08:40:01Z" | java | "2021-04-08T10:58:43Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.MAX_TASK_TIMEOUT;
import org.apache.commons.collections.MapUtils;
import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* executor service impl
*/
@Service
public class ExecutorServiceImpl extends BaseServiceImpl implements ExecutorService {
private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceImpl.class);
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
@Autowired
private MonitorService monitorService;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private ProcessService processService;
/**
* execute process instance
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process Definition Id
* @param cronTime cron time
* @param commandType command type
* @param failureStrategy failure strategy
* @param startNodeList start node list
* @param taskDependType node dependency type
* @param warningType warning type
* @param warningGroupId notify group id
* @param processInstancePriority process instance priority
* @param workerGroup worker group name
* @param runMode run mode
* @param timeout timeout
* @param startParams the global param values which are passed to the new process instance
* @return execute process instance code
*/
@Override
public Map<String, Object> execProcessInstance(User loginUser, String projectName,
int processDefinitionId, String cronTime, CommandType commandType,
FailureStrategy failureStrategy, String startNodeList,
TaskDependType taskDependType, WarningType warningType, int warningGroupId,
RunMode runMode,
Priority processInstancePriority, String workerGroup, Integer timeout,
Map<String, String> startParams) {
Map<String, Object> result = new HashMap<>();
// timeout is invalid
if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
putMsg(result, Status.TASK_TIMEOUT_PARAMS_ERROR);
return result;
}
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResultAndAuth = checkResultAndAuth(loginUser, projectName, project);
if (checkResultAndAuth != null) {
return checkResultAndAuth;
}
// check process define release state
ProcessDefinition processDefinition = processDefinitionMapper.selectById(processDefinitionId);
result = checkProcessDefinitionValid(processDefinition, processDefinitionId);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
if (!checkTenantSuitable(processDefinition)) {
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
return result;
}
// check master exists
if (!checkMasterExists(result)) {
return result;
}
/**
* create command
*/
int create = this.createCommand(commandType, processDefinitionId,
taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
warningGroupId, runMode, processInstancePriority, workerGroup, startParams);
if (create > 0) {
processDefinition.setWarningGroupId(warningGroupId);
processDefinitionMapper.updateById(processDefinition);
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.START_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* check whether master exists
*
* @param result result
* @return master exists return true , otherwise return false
*/
private boolean checkMasterExists(Map<String, Object> result) {
// check master server exists
List<Server> masterServers = monitorService.getServerListFromZK(true);
// no master
if (masterServers.isEmpty()) {
putMsg(result, Status.MASTER_NOT_EXISTS);
return false;
}
return true;
}
/**
* check whether the process definition can be executed
*
* @param processDefinition process definition
* @param processDefineId process definition id
* @return check result code
*/
@Override
public Map<String, Object> checkProcessDefinitionValid(ProcessDefinition processDefinition, int processDefineId) {
Map<String, Object> result = new HashMap<>();
if (processDefinition == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineId);
} else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
// check process definition online
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefineId);
} else {
result.put(Constants.STATUS, Status.SUCCESS);
}
return result;
}
/**
* do action to process instance:pause, stop, repeat, recover from pause, recover from stop
*
* @param loginUser login user
* @param projectName project name
* @param processInstanceId process instance id
* @param executeType execute type
* @return execute result code
*/
@Override
public Map<String, Object> execute(User loginUser, String projectName, Integer processInstanceId, ExecuteType executeType) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = checkResultAndAuth(loginUser, projectName, project);
if (checkResult != null) {
return checkResult;
}
// check master exists
if (!checkMasterExists(result)) {
return result;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
}
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
if (executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE) {
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
}
checkResult = checkExecuteType(processInstance, executeType);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
return checkResult;
}
if (!checkTenantSuitable(processDefinition)) {
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
}
//get the startParams user specified at the first starting while repeat running is needed
Map<String, Object> commandMap = JSONUtils.toMap(processInstance.getCommandParam(), String.class, Object.class);
String startParams = null;
if (MapUtils.isNotEmpty(commandMap) && executeType == ExecuteType.REPEAT_RUNNING) {
// the map may not contain CMD_PARAM_START_PARAMS; guard the lookup to avoid an NPE
Object startParamsJson = commandMap.get(Constants.CMD_PARAM_START_PARAMS);
if (startParamsJson != null) {
startParams = startParamsJson.toString();
}
}
switch (executeType) {
case REPEAT_RUNNING:
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.REPEAT_RUNNING, startParams);
break;
case RECOVER_SUSPENDED_PROCESS:
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.RECOVER_SUSPENDED_PROCESS, startParams);
break;
case START_FAILURE_TASK_PROCESS:
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.START_FAILURE_TASK_PROCESS, startParams);
break;
case STOP:
if (processInstance.getState() == ExecutionStatus.READY_STOP) {
putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
} else {
result = updateProcessInstancePrepare(processInstance, CommandType.STOP, ExecutionStatus.READY_STOP);
}
break;
case PAUSE:
if (processInstance.getState() == ExecutionStatus.READY_PAUSE) {
putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
} else {
result = updateProcessInstancePrepare(processInstance, CommandType.PAUSE, ExecutionStatus.READY_PAUSE);
}
break;
default:
logger.error("unknown execute type : {}", executeType);
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type");
break;
}
return result;
}
/**
* check tenant suitable
*
* @param processDefinition process definition
* @return true if tenant suitable, otherwise return false
*/
private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(),
processDefinition.getUserId());
return tenant != null;
}
/**
* Check the state of process instance and the type of operation match
*
* @param processInstance process instance
* @param executeType execute type
* @return check result code
*/
private Map<String, Object> checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) {
Map<String, Object> result = new HashMap<>();
ExecutionStatus executionStatus = processInstance.getState();
boolean checkResult = false;
switch (executeType) {
case PAUSE:
case STOP:
if (executionStatus.typeIsRunning()) {
checkResult = true;
}
break;
case REPEAT_RUNNING:
if (executionStatus.typeIsFinished()) {
checkResult = true;
}
break;
case START_FAILURE_TASK_PROCESS:
if (executionStatus.typeIsFailure()) {
checkResult = true;
}
break;
case RECOVER_SUSPENDED_PROCESS:
if (executionStatus.typeIsPause() || executionStatus.typeIsCancel()) {
checkResult = true;
}
break;
default:
break;
}
if (!checkResult) {
putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString());
} else {
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* prepare to update process instance command type and status
*
* @param processInstance process instance
* @param commandType command type
* @param executionStatus execute status
* @return update result
*/
private Map<String, Object> updateProcessInstancePrepare(ProcessInstance processInstance, CommandType commandType, ExecutionStatus executionStatus) {
Map<String, Object> result = new HashMap<>();
processInstance.setCommandType(commandType);
processInstance.addHistoryCmd(commandType);
processInstance.setState(executionStatus);
int update = processService.updateProcessInstance(processInstance);
// determine whether the process is normal
if (update > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* insert a command; used by page operations such as re-run and recovery from pause/failure
*
* @param loginUser login user
* @param instanceId instance id
* @param processDefinitionId process definition id
* @param commandType command type
* @return insert result code
*/
private Map<String, Object> insertCommand(User loginUser, Integer instanceId, Integer processDefinitionId, CommandType commandType, String startParams) {
Map<String, Object> result = new HashMap<>();
//To add startParams only when repeat running is needed
Map<String, Object> cmdParam = new HashMap<>();
cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, instanceId);
if (StringUtils.isNotEmpty(startParams)) {
cmdParam.put(CMD_PARAM_START_PARAMS, startParams);
}
Command command = new Command();
command.setCommandType(commandType);
command.setProcessDefinitionId(processDefinitionId);
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setExecutorId(loginUser.getId());
if (!processService.verifyIsNeedCreateCommand(command)) {
putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND, processDefinitionId);
return result;
}
int create = processService.createCommand(command);
if (create > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* check if sub processes are offline before starting process definition
*
* @param processDefineId process definition id
* @return check result code
*/
@Override
public Map<String, Object> startCheckByProcessDefinedId(int processDefineId) {
Map<String, Object> result = new HashMap<>();
if (processDefineId == 0) {
logger.error("process definition id is null");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "process definition id");
}
List<Integer> ids = new ArrayList<>();
processService.recurseFindSubProcessId(processDefineId, ids);
Integer[] idArray = ids.toArray(new Integer[ids.size()]);
if (!ids.isEmpty()) {
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(idArray);
if (processDefinitionList != null) {
for (ProcessDefinition processDefinition : processDefinitionList) {
/**
* if there is no online process, exit directly
*/
if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
logger.info("not release process definition id: {} , name : {}",
processDefinition.getId(), processDefinition.getName());
return result;
}
}
}
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* create command
*
* @param commandType commandType
* @param processDefineId processDefineId
* @param nodeDep nodeDep
* @param failureStrategy failureStrategy
* @param startNodeList startNodeList
* @param schedule schedule
* @param warningType warningType
* @param executorId executorId
* @param warningGroupId warningGroupId
* @param runMode runMode
* @param processInstancePriority processInstancePriority
* @param workerGroup workerGroup
* @return command id
*/
private int createCommand(CommandType commandType, int processDefineId,
TaskDependType nodeDep, FailureStrategy failureStrategy,
String startNodeList, String schedule, WarningType warningType,
int executorId, int warningGroupId,
RunMode runMode, Priority processInstancePriority, String workerGroup,
Map<String, String> startParams) {
/**
* instantiate command schedule instance
*/
Command command = new Command();
Map<String, String> cmdParam = new HashMap<>();
if (commandType == null) {
command.setCommandType(CommandType.START_PROCESS);
} else {
command.setCommandType(commandType);
}
command.setProcessDefinitionId(processDefineId);
if (nodeDep != null) {
command.setTaskDependType(nodeDep);
}
if (failureStrategy != null) {
command.setFailureStrategy(failureStrategy);
}
if (StringUtils.isNotEmpty(startNodeList)) {
cmdParam.put(CMD_PARAM_START_NODE_NAMES, startNodeList);
}
if (warningType != null) {
command.setWarningType(warningType);
}
if (startParams != null && startParams.size() > 0) {
cmdParam.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams));
}
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setExecutorId(executorId);
command.setWarningGroupId(warningGroupId);
command.setProcessInstancePriority(processInstancePriority);
command.setWorkerGroup(workerGroup);
Date start = null;
Date end = null;
if (StringUtils.isNotEmpty(schedule)) {
String[] interval = schedule.split(",");
if (interval.length == 2) {
start = DateUtils.getScheduleDate(interval[0]);
end = DateUtils.getScheduleDate(interval[1]);
}
}
// determine whether to complement
if (commandType == CommandType.COMPLEMENT_DATA) {
runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
if (null != start && null != end && !start.after(end)) {
if (runMode == RunMode.RUN_MODE_SERIAL) {
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
return processService.createCommand(command);
} else if (runMode == RunMode.RUN_MODE_PARALLEL) {
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefineId);
List<Date> listDate = new LinkedList<>();
if (!CollectionUtils.isEmpty(schedules)) {
for (Schedule item : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(start, end, item.getCrontab()));
}
}
if (!CollectionUtils.isEmpty(listDate)) {
// loop by schedule date
for (Date date : listDate) {
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(date));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(date));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
processService.createCommand(command);
}
return listDate.size();
} else {
// loop by day
int runCount = 0;
while (!start.after(end)) {
runCount += 1;
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
processService.createCommand(command);
start = DateUtils.getSomeDay(start, 1);
}
return runCount;
}
}
} else {
logger.error("there is not valid schedule date for the process definition: id:{}", processDefineId);
}
} else {
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
return processService.createCommand(command);
}
return 0;
}
/**
* check result and auth
*/
private Map<String, Object> checkResultAndAuth(User loginUser, String projectName, Project project) {
// check project auth
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
return checkResult;
}
return null;
}
}
|
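A small sketch of the failure mode described in the issue above, using a plain HashMap to stand in for the map returned by JSONUtils.toMap (an assumption made for illustration; the literal keys below are also stand-ins for the Constants values, not the real constant strings).

import java.util.HashMap;
import java.util.Map;

public class StartParamsGuardSketch {
    public static void main(String[] args) {
        // command params parsed from the instance: some keys, but no start params
        Map<String, Object> commandMap = new HashMap<>();
        commandMap.put("recoverProcessId", "42"); // hypothetical key

        // unguarded form: get() returns null here, so calling .toString() would throw an NPE
        Object raw = commandMap.get("startParams"); // stand-in for CMD_PARAM_START_PARAMS

        // guarded form, matching the in-place guard in execute() above
        String startParams = (raw == null) ? null : raw.toString();
        System.out.println("startParams = " + startParams); // prints: startParams = null
    }
}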
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,170 | [Improvement][API] Add permission judgment to the QueryAuthorizedProject interface (easy to implement) | **Describe the question**
The QueryAuthorizedProject interface grants query authority only to administrators, so the scenario where a user queries their own authorized projects cannot be satisfied.
**What are the current deficiencies and the benefits of improvement**
Add permission judgment to the QueryAuthorizedProject interface so that a user can query their own authorized projects.
**Which version of DolphinScheduler:**
-[1.3.4-release]
| https://github.com/apache/dolphinscheduler/issues/5170 | https://github.com/apache/dolphinscheduler/pull/5184 | 73e917d3c179b67afd0f3fbd2902d683dbed172d | 039d517c1358c197629d54c580ba6e1b356d551d | "2021-03-29T09:49:27Z" | java | "2021-04-11T06:44:20Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProjectServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.api.utils.CheckUtils.checkDesc;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* project service impl
**/
@Service
public class ProjectServiceImpl extends BaseServiceImpl implements ProjectService {
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectUserMapper projectUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create project
*
* @param loginUser login user
* @param name project name
* @param desc description
* @return returns an error if it exists
*/
@Override
public Map<String, Object> createProject(User loginUser, String name, String desc) {
Map<String, Object> result = new HashMap<>();
Map<String, Object> descCheck = checkDesc(desc);
if (descCheck.get(Constants.STATUS) != Status.SUCCESS) {
return descCheck;
}
Project project = projectMapper.queryByName(name);
if (project != null) {
putMsg(result, Status.PROJECT_ALREADY_EXISTS, name);
return result;
}
Date now = new Date();
project = Project
.newBuilder()
.name(name)
.description(desc)
.userId(loginUser.getId())
.userName(loginUser.getUserName())
.createTime(now)
.updateTime(now)
.build();
if (projectMapper.insert(project) > 0) {
result.put(Constants.DATA_LIST, project.getId());
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.CREATE_PROJECT_ERROR);
}
return result;
}
/**
* query project details by id
*
* @param projectId project id
* @return project detail information
*/
@Override
public Map<String, Object> queryById(Integer projectId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.selectById(projectId);
if (project != null) {
result.put(Constants.DATA_LIST, project);
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.PROJECT_NOT_FOUNT, projectId);
}
return result;
}
/**
* check project and authorization
*
* @param loginUser login user
* @param project project
* @param projectName project name
* @return true if the login user have permission to see the project
*/
@Override
public Map<String, Object> checkProjectAndAuth(User loginUser, Project project, String projectName) {
Map<String, Object> result = new HashMap<>();
if (project == null) {
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
} else if (!checkReadPermission(loginUser, project)) {
// check read permission
putMsg(result, Status.USER_NO_OPERATION_PROJECT_PERM, loginUser.getUserName(), projectName);
} else {
putMsg(result, Status.SUCCESS);
}
return result;
}
@Override
public boolean hasProjectAndPerm(User loginUser, Project project, Map<String, Object> result) {
boolean checkResult = false;
if (project == null) {
putMsg(result, Status.PROJECT_NOT_FOUNT, "");
} else if (!checkReadPermission(loginUser, project)) {
putMsg(result, Status.USER_NO_OPERATION_PROJECT_PERM, loginUser.getUserName(), project.getName());
} else {
checkResult = true;
}
return checkResult;
}
/**
* admin can view all projects
*
* @param loginUser login user
* @param searchVal search value
* @param pageSize page size
* @param pageNo page number
     * @return project list which the login user has permission to see
*/
@Override
public Map<String, Object> queryProjectListPaging(User loginUser, Integer pageSize, Integer pageNo, String searchVal) {
Map<String, Object> result = new HashMap<>();
PageInfo<Project> pageInfo = new PageInfo<>(pageNo, pageSize);
Page<Project> page = new Page<>(pageNo, pageSize);
int userId = loginUser.getUserType() == UserType.ADMIN_USER ? 0 : loginUser.getId();
IPage<Project> projectIPage = projectMapper.queryProjectListPaging(page, userId, searchVal);
List<Project> projectList = projectIPage.getRecords();
if (userId != 0) {
for (Project project : projectList) {
project.setPerm(Constants.DEFAULT_ADMIN_PERMISSION);
}
}
pageInfo.setTotalCount((int) projectIPage.getTotal());
pageInfo.setLists(projectList);
result.put(Constants.COUNT, (int) projectIPage.getTotal());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* delete project by id
*
* @param loginUser login user
* @param projectId project id
* @return delete result code
*/
@Override
public Map<String, Object> deleteProject(User loginUser, Integer projectId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.selectById(projectId);
Map<String, Object> checkResult = getCheckResult(loginUser, project);
if (checkResult != null) {
return checkResult;
}
if (!hasPerm(loginUser, project.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryAllDefinitionList(projectId);
if (!processDefinitionList.isEmpty()) {
putMsg(result, Status.DELETE_PROJECT_ERROR_DEFINES_NOT_NULL);
return result;
}
int delete = projectMapper.deleteById(projectId);
if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_PROJECT_ERROR);
}
return result;
}
/**
* get check result
*
* @param loginUser login user
* @param project project
* @return check result
*/
private Map<String, Object> getCheckResult(User loginUser, Project project) {
String projectName = project == null ? null : project.getName();
Map<String, Object> checkResult = checkProjectAndAuth(loginUser, project, projectName);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
return checkResult;
}
return null;
}
/**
     * update project
*
* @param loginUser login user
* @param projectId project id
* @param projectName project name
* @param desc description
* @return update result code
*/
@Override
public Map<String, Object> update(User loginUser, Integer projectId, String projectName, String desc) {
Map<String, Object> result = new HashMap<>();
Map<String, Object> descCheck = checkDesc(desc);
if (descCheck.get(Constants.STATUS) != Status.SUCCESS) {
return descCheck;
}
Project project = projectMapper.selectById(projectId);
boolean hasProjectAndPerm = hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {
return result;
}
Project tempProject = projectMapper.queryByName(projectName);
if (tempProject != null && tempProject.getId() != projectId) {
putMsg(result, Status.PROJECT_ALREADY_EXISTS, projectName);
return result;
}
project.setName(projectName);
project.setDescription(desc);
project.setUpdateTime(new Date());
int update = projectMapper.updateById(project);
if (update > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.UPDATE_PROJECT_ERROR);
}
return result;
}
/**
* query unauthorized project
*
* @param loginUser login user
* @param userId user id
     * @return the projects which the user does not have permission to see
*/
@Override
public Map<String, Object> queryUnauthorizedProject(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
        // query all project list except the specified userId
List<Project> projectList = projectMapper.queryProjectExceptUserId(userId);
List<Project> resultList = new ArrayList<>();
Set<Project> projectSet = null;
if (projectList != null && !projectList.isEmpty()) {
projectSet = new HashSet<>(projectList);
List<Project> authedProjectList = projectMapper.queryAuthedProjectListByUserId(userId);
resultList = getUnauthorizedProjects(projectSet, authedProjectList);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* get unauthorized project
*
* @param projectSet project set
* @param authedProjectList authed project list
     * @return the unauthorized project list
*/
private List<Project> getUnauthorizedProjects(Set<Project> projectSet, List<Project> authedProjectList) {
List<Project> resultList;
Set<Project> authedProjectSet = null;
if (authedProjectList != null && !authedProjectList.isEmpty()) {
authedProjectSet = new HashSet<>(authedProjectList);
projectSet.removeAll(authedProjectSet);
}
resultList = new ArrayList<>(projectSet);
return resultList;
}
/**
* query authorized project
*
* @param loginUser login user
* @param userId user id
     * @return projects which the user has permission to see, except for projects created by this user
*/
@Override
public Map<String, Object> queryAuthorizedProject(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Project> projects = projectMapper.queryAuthedProjectListByUserId(userId);
result.put(Constants.DATA_LIST, projects);
putMsg(result, Status.SUCCESS);
return result;
}
/**
     * query projects created by user
     *
     * @param loginUser login user
     * @return projects created by the login user
*/
@Override
public Map<String, Object> queryProjectCreatedByUser(User loginUser) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
List<Project> projects = projectMapper.queryProjectCreatedByUser(loginUser.getId());
result.put(Constants.DATA_LIST, projects);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query authorized and user create project list by user
*
* @param loginUser login user
     * @return projects created by the login user plus projects authorized to the login user
*/
@Override
public Map<String, Object> queryProjectCreatedAndAuthorizedByUser(User loginUser) {
Map<String, Object> result = new HashMap<>();
List<Project> projects = null;
if (loginUser.getUserType() == UserType.ADMIN_USER) {
projects = projectMapper.selectList(null);
} else {
projects = projectMapper.queryProjectCreatedAndAuthorizedByUserId(loginUser.getId());
}
result.put(Constants.DATA_LIST, projects);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* check whether have read permission
*
* @param user user
* @param project project
     * @return true if the user has permission to see the project, otherwise false
*/
private boolean checkReadPermission(User user, Project project) {
int permissionId = queryPermission(user, project);
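        // the permission value is a bitmask; read access is granted whenever
        // the READ_PERMISSION bit is set in the mask resolved by queryPermission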
return (permissionId & Constants.READ_PERMISSION) != 0;
}
/**
* query permission id
*
* @param user user
* @param project project
* @return permission
*/
private int queryPermission(User user, Project project) {
if (user.getUserType() == UserType.ADMIN_USER) {
return Constants.READ_PERMISSION;
}
if (project.getUserId() == user.getId()) {
return Constants.ALL_PERMISSIONS;
}
ProjectUser projectUser = projectUserMapper.queryProjectRelation(project.getId(), user.getId());
if (projectUser == null) {
return 0;
}
return projectUser.getPerm();
}
/**
* query all project list
*
* @return project list
*/
@Override
public Map<String, Object> queryAllProjectList() {
Map<String, Object> result = new HashMap<>();
List<Project> projects = projectMapper.queryAllProject();
result.put(Constants.DATA_LIST, projects);
putMsg(result, Status.SUCCESS);
return result;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,170 | [Improvement][API] the QueryAuthorizedProject interface adds permission judgment (easy to implement) | **Describe the question**
Only administrators are allowed to call the QueryAuthorizedProject interface, so the scenario where a user queries the projects authorized to himself cannot be satisfied.
**What are the current deficiencies and the benefits of improvement**
Add a permission check to the QueryAuthorizedProject interface so that a user can also query his or her own authorized projects.
**Which version of DolphinScheduler:**
-[1.3.4-release]
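Below is a minimal sketch (an editor's illustration of the requested behavior, not the change merged in PR #5184) of how `ProjectServiceImpl#queryAuthorizedProject` could keep the admin path while also letting a general user query the projects authorized to himself:

```java
@Override
public Map<String, Object> queryAuthorizedProject(User loginUser, Integer userId) {
    Map<String, Object> result = new HashMap<>();
    // admins may query any user; a general user may only query himself
    if (loginUser.getUserType() != UserType.ADMIN_USER && loginUser.getId() != userId) {
        putMsg(result, Status.USER_NO_OPERATION_PERM);
        return result;
    }
    List<Project> projects = projectMapper.queryAuthedProjectListByUserId(userId);
    result.put(Constants.DATA_LIST, projects);
    putMsg(result, Status.SUCCESS);
    return result;
}
```

This keeps the existing USER_NO_OPERATION_PERM response for cross-user queries, so callers relying on the old behavior are unaffected.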
| https://github.com/apache/dolphinscheduler/issues/5170 | https://github.com/apache/dolphinscheduler/pull/5184 | 73e917d3c179b67afd0f3fbd2902d683dbed172d | 039d517c1358c197629d54c580ba6e1b356d551d | "2021-03-29T09:49:27Z" | java | "2021-04-11T06:44:20Z" | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProjectServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.ProjectServiceImpl;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* project service test
**/
@RunWith(MockitoJUnitRunner.class)
public class ProjectServiceTest {
private static final Logger logger = LoggerFactory.getLogger(ProjectServiceTest.class);
@InjectMocks
private ProjectServiceImpl projectService;
@Mock
private ProjectMapper projectMapper;
@Mock
private ProjectUserMapper projectUserMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
private String projectName = "ProjectServiceTest";
private String userName = "ProjectServiceTest";
@Test
public void testCreateProject() {
User loginUser = getLoginUser();
loginUser.setId(1);
Map<String, Object> result = projectService.createProject(loginUser, projectName, getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));
//project name exist
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject());
result = projectService.createProject(loginUser, projectName, projectName);
logger.info(result.toString());
Assert.assertEquals(Status.PROJECT_ALREADY_EXISTS, result.get(Constants.STATUS));
//success
Mockito.when(projectMapper.insert(Mockito.any(Project.class))).thenReturn(1);
result = projectService.createProject(loginUser, "test", "test");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testQueryById() {
//not exist
Map<String, Object> result = projectService.queryById(Integer.MAX_VALUE);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, result.get(Constants.STATUS));
logger.info(result.toString());
//success
Mockito.when(projectMapper.selectById(1)).thenReturn(getProject());
result = projectService.queryById(1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testCheckProjectAndAuth() {
Mockito.when(projectUserMapper.queryProjectRelation(1, 1)).thenReturn(getProjectUser());
User loginUser = getLoginUser();
Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, null, projectName);
logger.info(result.toString());
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, result.get(Constants.STATUS));
Project project = getProject();
//USER_NO_OPERATION_PROJECT_PERM
project.setUserId(2);
result = projectService.checkProjectAndAuth(loginUser, project, projectName);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PROJECT_PERM, result.get(Constants.STATUS));
//success
project.setUserId(1);
result = projectService.checkProjectAndAuth(loginUser, project, projectName);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        Map<String, Object> result2 = projectService.checkProjectAndAuth(loginUser, null, projectName);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, result2.get(Constants.STATUS));
Project project1 = getProject();
// USER_NO_OPERATION_PROJECT_PERM
project1.setUserId(2);
result2 = projectService.checkProjectAndAuth(loginUser, project1, projectName);
Assert.assertEquals(Status.USER_NO_OPERATION_PROJECT_PERM, result2.get(Constants.STATUS));
//success
project1.setUserId(1);
projectService.checkProjectAndAuth(loginUser, project1, projectName);
}
@Test
public void testHasProjectAndPerm() {
// Mockito.when(projectUserMapper.queryProjectRelation(1, 1)).thenReturn(getProjectUser());
User loginUser = getLoginUser();
Project project = getProject();
Map<String, Object> result = new HashMap<>();
// not exist user
User tempUser = new User();
tempUser.setId(Integer.MAX_VALUE);
boolean checkResult = projectService.hasProjectAndPerm(tempUser, project, result);
logger.info(result.toString());
Assert.assertFalse(checkResult);
//success
result = new HashMap<>();
project.setUserId(1);
checkResult = projectService.hasProjectAndPerm(loginUser, project, result);
logger.info(result.toString());
Assert.assertTrue(checkResult);
}
@Test
public void testQueryProjectListPaging() {
IPage<Project> page = new Page<>(1, 10);
page.setRecords(getList());
page.setTotal(1L);
Mockito.when(projectMapper.queryProjectListPaging(Mockito.any(Page.class), Mockito.eq(1), Mockito.eq(projectName))).thenReturn(page);
User loginUser = getLoginUser();
// project owner
Map<String, Object> result = projectService.queryProjectListPaging(loginUser, 10, 1, projectName);
logger.info(result.toString());
PageInfo<Project> pageInfo = (PageInfo<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getLists()));
//admin
Mockito.when(projectMapper.queryProjectListPaging(Mockito.any(Page.class), Mockito.eq(0), Mockito.eq(projectName))).thenReturn(page);
loginUser.setUserType(UserType.ADMIN_USER);
result = projectService.queryProjectListPaging(loginUser, 10, 1, projectName);
logger.info(result.toString());
pageInfo = (PageInfo<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getLists()));
}
@Test
public void testDeleteProject() {
Mockito.when(projectMapper.selectById(1)).thenReturn(getProject());
User loginUser = getLoginUser();
//PROJECT_NOT_FOUNT
Map<String, Object> result = projectService.deleteProject(loginUser, 12);
logger.info(result.toString());
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, result.get(Constants.STATUS));
loginUser.setId(2);
//USER_NO_OPERATION_PROJECT_PERM
result = projectService.deleteProject(loginUser, 1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PROJECT_PERM, result.get(Constants.STATUS));
//DELETE_PROJECT_ERROR_DEFINES_NOT_NULL
Mockito.when(processDefinitionMapper.queryAllDefinitionList(1)).thenReturn(getProcessDefinitions());
loginUser.setUserType(UserType.ADMIN_USER);
result = projectService.deleteProject(loginUser, 1);
logger.info(result.toString());
Assert.assertEquals(Status.DELETE_PROJECT_ERROR_DEFINES_NOT_NULL, result.get(Constants.STATUS));
//success
Mockito.when(projectMapper.deleteById(1)).thenReturn(1);
Mockito.when(processDefinitionMapper.queryAllDefinitionList(1)).thenReturn(new ArrayList<>());
result = projectService.deleteProject(loginUser, 1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testUpdate() {
User loginUser = getLoginUser();
Project project = getProject();
project.setId(2);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(project);
Mockito.when(projectMapper.selectById(1)).thenReturn(getProject());
// PROJECT_NOT_FOUNT
Map<String, Object> result = projectService.update(loginUser, 12, projectName, "desc");
logger.info(result.toString());
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, result.get(Constants.STATUS));
//PROJECT_ALREADY_EXISTS
result = projectService.update(loginUser, 1, projectName, "desc");
logger.info(result.toString());
Assert.assertEquals(Status.PROJECT_ALREADY_EXISTS, result.get(Constants.STATUS));
//success
project.setUserId(1);
Mockito.when(projectMapper.updateById(Mockito.any(Project.class))).thenReturn(1);
result = projectService.update(loginUser, 1, "test", "desc");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testQueryAuthorizedProject() {
User loginUser = getLoginUser();
Mockito.when(projectMapper.queryAuthedProjectListByUserId(1)).thenReturn(getList());
//USER_NO_OPERATION_PERM
Map<String, Object> result = projectService.queryAuthorizedProject(loginUser, 3);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
//success
loginUser.setUserType(UserType.ADMIN_USER);
result = projectService.queryAuthorizedProject(loginUser, 1);
logger.info(result.toString());
List<Project> projects = (List<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(projects));
}
@Test
public void testQueryCreatedProject() {
User loginUser = getLoginUser();
Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(getList());
//USER_NO_OPERATION_PERM
Map<String, Object> result = projectService.queryProjectCreatedByUser(loginUser);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
//success
loginUser.setUserType(UserType.ADMIN_USER);
result = projectService.queryProjectCreatedByUser(loginUser);
logger.info(result.toString());
List<Project> projects = (List<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(projects));
}
@Test
public void testQueryProjectCreatedAndAuthorizedByUser() {
Map<String, Object> result = null;
User loginUser = getLoginUser();
// not admin user
Mockito.when(projectMapper.queryProjectCreatedAndAuthorizedByUserId(1)).thenReturn(getList());
result = projectService.queryProjectCreatedAndAuthorizedByUser(loginUser);
List<Project> notAdminUserResult = (List<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(notAdminUserResult));
//admin user
loginUser.setUserType(UserType.ADMIN_USER);
Mockito.when(projectMapper.selectList(null)).thenReturn(getList());
result = projectService.queryProjectCreatedAndAuthorizedByUser(loginUser);
List<Project> projects = (List<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(projects));
}
@Test
public void testQueryAllProjectList() {
Mockito.when(projectMapper.queryAllProject()).thenReturn(getList());
Map<String, Object> result = projectService.queryAllProjectList();
logger.info(result.toString());
List<Project> projects = (List<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(projects));
}
@Test
public void testQueryUnauthorizedProject() {
// Mockito.when(projectMapper.queryAuthedProjectListByUserId(1)).thenReturn(getList());
Mockito.when(projectMapper.queryProjectExceptUserId(2)).thenReturn(getList());
User loginUser = new User();
loginUser.setUserType(UserType.ADMIN_USER);
Map<String, Object> result = projectService.queryUnauthorizedProject(loginUser, 2);
logger.info(result.toString());
List<Project> projects = (List<Project>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(projects));
}
private Project getProject() {
Project project = new Project();
project.setId(1);
project.setName(projectName);
project.setUserId(1);
return project;
}
private List<Project> getList() {
List<Project> list = new ArrayList<>();
list.add(getProject());
return list;
}
/**
* create admin user
*/
private User getLoginUser() {
User loginUser = new User();
loginUser.setUserType(UserType.GENERAL_USER);
loginUser.setUserName(userName);
loginUser.setId(1);
return loginUser;
}
/**
* get project user
*/
private ProjectUser getProjectUser() {
ProjectUser projectUser = new ProjectUser();
projectUser.setProjectId(1);
projectUser.setUserId(1);
return projectUser;
}
private List<ProcessDefinition> getProcessDefinitions() {
List<ProcessDefinition> list = new ArrayList<>();
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setProjectId(1);
list.add(processDefinition);
return list;
}
private List<Integer> getProjectIds() {
return Collections.singletonList(1);
}
private String getDesc() {
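        // an over-long description: testCreateProject feeds this to
        // createProject and expects REQUEST_PARAMS_NOT_VALID_ERROR back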
return "projectUserMapper.deleteProjectRelation(projectId,userId)projectUserMappe"
+ ".deleteProjectRelation(projectId,userId)projectUserMappe"
+ "r.deleteProjectRelation(projectId,userId)projectUserMapper"
+ ".deleteProjectRelation(projectId,userId)projectUserMapper.deleteProjectRelation(projectId,userId)";
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,241 | Bug: create dataX job json error | https://github.com/apache/incubator-dolphinscheduler/blob/dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
lines 313-314
The reader and writer entries in the generated DataX job JSON file are JSON strings rather than JSON objects, so DataX fails to parse the job JSON file. | https://github.com/apache/dolphinscheduler/issues/5241 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-04-09T07:24:56Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/DataxUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.enums.DbType;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
import com.alibaba.druid.sql.dialect.oracle.parser.OracleStatementParser;
import com.alibaba.druid.sql.dialect.postgresql.parser.PGSQLStatementParser;
import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser;
import com.alibaba.druid.sql.parser.SQLStatementParser;
public class DataxUtils {
public static final String DATAX_READER_PLUGIN_MYSQL = "mysqlreader";
public static final String DATAX_READER_PLUGIN_POSTGRESQL = "postgresqlreader";
public static final String DATAX_READER_PLUGIN_ORACLE = "oraclereader";
public static final String DATAX_READER_PLUGIN_SQLSERVER = "sqlserverreader";
public static final String DATAX_WRITER_PLUGIN_MYSQL = "mysqlwriter";
public static final String DATAX_WRITER_PLUGIN_POSTGRESQL = "postgresqlwriter";
public static final String DATAX_WRITER_PLUGIN_ORACLE = "oraclewriter";
public static final String DATAX_WRITER_PLUGIN_SQLSERVER = "sqlserverwriter";
public static String getReaderPluginName(DbType dbType) {
switch (dbType) {
case MYSQL:
return DATAX_READER_PLUGIN_MYSQL;
case POSTGRESQL:
return DATAX_READER_PLUGIN_POSTGRESQL;
case ORACLE:
return DATAX_READER_PLUGIN_ORACLE;
case SQLSERVER:
return DATAX_READER_PLUGIN_SQLSERVER;
default:
return null;
}
}
public static String getWriterPluginName(DbType dbType) {
switch (dbType) {
case MYSQL:
return DATAX_WRITER_PLUGIN_MYSQL;
case POSTGRESQL:
return DATAX_WRITER_PLUGIN_POSTGRESQL;
case ORACLE:
return DATAX_WRITER_PLUGIN_ORACLE;
case SQLSERVER:
return DATAX_WRITER_PLUGIN_SQLSERVER;
default:
return null;
}
}
public static SQLStatementParser getSqlStatementParser(DbType dbType, String sql) {
switch (dbType) {
case MYSQL:
return new MySqlStatementParser(sql);
case POSTGRESQL:
return new PGSQLStatementParser(sql);
case ORACLE:
return new OracleStatementParser(sql);
case SQLSERVER:
return new SQLServerStatementParser(sql);
default:
return null;
}
}
public static String[] convertKeywordsColumns(DbType dbType, String[] columns) {
if (columns == null) {
return null;
}
String[] toColumns = new String[columns.length];
        for (int i = 0; i < columns.length; i++) {
toColumns[i] = doConvertKeywordsColumn(dbType, columns[i]);
}
return toColumns;
}
public static String doConvertKeywordsColumn(DbType dbType, String column) {
if (column == null) {
return column;
}
column = column.trim();
column = column.replace("`", "");
column = column.replace("\"", "");
column = column.replace("'", "");
switch (dbType) {
case MYSQL:
return String.format("`%s`", column);
case POSTGRESQL:
return String.format("\"%s\"", column);
case ORACLE:
return String.format("\"%s\"", column);
case SQLSERVER:
return String.format("`%s`", column);
default:
return column;
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,241 | Bug: create dataX job json error | https://github.com/apache/incubator-dolphinscheduler/blob/dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
lines 313-314
The reader and writer entries in the generated DataX job JSON file are JSON strings rather than JSON objects, so DataX fails to parse the job JSON file. | https://github.com/apache/dolphinscheduler/issues/5241 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-04-09T07:24:56Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.datax;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.CommandExecuteResult;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
import com.alibaba.druid.sql.ast.statement.SQLSelect;
import com.alibaba.druid.sql.ast.statement.SQLSelectItem;
import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.ast.statement.SQLUnionQuery;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* DataX task
*/
public class DataxTask extends AbstractTask {
/**
* jvm parameters
*/
public static final String JVM_PARAM = " --jvm=\"-Xms%sG -Xmx%sG\" ";
/**
     * python process (DataX only supports Python 2.7 by default)
*/
private static final String DATAX_PYTHON = "python2.7";
/**
* datax path
*/
private static final String DATAX_PATH = "${DATAX_HOME}/bin/datax.py";
/**
* datax channel count
*/
private static final int DATAX_CHANNEL_COUNT = 1;
/**
* datax parameters
*/
private DataxParameters dataXParameters;
/**
* shell command executor
*/
private ShellCommandExecutor shellCommandExecutor;
/**
* taskExecutionContext
*/
private TaskExecutionContext taskExecutionContext;
/**
* constructor
*
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/
public DataxTask(TaskExecutionContext taskExecutionContext, Logger logger) {
super(taskExecutionContext, logger);
this.taskExecutionContext = taskExecutionContext;
this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle,
taskExecutionContext, logger);
}
/**
* init DataX config
*/
@Override
public void init() {
logger.info("datax task params {}", taskExecutionContext.getTaskParams());
dataXParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), DataxParameters.class);
if (!dataXParameters.checkParameters()) {
throw new RuntimeException("datax task params is not valid");
}
}
/**
* run DataX process
*
* @throws Exception if error throws Exception
*/
@Override
public void handle() throws Exception {
try {
// set the name of the current thread
String threadLoggerInfoName = String.format("TaskLogInfo-%s", taskExecutionContext.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
// combining local and global parameters
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
dataXParameters.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
            // run datax process
String jsonFilePath = buildDataxJsonFile(paramsMap);
String shellCommandFilePath = buildShellCommandFile(jsonFilePath, paramsMap);
CommandExecuteResult commandExecuteResult = shellCommandExecutor.run(shellCommandFilePath);
setExitStatusCode(commandExecuteResult.getExitStatusCode());
setAppIds(commandExecuteResult.getAppIds());
setProcessId(commandExecuteResult.getProcessId());
} catch (Exception e) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
throw e;
}
}
/**
* cancel DataX process
*
* @param cancelApplication cancelApplication
* @throws Exception if error throws Exception
*/
@Override
public void cancelApplication(boolean cancelApplication)
throws Exception {
// cancel process
shellCommandExecutor.cancelApplication();
}
/**
* build datax configuration file
*
* @return datax json file name
* @throws Exception if error throws Exception
*/
private String buildDataxJsonFile(Map<String, Property> paramsMap)
throws Exception {
// generate json
String fileName = String.format("%s/%s_job.json",
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTaskAppId());
String json;
Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
if (dataXParameters.getCustomConfig() == Flag.YES.ordinal()) {
json = dataXParameters.getJson().replaceAll("\\r\\n", "\n");
} else {
ObjectNode job = JSONUtils.createObjectNode();
job.putArray("content").addAll(buildDataxJobContentJson());
job.set("setting", buildDataxJobSettingJson());
ObjectNode root = JSONUtils.createObjectNode();
root.set("job", job);
root.set("core", buildDataxCoreJson());
json = root.toString();
}
// replace placeholder
json = ParameterUtils.convertParameterPlaceholders(json, ParamUtils.convert(paramsMap));
logger.debug("datax job json : {}", json);
// create datax json file
FileUtils.writeStringToFile(new File(fileName), json, StandardCharsets.UTF_8);
return fileName;
}
/**
* build datax job config
*
     * @return collection of datax job content ObjectNode
* @throws SQLException if error throws SQLException
*/
private List<ObjectNode> buildDataxJobContentJson() {
DataxTaskExecutionContext dataxTaskExecutionContext = taskExecutionContext.getDataxTaskExecutionContext();
BaseDataSource dataSourceCfg = DataSourceFactory.getDatasource(DbType.of(dataxTaskExecutionContext.getSourcetype()),
dataxTaskExecutionContext.getSourceConnectionParams());
BaseDataSource dataTargetCfg = DataSourceFactory.getDatasource(DbType.of(dataxTaskExecutionContext.getTargetType()),
dataxTaskExecutionContext.getTargetConnectionParams());
List<ObjectNode> readerConnArr = new ArrayList<>();
ObjectNode readerConn = JSONUtils.createObjectNode();
ArrayNode sqlArr = readerConn.putArray("querySql");
for (String sql : new String[]{dataXParameters.getSql()}) {
sqlArr.add(sql);
}
ArrayNode urlArr = readerConn.putArray("jdbcUrl");
for (String url : new String[]{dataSourceCfg.getJdbcUrl()}) {
urlArr.add(url);
}
readerConnArr.add(readerConn);
ObjectNode readerParam = JSONUtils.createObjectNode();
readerParam.put("username", dataSourceCfg.getUser());
readerParam.put("password", dataSourceCfg.getPassword());
readerParam.putArray("connection").addAll(readerConnArr);
ObjectNode reader = JSONUtils.createObjectNode();
reader.put("name", DataxUtils.getReaderPluginName(DbType.of(dataxTaskExecutionContext.getSourcetype())));
reader.set("parameter", readerParam);
List<ObjectNode> writerConnArr = new ArrayList<>();
ObjectNode writerConn = JSONUtils.createObjectNode();
ArrayNode tableArr = writerConn.putArray("table");
for (String table : new String[]{dataXParameters.getTargetTable()}) {
tableArr.add(table);
}
writerConn.put("jdbcUrl", dataTargetCfg.getJdbcUrl());
writerConnArr.add(writerConn);
ObjectNode writerParam = JSONUtils.createObjectNode();
writerParam.put("username", dataTargetCfg.getUser());
writerParam.put("password", dataTargetCfg.getPassword());
String[] columns = parsingSqlColumnNames(DbType.of(dataxTaskExecutionContext.getSourcetype()),
DbType.of(dataxTaskExecutionContext.getTargetType()),
dataSourceCfg, dataXParameters.getSql());
ArrayNode columnArr = writerParam.putArray("column");
for (String column : columns) {
columnArr.add(column);
}
writerParam.putArray("connection").addAll(writerConnArr);
if (CollectionUtils.isNotEmpty(dataXParameters.getPreStatements())) {
ArrayNode preSqlArr = writerParam.putArray("preSql");
for (String preSql : dataXParameters.getPreStatements()) {
preSqlArr.add(preSql);
}
}
if (CollectionUtils.isNotEmpty(dataXParameters.getPostStatements())) {
ArrayNode postSqlArr = writerParam.putArray("postSql");
for (String postSql : dataXParameters.getPostStatements()) {
postSqlArr.add(postSql);
}
}
ObjectNode writer = JSONUtils.createObjectNode();
writer.put("name", DataxUtils.getWriterPluginName(DbType.of(dataxTaskExecutionContext.getTargetType())));
writer.set("parameter", writerParam);
List<ObjectNode> contentList = new ArrayList<>();
ObjectNode content = JSONUtils.createObjectNode();
content.put("reader", reader.toString());
content.put("writer", writer.toString());
contentList.add(content);
return contentList;
}
/**
* build datax setting config
*
     * @return datax setting config ObjectNode
*/
private ObjectNode buildDataxJobSettingJson() {
ObjectNode speed = JSONUtils.createObjectNode();
speed.put("channel", DATAX_CHANNEL_COUNT);
if (dataXParameters.getJobSpeedByte() > 0) {
speed.put("byte", dataXParameters.getJobSpeedByte());
}
if (dataXParameters.getJobSpeedRecord() > 0) {
speed.put("record", dataXParameters.getJobSpeedRecord());
}
ObjectNode errorLimit = JSONUtils.createObjectNode();
errorLimit.put("record", 0);
errorLimit.put("percentage", 0);
ObjectNode setting = JSONUtils.createObjectNode();
setting.put("speed", speed.toString());
setting.put("errorLimit", errorLimit.toString());
return setting;
}
private ObjectNode buildDataxCoreJson() {
ObjectNode speed = JSONUtils.createObjectNode();
speed.put("channel", DATAX_CHANNEL_COUNT);
if (dataXParameters.getJobSpeedByte() > 0) {
speed.put("byte", dataXParameters.getJobSpeedByte());
}
if (dataXParameters.getJobSpeedRecord() > 0) {
speed.put("record", dataXParameters.getJobSpeedRecord());
}
ObjectNode channel = JSONUtils.createObjectNode();
channel.set("speed", speed);
ObjectNode transport = JSONUtils.createObjectNode();
transport.set("channel", channel);
ObjectNode core = JSONUtils.createObjectNode();
core.set("transport", transport);
return core;
}
/**
* create command
*
* @return shell command file name
* @throws Exception if error throws Exception
*/
private String buildShellCommandFile(String jobConfigFilePath, Map<String, Property> paramsMap)
throws Exception {
// generate scripts
String fileName = String.format("%s/%s_node.%s",
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTaskAppId(),
OSUtils.isWindows() ? "bat" : "sh");
Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
// datax python command
StringBuilder sbr = new StringBuilder();
sbr.append(DATAX_PYTHON);
sbr.append(" ");
sbr.append(DATAX_PATH);
sbr.append(" ");
sbr.append(loadJvmEnv(dataXParameters));
sbr.append(jobConfigFilePath);
// replace placeholder
String dataxCommand = ParameterUtils.convertParameterPlaceholders(sbr.toString(), ParamUtils.convert(paramsMap));
logger.debug("raw script : {}", dataxCommand);
// create shell command file
Set<PosixFilePermission> perms = PosixFilePermissions.fromString(Constants.RWXR_XR_X);
FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
if (OSUtils.isWindows()) {
Files.createFile(path);
} else {
Files.createFile(path, attr);
}
Files.write(path, dataxCommand.getBytes(), StandardOpenOption.APPEND);
return fileName;
}
public String loadJvmEnv(DataxParameters dataXParameters) {
int xms = dataXParameters.getXms() < 1 ? 1 : dataXParameters.getXms();
int xmx = dataXParameters.getXmx() < 1 ? 1 : dataXParameters.getXmx();
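        // clamp both heap sizes to at least 1 GB, then render the datax.py
        // flag, e.g. --jvm="-Xms1G -Xmx1G"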
return String.format(JVM_PARAM, xms, xmx);
}
/**
* parsing synchronized column names in SQL statements
*
* @param dsType the database type of the data source
* @param dtType the database type of the data target
* @param dataSourceCfg the database connection parameters of the data source
* @param sql sql for data synchronization
* @return Keyword converted column names
*/
private String[] parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource dataSourceCfg, String sql) {
String[] columnNames = tryGrammaticalAnalysisSqlColumnNames(dsType, sql);
if (columnNames == null || columnNames.length == 0) {
logger.info("try to execute sql analysis query column name");
columnNames = tryExecuteSqlResolveColumnNames(dataSourceCfg, sql);
}
notNull(columnNames, String.format("parsing sql columns failed : %s", sql));
return DataxUtils.convertKeywordsColumns(dtType, columnNames);
}
/**
* try grammatical parsing column
*
* @param dbType database type
* @param sql sql for data synchronization
* @return column name array
* @throws RuntimeException if error throws RuntimeException
*/
private String[] tryGrammaticalAnalysisSqlColumnNames(DbType dbType, String sql) {
String[] columnNames;
try {
SQLStatementParser parser = DataxUtils.getSqlStatementParser(dbType, sql);
notNull(parser, String.format("database driver [%s] is not support", dbType.toString()));
SQLStatement sqlStatement = parser.parseStatement();
SQLSelectStatement sqlSelectStatement = (SQLSelectStatement) sqlStatement;
SQLSelect sqlSelect = sqlSelectStatement.getSelect();
List<SQLSelectItem> selectItemList = null;
if (sqlSelect.getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock block = (SQLSelectQueryBlock) sqlSelect.getQuery();
selectItemList = block.getSelectList();
} else if (sqlSelect.getQuery() instanceof SQLUnionQuery) {
SQLUnionQuery unionQuery = (SQLUnionQuery) sqlSelect.getQuery();
SQLSelectQueryBlock block = (SQLSelectQueryBlock) unionQuery.getRight();
selectItemList = block.getSelectList();
}
notNull(selectItemList,
String.format("select query type [%s] is not support", sqlSelect.getQuery().toString()));
columnNames = new String[selectItemList.size()];
for (int i = 0; i < selectItemList.size(); i++) {
SQLSelectItem item = selectItemList.get(i);
String columnName = null;
if (item.getAlias() != null) {
columnName = item.getAlias();
} else if (item.getExpr() != null) {
if (item.getExpr() instanceof SQLPropertyExpr) {
SQLPropertyExpr expr = (SQLPropertyExpr) item.getExpr();
columnName = expr.getName();
} else if (item.getExpr() instanceof SQLIdentifierExpr) {
SQLIdentifierExpr expr = (SQLIdentifierExpr) item.getExpr();
columnName = expr.getName();
}
} else {
throw new RuntimeException(
String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
}
if (columnName == null) {
throw new RuntimeException(
String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
}
columnNames[i] = columnName;
}
} catch (Exception e) {
logger.warn(e.getMessage(), e);
return null;
}
return columnNames;
}
/**
* try to execute sql to resolve column names
*
* @param baseDataSource the database connection parameters
* @param sql sql for data synchronization
* @return column name array
*/
public String[] tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource, String sql) {
String[] columnNames;
sql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql);
sql = sql.replace(";", "");
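        // metadata probe: the always-false predicate returns no rows, but the
        // ResultSetMetaData of the wrapped query still exposes the column
        // names of the projected result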
try (
Connection connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(), baseDataSource.getUser(),
baseDataSource.getPassword());
PreparedStatement stmt = connection.prepareStatement(sql);
ResultSet resultSet = stmt.executeQuery()) {
ResultSetMetaData md = resultSet.getMetaData();
int num = md.getColumnCount();
columnNames = new String[num];
for (int i = 1; i <= num; i++) {
columnNames[i - 1] = md.getColumnName(i);
}
} catch (SQLException e) {
logger.warn(e.getMessage(), e);
return null;
}
return columnNames;
}
@Override
public AbstractParameters getParameters() {
return dataXParameters;
}
private void notNull(Object obj, String message) {
if (obj == null) {
throw new RuntimeException(message);
}
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,241 | Bug: create dataX job json error | https://github.com/apache/incubator-dolphinscheduler/blob/dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
lines 313-314
The reader and writer entries in the generated DataX job JSON file are JSON strings rather than JSON objects, so DataX fails to parse the job JSON file. | https://github.com/apache/dolphinscheduler/issues/5241 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-04-09T07:24:56Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/DataxUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
import com.alibaba.druid.sql.dialect.oracle.parser.OracleStatementParser;
import com.alibaba.druid.sql.dialect.postgresql.parser.PGSQLStatementParser;
import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
* DataxUtils Tester.
*/
public class DataxUtilsTest {
/**
*
* Method: getReaderPluginName(DbType dbType)
*
*/
@Test
public void testGetReaderPluginName() {
assertEquals(DataxUtils.DATAX_READER_PLUGIN_MYSQL, DataxUtils.getReaderPluginName(DbType.MYSQL));
assertEquals(DataxUtils.DATAX_READER_PLUGIN_POSTGRESQL, DataxUtils.getReaderPluginName(DbType.POSTGRESQL));
assertEquals(DataxUtils.DATAX_READER_PLUGIN_SQLSERVER, DataxUtils.getReaderPluginName(DbType.SQLSERVER));
assertEquals(DataxUtils.DATAX_READER_PLUGIN_ORACLE, DataxUtils.getReaderPluginName(DbType.ORACLE));
        assertNull(DataxUtils.getReaderPluginName(DbType.DB2));
}
/**
*
* Method: getWriterPluginName(DbType dbType)
*
*/
@Test
public void testGetWriterPluginName() {
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_MYSQL, DataxUtils.getWriterPluginName(DbType.MYSQL));
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_POSTGRESQL, DataxUtils.getWriterPluginName(DbType.POSTGRESQL));
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_SQLSERVER, DataxUtils.getWriterPluginName(DbType.SQLSERVER));
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_ORACLE, DataxUtils.getWriterPluginName(DbType.ORACLE));
        assertNull(DataxUtils.getWriterPluginName(DbType.DB2));
}
/**
*
* Method: getSqlStatementParser(DbType dbType, String sql)
*
*/
@Test
public void testGetSqlStatementParser() throws Exception {
assertTrue(DataxUtils.getSqlStatementParser(DbType.MYSQL, "select 1") instanceof MySqlStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.POSTGRESQL, "select 1") instanceof PGSQLStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.ORACLE, "select 1") instanceof OracleStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.SQLSERVER, "select 1") instanceof SQLServerStatementParser);
        assertNull(DataxUtils.getSqlStatementParser(DbType.DB2, "select 1"));
}
/**
*
* Method: convertKeywordsColumns(DbType dbType, String[] columns)
*
*/
@Test
public void testConvertKeywordsColumns() throws Exception {
String[] fromColumns = new String[]{"`select`", "from", "\"where\"", " table "};
String[] targetColumns = new String[]{"`select`", "`from`", "`where`", "`table`"};
String[] toColumns = DataxUtils.convertKeywordsColumns(DbType.MYSQL, fromColumns);
        assertEquals(fromColumns.length, toColumns.length);
for (int i = 0; i < toColumns.length; i++) {
assertEquals(targetColumns[i], toColumns[i]);
}
}
/**
*
* Method: doConvertKeywordsColumn(DbType dbType, String column)
*
*/
@Test
public void testDoConvertKeywordsColumn() throws Exception {
assertEquals("`select`", DataxUtils.doConvertKeywordsColumn(DbType.MYSQL, " \"`select`\" "));
assertEquals("\"select\"", DataxUtils.doConvertKeywordsColumn(DbType.POSTGRESQL, " \"`select`\" "));
assertEquals("`select`", DataxUtils.doConvertKeywordsColumn(DbType.SQLSERVER, " \"`select`\" "));
assertEquals("\"select\"", DataxUtils.doConvertKeywordsColumn(DbType.ORACLE, " \"`select`\" "));
assertEquals("select", DataxUtils.doConvertKeywordsColumn(DbType.DB2, " \"`select`\" "));
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,241 | Bug: create dataX job json error | [https://github.com/apache/incubator-dolphinscheduler/blob/dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java](url)
line:313-314
Reader and Writer in datax job json file is json string. DataX parse job json file failed. | https://github.com/apache/dolphinscheduler/issues/5241 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-04-09T07:24:56Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.datax;
import static org.apache.dolphinscheduler.common.enums.CommandType.START_PROCESS;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* DataxTask Tester.
*/
public class DataxTaskTest {
private static final Logger logger = LoggerFactory.getLogger(DataxTaskTest.class);
private static final String CONNECTION_PARAMS = " {\n"
+ " \"user\":\"root\",\n"
+ " \"password\":\"123456\",\n"
+ " \"address\":\"jdbc:mysql://127.0.0.1:3306\",\n"
+ " \"database\":\"test\",\n"
+ " \"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test\"\n"
+ "}";
private DataxTask dataxTask;
private ProcessService processService;
private ShellCommandExecutor shellCommandExecutor;
private ApplicationContext applicationContext;
private TaskExecutionContext taskExecutionContext;
private final TaskProps props = new TaskProps();
@Before
public void before()
throws Exception {
        setTaskParams(0);
}
    private void setTaskParams(Integer customConfig) {
processService = Mockito.mock(ProcessService.class);
shellCommandExecutor = Mockito.mock(ShellCommandExecutor.class);
applicationContext = Mockito.mock(ApplicationContext.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
Mockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
TaskProps props = new TaskProps();
props.setExecutePath("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstanceId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
if (customConfig == 1) {
props.setTaskParams(
"{\n"
+ " \"customConfig\":1,\n"
+ " \"localParams\":[\n"
+ " {\n"
+ " \"prop\":\"test\",\n"
+ " \"value\":\"38294729\"\n"
+ " }\n"
+ " ],\n"
+ " \"json\":\""
+ "{\"job\":{\"setting\":{\"speed\":{\"byte\":1048576},\"errorLimit\":{\"record\":0,\"percentage\":0.02}},\"content\":["
+ "{\"reader\":{\"name\":\"rdbmsreader\",\"parameter\":{\"username\":\"xxx\",\"password\":\"${test}\",\"column\":[\"id\",\"name\"],\"splitPk\":\"pk\",\""
+ "connection\":[{\"querySql\":[\"SELECT * from dual\"],\"jdbcUrl\":[\"jdbc:dm://ip:port/database\"]}],\"fetchSize\":1024,\"where\":\"1 = 1\"}},\""
+ "writer\":{\"name\":\"streamwriter\",\"parameter\":{\"print\":true}}}]}}\"\n"
+ "}");
} else {
props.setTaskParams(
"{\n"
+ " \"customConfig\":0,\n"
+ " \"targetTable\":\"test\",\n"
+ " \"postStatements\":[\n"
+ " \"delete from test\"\n"
+ " ],\n"
+ " \"jobSpeedByte\":0,\n"
+ " \"jobSpeedRecord\":1000,\n"
+ " \"dtType\":\"MYSQL\",\n"
+ " \"dataSource\":1,\n"
+ " \"dsType\":\"MYSQL\",\n"
+ " \"dataTarget\":2,\n"
+ " \"sql\":\"select 1 as test from dual\",\n"
+ " \"preStatements\":[\n"
+ " \"delete from test\"\n"
+ " ]\n"
+ "}");
}
taskExecutionContext = Mockito.mock(TaskExecutionContext.class);
Mockito.when(taskExecutionContext.getTaskParams()).thenReturn(props.getTaskParams());
Mockito.when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
Mockito.when(taskExecutionContext.getTaskAppId()).thenReturn(UUID.randomUUID().toString());
Mockito.when(taskExecutionContext.getTenantCode()).thenReturn("root");
Mockito.when(taskExecutionContext.getStartTime()).thenReturn(new Date());
Mockito.when(taskExecutionContext.getTaskTimeout()).thenReturn(10000);
Mockito.when(taskExecutionContext.getLogPath()).thenReturn("/tmp/dx");
DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext();
dataxTaskExecutionContext.setSourcetype(0);
dataxTaskExecutionContext.setTargetType(0);
dataxTaskExecutionContext.setSourceConnectionParams(CONNECTION_PARAMS);
dataxTaskExecutionContext.setTargetConnectionParams(CONNECTION_PARAMS);
Mockito.when(taskExecutionContext.getDataxTaskExecutionContext()).thenReturn(dataxTaskExecutionContext);
dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
dataxTask.init();
props.setCmdTypeIfComplement(START_PROCESS);
Mockito.when(processService.findDataSourceById(1)).thenReturn(getDataSource());
Mockito.when(processService.findDataSourceById(2)).thenReturn(getDataSource());
Mockito.when(processService.findProcessInstanceByTaskId(1)).thenReturn(getProcessInstance());
String fileName = String.format("%s/%s_node.sh", props.getExecutePath(), props.getTaskAppId());
try {
Mockito.when(shellCommandExecutor.run(fileName)).thenReturn(null);
} catch (Exception e) {
e.printStackTrace();
}
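// the spy is re-created here, after the stubs above, replacing the instance built earlier in this method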
dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
dataxTask.init();
}
private DataSource getDataSource() {
DataSource dataSource = new DataSource();
dataSource.setType(DbType.MYSQL);
dataSource.setConnectionParams(CONNECTION_PARAMS);
dataSource.setUserId(1);
return dataSource;
}
private ProcessInstance getProcessInstance() {
ProcessInstance processInstance = new ProcessInstance();
processInstance.setCommandType(START_PROCESS);
processInstance.setScheduleTime(new Date());
return processInstance;
}
@After
public void after()
throws Exception {
}
/**
* Method: DataxTask()
*/
@Test
public void testDataxTask()
throws Exception {
TaskProps props = new TaskProps();
props.setExecutePath("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstanceId(1);
props.setTenantCode("1");
Assert.assertNotNull(new DataxTask(null, logger));
}
/**
* Method: init
*/
@Test
public void testInit()
throws Exception {
try {
dataxTask.init();
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: handle()
*/
@Test
public void testHandle()
throws Exception {
}
/**
* Method: cancelApplication()
*/
@Test
public void testCancelApplication()
throws Exception {
try {
dataxTask.cancelApplication(true);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource
* dataSourceCfg, String sql)
*/
@Test
public void testParsingSqlColumnNames()
throws Exception {
try {
BaseDataSource dataSource = DataSourceFactory.getDatasource(getDataSource().getType(),
getDataSource().getConnectionParams());
Method method = DataxTask.class.getDeclaredMethod("parsingSqlColumnNames", DbType.class, DbType.class, BaseDataSource.class, String.class);
method.setAccessible(true);
String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, DbType.MYSQL, dataSource, "select 1 as a, 2 as `table` from dual");
Assert.assertNotNull(columns);
Assert.assertEquals(2, columns.length);
Assert.assertEquals("[`a`, `table`]", Arrays.toString(columns));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: tryGrammaticalParsingSqlColumnNames(DbType dbType, String sql)
*/
@Test
public void testTryGrammaticalAnalysisSqlColumnNames()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("tryGrammaticalAnalysisSqlColumnNames", DbType.class, String.class);
method.setAccessible(true);
String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, "select t1.a, t1.b from test t1 union all select a, t2.b from (select a, b from test) t2");
Assert.assertNotNull(columns);
Assert.assertEquals(2, columns.length);
Assert.assertEquals("[a, b]", Arrays.toString(columns));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource,
* String sql)
*/
@Test
public void testTryExecuteSqlResolveColumnNames()
throws Exception {
// TODO: Test goes here...
}
/**
* Method: buildDataxJsonFile()
*/
@Test
public void testBuildDataxJsonFile()
throws Exception {
try {
setTaskParams(1);
Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
method.setAccessible(true);
String filePath = (String) method.invoke(dataxTask, null);
Assert.assertNotNull(filePath);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJsonFile()
*/
@Test
public void testBuildDataxJsonFile0()
throws Exception {
try {
setTaskParams(0);
Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
method.setAccessible(true);
String filePath = (String) method.invoke(dataxTask, null);
Assert.assertNotNull(filePath);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJobContentJson()
*/
@Test
public void testBuildDataxJobContentJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxJobContentJson");
method.setAccessible(true);
List<ObjectNode> contentList = (List<ObjectNode>) method.invoke(dataxTask, null);
Assert.assertNotNull(contentList);
ObjectNode content = contentList.get(0);
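// reader/writer are stored as JSON text inside the content node, so they are re-parsed before inspection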
JsonNode reader = JSONUtils.parseObject(content.path("reader").asText());
Assert.assertNotNull(reader);
String readerPluginName = reader.path("name").asText();
Assert.assertEquals(DataxUtils.DATAX_READER_PLUGIN_MYSQL, readerPluginName);
JsonNode writer = JSONUtils.parseObject(content.path("writer").asText());
Assert.assertNotNull(writer);
String writerPluginName = writer.path("name").asText();
Assert.assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_MYSQL, writerPluginName);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJobSettingJson()
*/
@Test
public void testBuildDataxJobSettingJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxJobSettingJson");
method.setAccessible(true);
JsonNode setting = (JsonNode) method.invoke(dataxTask, null);
Assert.assertNotNull(setting);
Assert.assertNotNull(setting.get("speed"));
Assert.assertNotNull(setting.get("errorLimit"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxCoreJson()
*/
@Test
public void testBuildDataxCoreJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxCoreJson");
method.setAccessible(true);
ObjectNode coreConfig = (ObjectNode) method.invoke(dataxTask, null);
Assert.assertNotNull(coreConfig);
Assert.assertNotNull(coreConfig.get("transport"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildShellCommandFile(String jobConfigFilePath)
*/
@Test
public void testBuildShellCommandFile()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildShellCommandFile", String.class);
method.setAccessible(true);
Assert.assertNotNull(method.invoke(dataxTask, "test.json"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: getParameters
*/
@Test
public void testGetParameters()
throws Exception {
Assert.assertNotNull(dataxTask.getParameters());
}
/**
* Method: notNull(Object obj, String message)
*/
@Test
public void testNotNull()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("notNull", Object.class, String.class);
method.setAccessible(true);
method.invoke(dataxTask, "abc", "test throw RuntimeException");
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
@Test
public void testLoadJvmEnv() {
DataxTask dataxTask = new DataxTask(null, null);
DataxParameters dataxParameters = new DataxParameters();
dataxParameters.setXms(0);
dataxParameters.setXmx(-100);
String actual = dataxTask.loadJvmEnv(dataxParameters);
String expected = " --jvm=\"-Xms1G -Xmx1G\" ";
Assert.assertEquals(expected, actual);
dataxParameters.setXms(13);
dataxParameters.setXmx(14);
actual = dataxTask.loadJvmEnv(dataxParameters);
expected = " --jvm=\"-Xms13G -Xmx14G\" ";
Assert.assertEquals(expected, actual);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5241 | Bug: create dataX job json error | https://github.com/apache/incubator-dolphinscheduler/blob/dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
line:313-314
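A minimal hypothetical sketch (class and values invented for illustration; assuming Jackson's `ObjectNode`, which `DataxTask` builds the job JSON with) of why stringifying the nodes breaks the output:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class Issue5241Sketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode content = mapper.createObjectNode();
        ObjectNode reader = mapper.createObjectNode();
        reader.put("name", "mysqlreader");

        // broken: stringifying the node turns "reader" into a JSON string
        content.put("reader", reader.toString());
        System.out.println(content); // {"reader":"{\"name\":\"mysqlreader\"}"}

        // expected: attach the node itself so "reader" stays a JSON object
        content.set("reader", reader);
        System.out.println(content); // {"reader":{"name":"mysqlreader"}}
    }
}
```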
The Reader and Writer entries in the DataX job JSON file are serialized as JSON strings instead of JSON objects, so DataX fails to parse the job JSON file. | https://github.com/apache/dolphinscheduler/issues/5241 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-04-09T07:24:56Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/datax.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="datax-model">
<m-list-box>
<div slot="text">{{$t('Custom template')}}</div>
<div slot="content">
<label class="label-box">
<div style="padding-top: 5px;">
<el-switch v-model="enable" @change="_onSwitch" :disabled="isDetails"></el-switch>
</div>
</label>
</div>
</m-list-box>
<template v-if="!enable">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refDs"
@on-dsData="_onDsData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dsType,datasource:datasource }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="form-mirror">
<textarea
id="code-sql-mirror"
name="code-sql-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}</div>
<div slot="content">
<m-datasource
ref="refDt"
@on-dsData="_onDtData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dtType,datasource:datatarget }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetTable')}}</div>
<div slot="content">
<el-input
type="input"
size="small"
v-model="targetTable"
:placeholder="$t('Please enter the table of target')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Pre Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPreStatements"
@on-statement-list="_onPreStatements"
:statement-list="preStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Post Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPostStatements"
@on-statement-list="_onPostStatements"
:statement-list="postStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedByte')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedByte" :list="[0,1,10,50,100,512]">
</m-select-input>
<span>({{$t('0 means unlimited by byte')}})</span>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedRecord')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedRecord" :list="[0,500,1000,1500,2000,2500,3000]">
</m-select-input>
<span>({{$t('0 means unlimited by count')}})</span>
</div>
</m-list-box>
</template>
<template v-else>
<m-list-box>
<div slot="text">json</div>
<div slot="content">
<div class="form-mirror">
<textarea
id="code-json-mirror"
name="code-json-mirror"
style="opacity: 0;">
</textarea>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</template>
<m-list-box>
<div slot="text">{{$t('Running Memory')}}</div>
<div slot="content">
<span>{{$t('Min Memory')}}</span>
<m-select-input v-model="xms" :list="[1,2,3,4]">
</m-select-input>
<span> G </span>
<span>{{$t('Max Memory')}}</span>
<m-select-input v-model="xmx" :list="[1,2,3,4]">
</m-select-input>
<span> G</span>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
:append-to-body="true"
width="80%">
<m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue" @closeAble="closeAble"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import mStatementList from './_source/statementList'
import disabledState from '@/module/mixin/disabledState'
import mSelectInput from '../_source/selectInput'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
let jsonEditor
export default {
name: 'datax',
data () {
return {
// whether the custom (JSON) template is enabled
enable: false,
// data source type
dsType: '',
// data source
datasource: '',
// data target type
dtType: '',
// data target
datatarget: '',
// Return to the selected data source
rtDatasource: '',
// Return to the selected data target
rtDatatarget: '',
// Sql statement
sql: '',
json: '',
// target table
targetTable: '',
// Pre statements
preStatements: [],
// Post statements
postStatements: [],
// speed byte
jobSpeedByte: 0,
// speed record
jobSpeedRecord: 1000,
// Custom parameter
localParams: [],
customConfig: 0,
// jvm memory xms
xms: 1,
// jvm memory xmx
xmx: 1,
scriptBoxDialog: false,
item: ''
}
},
mixins: [disabledState],
props: {
backfillItem: Object,
createNodeId: Number
},
methods: {
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
_onSwitch (is) {
if (is) {
this.customConfig = 1
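// wait for the v-if branch to render before mounting CodeMirror on the new textarea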
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
this.customConfig = 0
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
/**
* return data source
*/
_onDsData (o) {
this.dsType = o.type
this.rtDatasource = o.datasource
},
/**
* return data target
*/
_onDtData (o) {
this.dtType = o.type
this.rtDatatarget = o.datasource
},
/**
* return pre statements
*/
_onPreStatements (a) {
this.preStatements = a
},
/**
* return post statements
*/
_onPostStatements (a) {
this.postStatements = a
},
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* verification
*/
_verification () {
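// emits one of two shapes: custom template (json + localParams) or form mode (datasource, sql, statements, speed)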
if (this.customConfig) {
if (!jsonEditor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a JSON Statement(required)')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
json: jsonEditor.getValue(),
localParams: this.localParams,
xms: +this.xms,
xmx: +this.xmx
})
return true
} else {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDs._verifDatasource()) {
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDt._verifDatasource()) {
return false
}
if (!this.targetTable) {
this.$message.warning(`${i18n.$t('Please enter a Target Table(required)')}`)
return false
}
// preStatements Subcomponent verification
if (!this.$refs.refPreStatements._verifProp()) {
return false
}
// postStatements Subcomponent verification
if (!this.$refs.refPostStatements._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor.getValue(),
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements,
xms: +this.xms,
xmx: +this.xmx
})
return true
}
},
/**
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-sql-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', () => {
this._cacheParams()
})
editor.setValue(this.sql)
return editor
},
_handlerJsonEditor () {
this._destroyJsonEditor()
// jsonEditor
jsonEditor = codemirror('code-json-mirror', {
mode: 'json',
readOnly: this.isDetails
})
this.keypress = () => {
if (!jsonEditor.getOption('readOnly')) {
jsonEditor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
jsonEditor.on('keypress', this.keypress)
jsonEditor.on('changes', () => {
// this._cacheParams()
})
jsonEditor.setValue(this.json)
return jsonEditor
},
_cacheParams () {
this.$emit('on-cache-params', {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor ? editor.getValue() : '',
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements,
xms: +this.xms,
xmx: +this.xmx
})
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
editor.off($('.code-sql-mirror'), 'changes', this.changes)
}
},
_destroyJsonEditor () {
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
jsonEditor.off($('.code-json-mirror'), 'keypress', this.keypress)
jsonEditor.off($('.code-json-mirror'), 'changes', this.changes)
}
}
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
// set jvm memory
this.xms = o.params.xms || 1
this.xmx = o.params.xmx || 1
// backfill
if (o.params.customConfig === 0) {
this.customConfig = 0
this.enable = false
this.dsType = o.params.dsType || ''
this.datasource = o.params.dataSource || ''
this.dtType = o.params.dtType || ''
this.datatarget = o.params.dataTarget || ''
this.sql = o.params.sql || ''
this.targetTable = o.params.targetTable || ''
this.jobSpeedByte = o.params.jobSpeedByte / 1024 || 0
this.jobSpeedRecord = o.params.jobSpeedRecord || 0
this.preStatements = o.params.preStatements || []
this.postStatements = o.params.postStatements || []
} else {
this.customConfig = 1
this.enable = true
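// defaults must match data(): json is a string (''), localParams an array ([])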
this.json = o.params.json || ''
this.localParams = o.params.localParams || []
}
}
},
mounted () {
if (this.customConfig) {
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
destroyed () {
/**
* Destroy the editor instance
*/
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
}
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
jsonEditor.off($('.code-json-mirror'), 'keypress', this.keypress)
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
}
},
computed: {
cacheParams () {
return {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements
}
}
},
components: { mListBox, mDatasource, mLocalParams, mStatementList, mSelectInput, mScriptBox }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5241 | Bug: create dataX job json error | https://github.com/apache/incubator-dolphinscheduler/blob/dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
line:313-314
The Reader and Writer entries in the DataX job JSON file are serialized as JSON strings instead of JSON objects, so DataX fails to parse the job JSON file. | https://github.com/apache/dolphinscheduler/issues/5241 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-04-09T07:24:56Z" | java | "2021-04-14T03:31:40Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed, easy-to-extend visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing and making the scheduling system usable out of the box
for data processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.incubator.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.incubator.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.incubator.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<revision>1.3.6-SNAPSHOT</revision>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugin.version>2.2.0</rpm-maven-plugin.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugin.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
<include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqlParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/DolphinSchedulerPluginLoaderTest.java</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/config/MasterConfigTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/register/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/AbstractCommandExecutorTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/zk/RegisterOperatorTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/MailUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
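<!-- note: the check goal is bound to the compile phase below, but execution is currently disabled via skip -->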
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,180 | [Improvement][Module Name] DataX Node doesn't Support Clickhouse as the datasource | **Describe the question**
When I create a DataX node, I can't use a Clickhouse Datasource as the source or the target in a DataX task. Currently, I can only select from Mysql, Postgresql, Oracle and Sqlserver.
![image](https://user-images.githubusercontent.com/23460917/112953652-3d8dcd00-9170-11eb-87e2-029ab4d56939.png)
**Describe alternatives you've considered**
I hope I can use Clickhouse as my datasource.
| https://github.com/apache/dolphinscheduler/issues/5180 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-03-30T07:54:54Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/DataxUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.enums.DbType;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
import com.alibaba.druid.sql.dialect.oracle.parser.OracleStatementParser;
import com.alibaba.druid.sql.dialect.postgresql.parser.PGSQLStatementParser;
import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser;
import com.alibaba.druid.sql.parser.SQLStatementParser;
public class DataxUtils {
public static final String DATAX_READER_PLUGIN_MYSQL = "mysqlreader";
public static final String DATAX_READER_PLUGIN_POSTGRESQL = "postgresqlreader";
public static final String DATAX_READER_PLUGIN_ORACLE = "oraclereader";
public static final String DATAX_READER_PLUGIN_SQLSERVER = "sqlserverreader";
public static final String DATAX_WRITER_PLUGIN_MYSQL = "mysqlwriter";
public static final String DATAX_WRITER_PLUGIN_POSTGRESQL = "postgresqlwriter";
public static final String DATAX_WRITER_PLUGIN_ORACLE = "oraclewriter";
public static final String DATAX_WRITER_PLUGIN_SQLSERVER = "sqlserverwriter";
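/**
 * map a DbType to its DataX reader plugin name.
 * Types without a case below (e.g. CLICKHOUSE, DB2) fall through to null at this
 * revision; supporting ClickHouse (issue #5180) would presumably mean adding a
 * "clickhousereader" constant and a CLICKHOUSE case here (hypothetical sketch).
 *
 * @param dbType database type of the data source
 * @return DataX reader plugin name, or null if the type is not supported
 */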
public static String getReaderPluginName(DbType dbType) {
switch (dbType) {
case MYSQL:
return DATAX_READER_PLUGIN_MYSQL;
case POSTGRESQL:
return DATAX_READER_PLUGIN_POSTGRESQL;
case ORACLE:
return DATAX_READER_PLUGIN_ORACLE;
case SQLSERVER:
return DATAX_READER_PLUGIN_SQLSERVER;
default:
return null;
}
}
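/**
 * map a DbType to its DataX writer plugin name.
 *
 * @param dbType database type of the data target
 * @return DataX writer plugin name, or null if the type is not supported
 */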
public static String getWriterPluginName(DbType dbType) {
switch (dbType) {
case MYSQL:
return DATAX_WRITER_PLUGIN_MYSQL;
case POSTGRESQL:
return DATAX_WRITER_PLUGIN_POSTGRESQL;
case ORACLE:
return DATAX_WRITER_PLUGIN_ORACLE;
case SQLSERVER:
return DATAX_WRITER_PLUGIN_SQLSERVER;
default:
return null;
}
}
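/**
 * build a druid SQL statement parser for the given dialect, used to extract
 * column names from the synchronization query without executing it.
 *
 * @param dbType database type of the data source
 * @param sql sql for data synchronization
 * @return dialect-specific parser, or null if the type is not supported
 */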
public static SQLStatementParser getSqlStatementParser(DbType dbType, String sql) {
switch (dbType) {
case MYSQL:
return new MySqlStatementParser(sql);
case POSTGRESQL:
return new PGSQLStatementParser(sql);
case ORACLE:
return new OracleStatementParser(sql);
case SQLSERVER:
return new SQLServerStatementParser(sql);
default:
return null;
}
}
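/**
 * quote each column name according to the target dialect so that reserved
 * keywords (select, from, where, ...) remain valid identifiers.
 *
 * @param dbType database type of the data target
 * @param columns raw column names parsed from the source sql
 * @return quoted column names, or null if columns is null
 */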
public static String[] convertKeywordsColumns(DbType dbType, String[] columns) {
if (columns == null) {
return null;
}
String[] toColumns = new String[columns.length];
for (int i = 0; i < columns.length; i++) {
toColumns[i] = doConvertKeywordsColumn(dbType, columns[i]);
}
return toColumns;
}
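/**
 * strip any existing backtick/quote characters from a single column name and
 * re-quote it with the delimiter expected by the target dialect.
 *
 * @param dbType database type of the data target
 * @param column raw column name
 * @return quoted column name (returned unchanged for unsupported types)
 */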
public static String doConvertKeywordsColumn(DbType dbType, String column) {
if (column == null) {
return column;
}
column = column.trim();
column = column.replace("`", "");
column = column.replace("\"", "");
column = column.replace("'", "");
switch (dbType) {
case MYSQL:
return String.format("`%s`", column);
case POSTGRESQL:
return String.format("\"%s\"", column);
case ORACLE:
return String.format("\"%s\"", column);
case SQLSERVER:
return String.format("`%s`", column);
default:
return column;
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,180 | [Improvement][Module Name] DataX Node doesn't Support Clickhouse as the datasource | **Describe the question**
When I create a DataX node, I can't use a Clickhouse Datasource as the source or the target in a DataX task. Currently, I can only select from Mysql, Postgresql, Oracle and Sqlserver.
![image](https://user-images.githubusercontent.com/23460917/112953652-3d8dcd00-9170-11eb-87e2-029ab4d56939.png)
**Describe alternatives you've considered**
I hope I can use Clickhouse as my datasource.
| https://github.com/apache/dolphinscheduler/issues/5180 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-03-30T07:54:54Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.datax;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.CommandExecuteResult;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
import com.alibaba.druid.sql.ast.statement.SQLSelect;
import com.alibaba.druid.sql.ast.statement.SQLSelectItem;
import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.ast.statement.SQLUnionQuery;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* DataX task
*/
public class DataxTask extends AbstractTask {
/**
* jvm parameters
*/
public static final String JVM_PARAM = " --jvm=\"-Xms%sG -Xmx%sG\" ";
/**
* python process (datax only supports version 2.7 by default)
*/
private static final String DATAX_PYTHON = "python2.7";
/**
* datax path
*/
private static final String DATAX_PATH = "${DATAX_HOME}/bin/datax.py";
/**
* datax channel count
*/
private static final int DATAX_CHANNEL_COUNT = 1;
/**
* datax parameters
*/
private DataxParameters dataXParameters;
/**
* shell command executor
*/
private ShellCommandExecutor shellCommandExecutor;
/**
* taskExecutionContext
*/
private TaskExecutionContext taskExecutionContext;
/**
* constructor
*
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/
public DataxTask(TaskExecutionContext taskExecutionContext, Logger logger) {
super(taskExecutionContext, logger);
this.taskExecutionContext = taskExecutionContext;
this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle,
taskExecutionContext, logger);
}
/**
* init DataX config
*/
@Override
public void init() {
logger.info("datax task params {}", taskExecutionContext.getTaskParams());
dataXParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), DataxParameters.class);
if (!dataXParameters.checkParameters()) {
throw new RuntimeException("datax task params is not valid");
}
}
/**
* run DataX process
*
* @throws Exception if error throws Exception
*/
@Override
public void handle() throws Exception {
try {
// set the name of the current thread
String threadLoggerInfoName = String.format("TaskLogInfo-%s", taskExecutionContext.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
// combining local and global parameters
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
dataXParameters.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
// run datax process
String jsonFilePath = buildDataxJsonFile(paramsMap);
String shellCommandFilePath = buildShellCommandFile(jsonFilePath, paramsMap);
CommandExecuteResult commandExecuteResult = shellCommandExecutor.run(shellCommandFilePath);
setExitStatusCode(commandExecuteResult.getExitStatusCode());
setAppIds(commandExecuteResult.getAppIds());
setProcessId(commandExecuteResult.getProcessId());
} catch (Exception e) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
throw e;
}
}
/**
* cancel DataX process
*
* @param cancelApplication cancelApplication
* @throws Exception if error throws Exception
*/
@Override
public void cancelApplication(boolean cancelApplication)
throws Exception {
// cancel process
shellCommandExecutor.cancelApplication();
}
/**
* build datax configuration file
*
* @return datax json file name
* @throws Exception if error throws Exception
*/
private String buildDataxJsonFile(Map<String, Property> paramsMap)
throws Exception {
// generate json
String fileName = String.format("%s/%s_job.json",
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTaskAppId());
String json;
Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
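// two modes: with customConfig the user-supplied DataX JSON template is used
// verbatim (only normalizing line endings), otherwise the job json is
// generated from the form fields (content / setting / core sections)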
if (dataXParameters.getCustomConfig() == Flag.YES.ordinal()) {
json = dataXParameters.getJson().replaceAll("\\r\\n", "\n");
} else {
ObjectNode job = JSONUtils.createObjectNode();
job.putArray("content").addAll(buildDataxJobContentJson());
job.set("setting", buildDataxJobSettingJson());
ObjectNode root = JSONUtils.createObjectNode();
root.set("job", job);
root.set("core", buildDataxCoreJson());
json = root.toString();
}
// replace placeholder
json = ParameterUtils.convertParameterPlaceholders(json, ParamUtils.convert(paramsMap));
logger.debug("datax job json : {}", json);
// create datax json file
FileUtils.writeStringToFile(new File(fileName), json, StandardCharsets.UTF_8);
return fileName;
}
/**
* build datax job config
*
* @return list of datax job content configurations as ObjectNode
*/
private List<ObjectNode> buildDataxJobContentJson() {
DataxTaskExecutionContext dataxTaskExecutionContext = taskExecutionContext.getDataxTaskExecutionContext();
BaseDataSource dataSourceCfg = DataSourceFactory.getDatasource(DbType.of(dataxTaskExecutionContext.getSourcetype()),
dataxTaskExecutionContext.getSourceConnectionParams());
BaseDataSource dataTargetCfg = DataSourceFactory.getDatasource(DbType.of(dataxTaskExecutionContext.getTargetType()),
dataxTaskExecutionContext.getTargetConnectionParams());
List<ObjectNode> readerConnArr = new ArrayList<>();
ObjectNode readerConn = JSONUtils.createObjectNode();
ArrayNode sqlArr = readerConn.putArray("querySql");
for (String sql : new String[]{dataXParameters.getSql()}) {
sqlArr.add(sql);
}
ArrayNode urlArr = readerConn.putArray("jdbcUrl");
for (String url : new String[]{dataSourceCfg.getJdbcUrl()}) {
urlArr.add(url);
}
readerConnArr.add(readerConn);
ObjectNode readerParam = JSONUtils.createObjectNode();
readerParam.put("username", dataSourceCfg.getUser());
readerParam.put("password", dataSourceCfg.getPassword());
readerParam.putArray("connection").addAll(readerConnArr);
ObjectNode reader = JSONUtils.createObjectNode();
reader.put("name", DataxUtils.getReaderPluginName(DbType.of(dataxTaskExecutionContext.getSourcetype())));
reader.set("parameter", readerParam);
List<ObjectNode> writerConnArr = new ArrayList<>();
ObjectNode writerConn = JSONUtils.createObjectNode();
ArrayNode tableArr = writerConn.putArray("table");
for (String table : new String[]{dataXParameters.getTargetTable()}) {
tableArr.add(table);
}
writerConn.put("jdbcUrl", dataTargetCfg.getJdbcUrl());
writerConnArr.add(writerConn);
ObjectNode writerParam = JSONUtils.createObjectNode();
writerParam.put("username", dataTargetCfg.getUser());
writerParam.put("password", dataTargetCfg.getPassword());
String[] columns = parsingSqlColumnNames(DbType.of(dataxTaskExecutionContext.getSourcetype()),
DbType.of(dataxTaskExecutionContext.getTargetType()),
dataSourceCfg, dataXParameters.getSql());
ArrayNode columnArr = writerParam.putArray("column");
for (String column : columns) {
columnArr.add(column);
}
writerParam.putArray("connection").addAll(writerConnArr);
if (CollectionUtils.isNotEmpty(dataXParameters.getPreStatements())) {
ArrayNode preSqlArr = writerParam.putArray("preSql");
for (String preSql : dataXParameters.getPreStatements()) {
preSqlArr.add(preSql);
}
}
if (CollectionUtils.isNotEmpty(dataXParameters.getPostStatements())) {
ArrayNode postSqlArr = writerParam.putArray("postSql");
for (String postSql : dataXParameters.getPostStatements()) {
postSqlArr.add(postSql);
}
}
ObjectNode writer = JSONUtils.createObjectNode();
writer.put("name", DataxUtils.getWriterPluginName(DbType.of(dataxTaskExecutionContext.getTargetType())));
writer.set("parameter", writerParam);
List<ObjectNode> contentList = new ArrayList<>();
ObjectNode content = JSONUtils.createObjectNode();
content.put("reader", reader.toString());
content.put("writer", writer.toString());
contentList.add(content);
return contentList;
}
/**
* build datax setting config
*
* @return datax setting config JSONObject
*/
private ObjectNode buildDataxJobSettingJson() {
ObjectNode speed = JSONUtils.createObjectNode();
speed.put("channel", DATAX_CHANNEL_COUNT);
if (dataXParameters.getJobSpeedByte() > 0) {
speed.put("byte", dataXParameters.getJobSpeedByte());
}
if (dataXParameters.getJobSpeedRecord() > 0) {
speed.put("record", dataXParameters.getJobSpeedRecord());
}
ObjectNode errorLimit = JSONUtils.createObjectNode();
errorLimit.put("record", 0);
errorLimit.put("percentage", 0);
ObjectNode setting = JSONUtils.createObjectNode();
setting.put("speed", speed.toString());
setting.put("errorLimit", errorLimit.toString());
return setting;
}
private ObjectNode buildDataxCoreJson() {
ObjectNode speed = JSONUtils.createObjectNode();
speed.put("channel", DATAX_CHANNEL_COUNT);
if (dataXParameters.getJobSpeedByte() > 0) {
speed.put("byte", dataXParameters.getJobSpeedByte());
}
if (dataXParameters.getJobSpeedRecord() > 0) {
speed.put("record", dataXParameters.getJobSpeedRecord());
}
ObjectNode channel = JSONUtils.createObjectNode();
channel.set("speed", speed);
ObjectNode transport = JSONUtils.createObjectNode();
transport.set("channel", channel);
ObjectNode core = JSONUtils.createObjectNode();
core.set("transport", transport);
return core;
}
/**
* create command
*
* @return shell command file name
* @throws Exception if error throws Exception
*/
private String buildShellCommandFile(String jobConfigFilePath, Map<String, Property> paramsMap)
throws Exception {
// generate scripts
String fileName = String.format("%s/%s_node.%s",
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTaskAppId(),
OSUtils.isWindows() ? "bat" : "sh");
Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
// datax python command
StringBuilder sbr = new StringBuilder();
sbr.append(DATAX_PYTHON);
sbr.append(" ");
sbr.append(DATAX_PATH);
sbr.append(" ");
sbr.append(loadJvmEnv(dataXParameters));
sbr.append(jobConfigFilePath);
// replace placeholder
String dataxCommand = ParameterUtils.convertParameterPlaceholders(sbr.toString(), ParamUtils.convert(paramsMap));
logger.debug("raw script : {}", dataxCommand);
// create shell command file
Set<PosixFilePermission> perms = PosixFilePermissions.fromString(Constants.RWXR_XR_X);
FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
if (OSUtils.isWindows()) {
Files.createFile(path);
} else {
Files.createFile(path, attr);
}
Files.write(path, dataxCommand.getBytes(), StandardOpenOption.APPEND);
return fileName;
}
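/**
 * build the --jvm argument for datax.py; Xms/Xmx values below 1 are clamped to 1G.
 */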
public String loadJvmEnv(DataxParameters dataXParameters) {
int xms = dataXParameters.getXms() < 1 ? 1 : dataXParameters.getXms();
int xmx = dataXParameters.getXmx() < 1 ? 1 : dataXParameters.getXmx();
return String.format(JVM_PARAM, xms, xmx);
}
/**
* parsing synchronized column names in SQL statements
*
* @param dsType the database type of the data source
* @param dtType the database type of the data target
* @param dataSourceCfg the database connection parameters of the data source
* @param sql sql for data synchronization
* @return Keyword converted column names
*/
private String[] parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource dataSourceCfg, String sql) {
String[] columnNames = tryGrammaticalAnalysisSqlColumnNames(dsType, sql);
if (columnNames == null || columnNames.length == 0) {
logger.info("try to execute sql analysis query column name");
columnNames = tryExecuteSqlResolveColumnNames(dataSourceCfg, sql);
}
notNull(columnNames, String.format("parsing sql columns failed : %s", sql));
return DataxUtils.convertKeywordsColumns(dtType, columnNames);
}
/**
* try grammatical parsing column
*
* @param dbType database type
* @param sql sql for data synchronization
* @return column name array
* @throws RuntimeException if error throws RuntimeException
*/
private String[] tryGrammaticalAnalysisSqlColumnNames(DbType dbType, String sql) {
String[] columnNames;
try {
SQLStatementParser parser = DataxUtils.getSqlStatementParser(dbType, sql);
notNull(parser, String.format("database driver [%s] is not support", dbType.toString()));
SQLStatement sqlStatement = parser.parseStatement();
SQLSelectStatement sqlSelectStatement = (SQLSelectStatement) sqlStatement;
SQLSelect sqlSelect = sqlSelectStatement.getSelect();
List<SQLSelectItem> selectItemList = null;
if (sqlSelect.getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock block = (SQLSelectQueryBlock) sqlSelect.getQuery();
selectItemList = block.getSelectList();
} else if (sqlSelect.getQuery() instanceof SQLUnionQuery) {
SQLUnionQuery unionQuery = (SQLUnionQuery) sqlSelect.getQuery();
SQLSelectQueryBlock block = (SQLSelectQueryBlock) unionQuery.getRight();
selectItemList = block.getSelectList();
}
notNull(selectItemList,
String.format("select query type [%s] is not support", sqlSelect.getQuery().toString()));
columnNames = new String[selectItemList.size()];
for (int i = 0; i < selectItemList.size(); i++) {
SQLSelectItem item = selectItemList.get(i);
String columnName = null;
if (item.getAlias() != null) {
columnName = item.getAlias();
} else if (item.getExpr() != null) {
if (item.getExpr() instanceof SQLPropertyExpr) {
SQLPropertyExpr expr = (SQLPropertyExpr) item.getExpr();
columnName = expr.getName();
} else if (item.getExpr() instanceof SQLIdentifierExpr) {
SQLIdentifierExpr expr = (SQLIdentifierExpr) item.getExpr();
columnName = expr.getName();
}
} else {
throw new RuntimeException(
String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
}
if (columnName == null) {
throw new RuntimeException(
String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
}
columnNames[i] = columnName;
}
} catch (Exception e) {
logger.warn(e.getMessage(), e);
return null;
}
return columnNames;
}
/**
* try to execute sql to resolve column names
*
* @param baseDataSource the database connection parameters
* @param sql sql for data synchronization
* @return column name array
*/
public String[] tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource, String sql) {
String[] columnNames;
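// wrap the query in "SELECT t.* FROM ( ... ) t WHERE 0 = 1": the statement
// returns no rows, but its ResultSet metadata still exposes the column names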
sql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql);
sql = sql.replace(";", "");
try (
Connection connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(), baseDataSource.getUser(),
baseDataSource.getPassword());
PreparedStatement stmt = connection.prepareStatement(sql);
ResultSet resultSet = stmt.executeQuery()) {
ResultSetMetaData md = resultSet.getMetaData();
int num = md.getColumnCount();
columnNames = new String[num];
for (int i = 1; i <= num; i++) {
columnNames[i - 1] = md.getColumnName(i);
}
} catch (SQLException e) {
logger.warn(e.getMessage(), e);
return null;
}
return columnNames;
}
@Override
public AbstractParameters getParameters() {
return dataXParameters;
}
private void notNull(Object obj, String message) {
if (obj == null) {
throw new RuntimeException(message);
}
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,180 | [Improvement][Module Name] DataX Node doesn't Support Clickhouse as the datasource | **Describe the question**
When I create a DataX node, I can't use a Clickhouse Datasource as the source or the target in a DataX task. Currently, I can only select from Mysql, Postgresql, Oracle and Sqlserver.
![image](https://user-images.githubusercontent.com/23460917/112953652-3d8dcd00-9170-11eb-87e2-029ab4d56939.png)
**Describe alternatives you've considered**
I hope I can use Clickhouse as my datasource.
| https://github.com/apache/dolphinscheduler/issues/5180 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-03-30T07:54:54Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/DataxUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
import com.alibaba.druid.sql.dialect.oracle.parser.OracleStatementParser;
import com.alibaba.druid.sql.dialect.postgresql.parser.PGSQLStatementParser;
import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* DataxUtils Tester.
*/
public class DataxUtilsTest {
/**
*
* Method: getReaderPluginName(DbType dbType)
*
*/
@Test
public void testGetReaderPluginName() {
assertEquals(DataxUtils.DATAX_READER_PLUGIN_MYSQL, DataxUtils.getReaderPluginName(DbType.MYSQL));
assertEquals(DataxUtils.DATAX_READER_PLUGIN_POSTGRESQL, DataxUtils.getReaderPluginName(DbType.POSTGRESQL));
assertEquals(DataxUtils.DATAX_READER_PLUGIN_SQLSERVER, DataxUtils.getReaderPluginName(DbType.SQLSERVER));
assertEquals(DataxUtils.DATAX_READER_PLUGIN_ORACLE, DataxUtils.getReaderPluginName(DbType.ORACLE));
assertTrue(DataxUtils.getReaderPluginName(DbType.DB2) == null);
}
/**
*
* Method: getWriterPluginName(DbType dbType)
*
*/
@Test
public void testGetWriterPluginName() {
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_MYSQL, DataxUtils.getWriterPluginName(DbType.MYSQL));
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_POSTGRESQL, DataxUtils.getWriterPluginName(DbType.POSTGRESQL));
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_SQLSERVER, DataxUtils.getWriterPluginName(DbType.SQLSERVER));
assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_ORACLE, DataxUtils.getWriterPluginName(DbType.ORACLE));
assertTrue(DataxUtils.getWriterPluginName(DbType.DB2) == null);
}
/**
*
* Method: getSqlStatementParser(DbType dbType, String sql)
*
*/
@Test
public void testGetSqlStatementParser() throws Exception {
assertTrue(DataxUtils.getSqlStatementParser(DbType.MYSQL, "select 1") instanceof MySqlStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.POSTGRESQL, "select 1") instanceof PGSQLStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.ORACLE, "select 1") instanceof OracleStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.SQLSERVER, "select 1") instanceof SQLServerStatementParser);
assertTrue(DataxUtils.getSqlStatementParser(DbType.DB2, "select 1") == null);
}
/**
*
* Method: convertKeywordsColumns(DbType dbType, String[] columns)
*
*/
@Test
public void testConvertKeywordsColumns() throws Exception {
String[] fromColumns = new String[]{"`select`", "from", "\"where\"", " table "};
String[] targetColumns = new String[]{"`select`", "`from`", "`where`", "`table`"};
String[] toColumns = DataxUtils.convertKeywordsColumns(DbType.MYSQL, fromColumns);
assertTrue(fromColumns.length == toColumns.length);
for (int i = 0; i < toColumns.length; i++) {
assertEquals(targetColumns[i], toColumns[i]);
}
}
/**
*
* Method: doConvertKeywordsColumn(DbType dbType, String column)
*
*/
@Test
public void testDoConvertKeywordsColumn() throws Exception {
assertEquals("`select`", DataxUtils.doConvertKeywordsColumn(DbType.MYSQL, " \"`select`\" "));
assertEquals("\"select\"", DataxUtils.doConvertKeywordsColumn(DbType.POSTGRESQL, " \"`select`\" "));
assertEquals("`select`", DataxUtils.doConvertKeywordsColumn(DbType.SQLSERVER, " \"`select`\" "));
assertEquals("\"select\"", DataxUtils.doConvertKeywordsColumn(DbType.ORACLE, " \"`select`\" "));
assertEquals("select", DataxUtils.doConvertKeywordsColumn(DbType.DB2, " \"`select`\" "));
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,180 | [Improvement][Module Name] DataX Node doesn't Support Clickhouse as the datasource | **Describe the question**
When I create a DataX node, I can't use a Clickhouse Datasource as the source or the target in a DataX task. Currently, I can only select from Mysql, Postgresql, Oracle and Sqlserver.
![image](https://user-images.githubusercontent.com/23460917/112953652-3d8dcd00-9170-11eb-87e2-029ab4d56939.png)
**Describe alternatives you've considered**
I hope I can use Clickhouse as my datasource.
| https://github.com/apache/dolphinscheduler/issues/5180 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-03-30T07:54:54Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.datax;
import static org.apache.dolphinscheduler.common.enums.CommandType.START_PROCESS;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* DataxTask Tester.
*/
public class DataxTaskTest {
private static final Logger logger = LoggerFactory.getLogger(DataxTaskTest.class);
private static final String CONNECTION_PARAMS = " {\n"
+ " \"user\":\"root\",\n"
+ " \"password\":\"123456\",\n"
+ " \"address\":\"jdbc:mysql://127.0.0.1:3306\",\n"
+ " \"database\":\"test\",\n"
+ " \"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test\"\n"
+ "}";
private DataxTask dataxTask;
private ProcessService processService;
private ShellCommandExecutor shellCommandExecutor;
private ApplicationContext applicationContext;
private TaskExecutionContext taskExecutionContext;
private final TaskProps props = new TaskProps();
@Before
public void before()
throws Exception {
setTaskParems(0);
}
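// customConfig == 1 backfills a raw DataX JSON template with local params;
// customConfig == 0 builds the job config from the form fields (datasource,
// sql, target table, pre/post statements, speed limits)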
private void setTaskParems(Integer customConfig) {
processService = Mockito.mock(ProcessService.class);
shellCommandExecutor = Mockito.mock(ShellCommandExecutor.class);
applicationContext = Mockito.mock(ApplicationContext.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
Mockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
TaskProps props = new TaskProps();
props.setExecutePath("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstanceId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
if (customConfig == 1) {
props.setTaskParams(
"{\n"
+ " \"customConfig\":1,\n"
+ " \"localParams\":[\n"
+ " {\n"
+ " \"prop\":\"test\",\n"
+ " \"value\":\"38294729\"\n"
+ " }\n"
+ " ],\n"
+ " \"json\":\""
+ "{\"job\":{\"setting\":{\"speed\":{\"byte\":1048576},\"errorLimit\":{\"record\":0,\"percentage\":0.02}},\"content\":["
+ "{\"reader\":{\"name\":\"rdbmsreader\",\"parameter\":{\"username\":\"xxx\",\"password\":\"${test}\",\"column\":[\"id\",\"name\"],\"splitPk\":\"pk\",\""
+ "connection\":[{\"querySql\":[\"SELECT * from dual\"],\"jdbcUrl\":[\"jdbc:dm://ip:port/database\"]}],\"fetchSize\":1024,\"where\":\"1 = 1\"}},\""
+ "writer\":{\"name\":\"streamwriter\",\"parameter\":{\"print\":true}}}]}}\"\n"
+ "}");
} else {
props.setTaskParams(
"{\n"
+ " \"customConfig\":0,\n"
+ " \"targetTable\":\"test\",\n"
+ " \"postStatements\":[\n"
+ " \"delete from test\"\n"
+ " ],\n"
+ " \"jobSpeedByte\":0,\n"
+ " \"jobSpeedRecord\":1000,\n"
+ " \"dtType\":\"MYSQL\",\n"
+ " \"dataSource\":1,\n"
+ " \"dsType\":\"MYSQL\",\n"
+ " \"dataTarget\":2,\n"
+ " \"sql\":\"select 1 as test from dual\",\n"
+ " \"preStatements\":[\n"
+ " \"delete from test\"\n"
+ " ]\n"
+ "}");
}
taskExecutionContext = Mockito.mock(TaskExecutionContext.class);
Mockito.when(taskExecutionContext.getTaskParams()).thenReturn(props.getTaskParams());
Mockito.when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
Mockito.when(taskExecutionContext.getTaskAppId()).thenReturn(UUID.randomUUID().toString());
Mockito.when(taskExecutionContext.getTenantCode()).thenReturn("root");
Mockito.when(taskExecutionContext.getStartTime()).thenReturn(new Date());
Mockito.when(taskExecutionContext.getTaskTimeout()).thenReturn(10000);
Mockito.when(taskExecutionContext.getLogPath()).thenReturn("/tmp/dx");
DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext();
dataxTaskExecutionContext.setSourcetype(0);
dataxTaskExecutionContext.setTargetType(0);
dataxTaskExecutionContext.setSourceConnectionParams(CONNECTION_PARAMS);
dataxTaskExecutionContext.setTargetConnectionParams(CONNECTION_PARAMS);
Mockito.when(taskExecutionContext.getDataxTaskExecutionContext()).thenReturn(dataxTaskExecutionContext);
dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
dataxTask.init();
props.setCmdTypeIfComplement(START_PROCESS);
Mockito.when(processService.findDataSourceById(1)).thenReturn(getDataSource());
Mockito.when(processService.findDataSourceById(2)).thenReturn(getDataSource());
Mockito.when(processService.findProcessInstanceByTaskId(1)).thenReturn(getProcessInstance());
String fileName = String.format("%s/%s_node.sh", props.getExecutePath(), props.getTaskAppId());
try {
Mockito.when(shellCommandExecutor.run(fileName)).thenReturn(null);
} catch (Exception e) {
e.printStackTrace();
}
dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
dataxTask.init();
}
private DataSource getDataSource() {
DataSource dataSource = new DataSource();
dataSource.setType(DbType.MYSQL);
dataSource.setConnectionParams(CONNECTION_PARAMS);
dataSource.setUserId(1);
return dataSource;
}
private ProcessInstance getProcessInstance() {
ProcessInstance processInstance = new ProcessInstance();
processInstance.setCommandType(START_PROCESS);
processInstance.setScheduleTime(new Date());
return processInstance;
}
@After
public void after()
throws Exception {
}
/**
* Method: DataxTask()
*/
@Test
public void testDataxTask()
throws Exception {
TaskProps props = new TaskProps();
props.setExecutePath("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstanceId(1);
props.setTenantCode("1");
Assert.assertNotNull(new DataxTask(null, logger));
}
/**
* Method: init
*/
@Test
public void testInit()
throws Exception {
try {
dataxTask.init();
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: handle()
*/
@Test
public void testHandle()
throws Exception {
}
/**
* Method: cancelApplication()
*/
@Test
public void testCancelApplication()
throws Exception {
try {
dataxTask.cancelApplication(true);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource
* dataSourceCfg, String sql)
*/
@Test
public void testParsingSqlColumnNames()
throws Exception {
try {
BaseDataSource dataSource = DataSourceFactory.getDatasource(getDataSource().getType(),
getDataSource().getConnectionParams());
Method method = DataxTask.class.getDeclaredMethod("parsingSqlColumnNames", DbType.class, DbType.class, BaseDataSource.class, String.class);
method.setAccessible(true);
String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, DbType.MYSQL, dataSource, "select 1 as a, 2 as `table` from dual");
Assert.assertNotNull(columns);
Assert.assertTrue(columns.length == 2);
Assert.assertEquals("[`a`, `table`]", Arrays.toString(columns));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: tryGrammaticalParsingSqlColumnNames(DbType dbType, String sql)
*/
@Test
public void testTryGrammaticalAnalysisSqlColumnNames()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("tryGrammaticalAnalysisSqlColumnNames", DbType.class, String.class);
method.setAccessible(true);
String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, "select t1.a, t1.b from test t1 union all select a, t2.b from (select a, b from test) t2");
Assert.assertNotNull(columns);
Assert.assertTrue(columns.length == 2);
Assert.assertEquals("[a, b]", Arrays.toString(columns));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource,
* String sql)
*/
@Test
public void testTryExecuteSqlResolveColumnNames()
throws Exception {
// TODO: Test goes here...
}
/**
* Method: buildDataxJsonFile()
*/
@Test
public void testBuildDataxJsonFile()
throws Exception {
try {
setTaskParems(1);
Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
method.setAccessible(true);
String filePath = (String) method.invoke(dataxTask, null);
Assert.assertNotNull(filePath);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJsonFile()
*/
@Test
public void testBuildDataxJsonFile0()
throws Exception {
try {
setTaskParems(0);
Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
method.setAccessible(true);
String filePath = (String) method.invoke(dataxTask, null);
Assert.assertNotNull(filePath);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJobContentJson()
*/
@Test
public void testBuildDataxJobContentJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxJobContentJson");
method.setAccessible(true);
List<ObjectNode> contentList = (List<ObjectNode>) method.invoke(dataxTask, null);
Assert.assertNotNull(contentList);
ObjectNode content = contentList.get(0);
JsonNode reader = JSONUtils.parseObject(content.path("reader").asText());
Assert.assertNotNull(reader);
String readerPluginName = reader.path("name").asText();
Assert.assertEquals(DataxUtils.DATAX_READER_PLUGIN_MYSQL, readerPluginName);
JsonNode writer = JSONUtils.parseObject(content.path("writer").asText());
Assert.assertNotNull(writer);
String writerPluginName = writer.path("name").asText();
Assert.assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_MYSQL, writerPluginName);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJobSettingJson()
*/
@Test
public void testBuildDataxJobSettingJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxJobSettingJson");
method.setAccessible(true);
JsonNode setting = (JsonNode) method.invoke(dataxTask, null);
Assert.assertNotNull(setting);
Assert.assertNotNull(setting.get("speed"));
Assert.assertNotNull(setting.get("errorLimit"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxCoreJson()
*/
@Test
public void testBuildDataxCoreJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxCoreJson");
method.setAccessible(true);
ObjectNode coreConfig = (ObjectNode) method.invoke(dataxTask, null);
Assert.assertNotNull(coreConfig);
Assert.assertNotNull(coreConfig.get("transport"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildShellCommandFile(String jobConfigFilePath)
*/
@Test
public void testBuildShellCommandFile()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildShellCommandFile", String.class);
method.setAccessible(true);
Assert.assertNotNull(method.invoke(dataxTask, "test.json"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: getParameters
*/
@Test
public void testGetParameters()
throws Exception {
Assert.assertTrue(dataxTask.getParameters() != null);
}
/**
* Method: notNull(Object obj, String message)
*/
@Test
public void testNotNull()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("notNull", Object.class, String.class);
method.setAccessible(true);
method.invoke(dataxTask, "abc", "test throw RuntimeException");
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
@Test
public void testLoadJvmEnv() {
DataxTask dataxTask = new DataxTask(null,null);
DataxParameters dataxParameters = new DataxParameters();
dataxParameters.setXms(0);
dataxParameters.setXmx(-100);
String actual = dataxTask.loadJvmEnv(dataxParameters);
String except = " --jvm=\"-Xms1G -Xmx1G\" ";
Assert.assertEquals(except,actual);
dataxParameters.setXms(13);
dataxParameters.setXmx(14);
actual = dataxTask.loadJvmEnv(dataxParameters);
except = " --jvm=\"-Xms13G -Xmx14G\" ";
Assert.assertEquals(except,actual);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,180 | [Improvement][Module Name] DataX Node doesn't Support Clickhouse as the datasource | **Describe the question**
When I create a DataX node, I can't use a Clickhouse Datasource as the source or the target in a DataX task. Currently, I can only select from Mysql, Postgresql, Oracle and Sqlserver.
![image](https://user-images.githubusercontent.com/23460917/112953652-3d8dcd00-9170-11eb-87e2-029ab4d56939.png)
**Describe alternatives you've considered**
I hope I can use Clickhouse as my datasource.
| https://github.com/apache/dolphinscheduler/issues/5180 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-03-30T07:54:54Z" | java | "2021-04-14T03:31:40Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/datax.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="datax-model">
<m-list-box>
<div slot="text">{{$t('Custom template')}}</div>
<div slot="content">
<label class="label-box">
<div style="padding-top: 5px;">
<el-switch v-model="enable" @change="_onSwitch" :disabled="isDetails"></el-switch>
</div>
</label>
</div>
</m-list-box>
<template v-if="!enable">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refDs"
@on-dsData="_onDsData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dsType,datasource:datasource }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="form-mirror">
<textarea
id="code-sql-mirror"
name="code-sql-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="el-icon-full-screen" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}</div>
<div slot="content">
<m-datasource
ref="refDt"
@on-dsData="_onDtData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dtType,datasource:datatarget }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetTable')}}</div>
<div slot="content">
<el-input
type="input"
size="small"
v-model="targetTable"
:placeholder="$t('Please enter the table of target')">
</el-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Pre Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPreStatements"
@on-statement-list="_onPreStatements"
:statement-list="preStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Post Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPostStatements"
@on-statement-list="_onPostStatements"
:statement-list="postStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedByte')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedByte" :list="[0,1,10,50,100,512]">
</m-select-input>
<span>({{$t('0 means unlimited by byte')}})</span>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedRecord')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedRecord" :list="[0,500,1000,1500,2000,2500,3000]">
</m-select-input>
<span>({{$t('0 means unlimited by count')}})</span>
</div>
</m-list-box>
</template>
<template v-else>
<m-list-box>
<div slot="text">json</div>
<div slot="content">
<div class="form-mirror">
<textarea
id="code-json-mirror"
name="code-json-mirror"
style="opacity: 0;">
</textarea>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</template>
<m-list-box>
<div slot="text">{{$t('Running Memory')}}</div>
<div slot="content">
<span >{{$t('Min Memory')}}</span>
<m-select-input v-model="xms" :list="[1,2,3,4]">
</m-select-input>
<span> G </span>
<span >{{$t('Max Memory')}}</span>
<m-select-input v-model="xmx" :list="[1,2,3,4]">
</m-select-input>
<span> G</span>
</div>
</m-list-box>
<el-dialog
:visible.sync="scriptBoxDialog"
:append-to-body="true"
width="80%">
<m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue" @closeAble="closeAble"></m-script-box>
</el-dialog>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import mStatementList from './_source/statementList'
import disabledState from '@/module/mixin/disabledState'
import mSelectInput from '../_source/selectInput'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
let jsonEditor
export default {
name: 'datax',
data () {
return {
// Data Custom template
enable: false,
// Data source type
dsType: '',
// data source
datasource: '',
// Data source type
dtType: '',
// data source
datatarget: '',
// Return to the selected data source
rtDatasource: '',
// Return to the selected data target
rtDatatarget: '',
// Sql statement
sql: '',
json: '',
// target table
targetTable: '',
// Pre statements
preStatements: [],
// Post statements
postStatements: [],
// speed byte
jobSpeedByte: 0,
// speed record
jobSpeedRecord: 1000,
// Custom parameter
localParams: [],
customConfig: 0,
// jvm memory xms
xms: 1,
// jvm memory xms
xmx: 1,
scriptBoxDialog: false,
item: ''
}
},
mixins: [disabledState],
props: {
backfillItem: Object,
createNodeId: Number
},
methods: {
setEditorVal () {
this.item = editor.getValue()
this.scriptBoxDialog = true
},
getSriptBoxValue (val) {
editor.setValue(val)
},
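/**
 * toggle between the custom JSON template and the form-generated config;
 * the matching code editor is (re)created after a short delay because the
 * target textarea only exists once the other template branch has rendered
 */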
_onSwitch (is) {
if (is) {
this.customConfig = 1
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
this.customConfig = 0
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
/**
* return data source
*/
_onDsData (o) {
this.dsType = o.type
this.rtDatasource = o.datasource
},
/**
* return data target
*/
_onDtData (o) {
this.dtType = o.type
this.rtDatatarget = o.datasource
},
/**
* return pre statements
*/
_onPreStatements (a) {
this.preStatements = a
},
/**
* return post statements
*/
_onPostStatements (a) {
this.postStatements = a
},
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* verification
*/
_verification () {
if (this.customConfig) {
if (!jsonEditor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a JSON Statement(required)')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
json: jsonEditor.getValue(),
localParams: this.localParams,
xms: +this.xms,
xmx: +this.xmx
})
return true
} else {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDs._verifDatasource()) {
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDt._verifDatasource()) {
return false
}
if (!this.targetTable) {
this.$message.warning(`${i18n.$t('Please enter a Target Table(required)')}`)
return false
}
// preStatements Subcomponent verification
if (!this.$refs.refPreStatements._verifProp()) {
return false
}
// postStatements Subcomponent verification
if (!this.$refs.refPostStatements._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor.getValue(),
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements,
xms: +this.xms,
xmx: +this.xmx
})
return true
}
},
/**
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-sql-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', () => {
this._cacheParams()
})
editor.setValue(this.sql)
return editor
},
_handlerJsonEditor () {
this._destroyJsonEditor()
// jsonEditor
jsonEditor = codemirror('code-json-mirror', {
mode: 'json',
readOnly: this.isDetails
})
this.keypress = () => {
if (!jsonEditor.getOption('readOnly')) {
jsonEditor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
jsonEditor.on('keypress', this.keypress)
jsonEditor.on('changes', () => {
// this._cacheParams()
})
jsonEditor.setValue(this.json)
return jsonEditor
},
_cacheParams () {
this.$emit('on-cache-params', {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor ? editor.getValue() : '',
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements,
xms: +this.xms,
xmx: +this.xmx
})
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
editor.off($('.code-sql-mirror'), 'changes', this.changes)
}
},
_destroyJsonEditor () {
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
jsonEditor.off($('.code-json-mirror'), 'keypress', this.keypress)
jsonEditor.off($('.code-json-mirror'), 'changes', this.changes)
}
}
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
// set jvm memory
this.xms = o.params.xms || 1
this.xmx = o.params.xmx || 1
// backfill
if (o.params.customConfig === 0) {
this.customConfig = 0
this.enable = false
this.dsType = o.params.dsType || ''
this.datasource = o.params.dataSource || ''
this.dtType = o.params.dtType || ''
this.datatarget = o.params.dataTarget || ''
this.sql = o.params.sql || ''
this.targetTable = o.params.targetTable || ''
this.jobSpeedByte = o.params.jobSpeedByte / 1024 || 0
this.jobSpeedRecord = o.params.jobSpeedRecord || 0
this.preStatements = o.params.preStatements || []
this.postStatements = o.params.postStatements || []
} else {
this.customConfig = 1
this.enable = true
        this.json = o.params.json || ''
        this.localParams = o.params.localParams || []
}
}
},
mounted () {
if (this.customConfig) {
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
destroyed () {
/**
* Destroy the editor instance
*/
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
}
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
jsonEditor.off($('.code-json-mirror'), 'keypress', this.keypress)
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
}
},
computed: {
cacheParams () {
return {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements
}
}
},
components: { mListBox, mDatasource, mLocalParams, mStatementList, mSelectInput, mScriptBox }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,180 | [Improvement][Module Name] DataX Node doesn't Support Clickhouse as the datasource | **Describe the question**
When I create a DataX node, I can't use a Clickhouse datasource as the source or the target in a DataX task. Currently, I can only select from Mysql, Postgresql, Oracle and Sqlserver.
![image](https://user-images.githubusercontent.com/23460917/112953652-3d8dcd00-9170-11eb-87e2-029ab4d56939.png)
**Describe alternatives you've considered**
I hope I can use Clickhouse as my datasource.
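To make the request concrete, here is a minimal, hypothetical Java sketch of the kind of reader/writer plugin-name mapping a DataX task builder would need to extend. The class, enum and method names are illustrative assumptions rather than the project's actual API, and the `clickhousereader`/`clickhousewriter` names assume those plugins are present in the local DataX installation.

```java
// Hypothetical sketch only: extending a DataX plugin-name mapping with
// ClickHouse. Names are illustrative, not the actual DolphinScheduler API.
public final class DataxPluginNames {

    public enum DbType { MYSQL, POSTGRESQL, ORACLE, SQLSERVER, CLICKHOUSE }

    /** Maps a source database type to its DataX reader plugin name. */
    public static String readerPluginName(DbType type) {
        switch (type) {
            case MYSQL:      return "mysqlreader";
            case POSTGRESQL: return "postgresqlreader";
            case ORACLE:     return "oraclereader";
            case SQLSERVER:  return "sqlserverreader";
            case CLICKHOUSE: return "clickhousereader"; // assumes the plugin is installed
            default:
                throw new IllegalArgumentException("Unsupported source type: " + type);
        }
    }

    /** Maps a target database type to its DataX writer plugin name. */
    public static String writerPluginName(DbType type) {
        switch (type) {
            case MYSQL:      return "mysqlwriter";
            case POSTGRESQL: return "postgresqlwriter";
            case ORACLE:     return "oraclewriter";
            case SQLSERVER:  return "sqlserverwriter";
            case CLICKHOUSE: return "clickhousewriter"; // assumes the plugin is installed
            default:
                throw new IllegalArgumentException("Unsupported target type: " + type);
        }
    }

    private DataxPluginNames() {
    }
}
```

Besides the mapping, the UI datasource pickers would also need to offer ClickHouse as an option.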
| https://github.com/apache/dolphinscheduler/issues/5180 | https://github.com/apache/dolphinscheduler/pull/5243 | faa111674fb77bc8e4486e62a578b129e403012b | cc7a4446f50a247f054a9671648e948c654dfe65 | "2021-03-30T07:54:54Z" | java | "2021-04-14T03:31:40Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.incubator.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.incubator.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.incubator.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<revision>1.3.6-SNAPSHOT</revision>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
        <rpm-maven-plugin.version>2.2.0</rpm-maven-plugin.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
                    <version>${rpm-maven-plugin.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
                        <include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqlParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/DolphinSchedulerPluginLoaderTest.java</include>
                        <include>**/common/enums/ExecutionStatusTest.java</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/config/MasterConfigTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/register/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/AbstractCommandExecutorTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/zk/RegisterOperatorTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
                        <!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/MailUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,269 | [Question] Useless property in the `Command` and `ErrorCommand` table |
**Useless property in the `Command` and `ErrorCommand` table**
I'm working on adapting the table fields to our company's database specification: none of the fields may be `null`.
So I found that the `Command` and `ErrorCommand` tables have a `dependence` column in the database, but I cannot find [the corresponding property in the Java bean](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Command.java). I found that the [t_ds_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L330) and [t_ds_error_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L378) DDL defines this field.
**Which version of DolphinScheduler:**
- dev branch
**Requirement or improvement**
- I think it should be removed. Am I missing anything?
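For illustration only, here is a small JDBC sketch of how the unused column could be checked for and dropped; the connection URL and credentials are placeholders. Note that `CommandMapper.xml` below still selects `cmd.dependence`, so the mapper (and the bundled DDL) would need the same cleanup before the column is removed.

```java
// Illustrative sketch (not project code): drop the `dependence` column
// only if it actually exists, using plain JDBC metadata.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DropDependenceColumn {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/dolphinscheduler", "root", "root")) {
            for (String table : new String[]{"t_ds_command", "t_ds_error_command"}) {
                try (ResultSet rs = conn.getMetaData()
                        .getColumns(conn.getCatalog(), null, table, "dependence")) {
                    if (rs.next()) { // column exists, so it is safe to drop
                        try (Statement st = conn.createStatement()) {
                            st.executeUpdate("ALTER TABLE " + table + " DROP COLUMN dependence");
                        }
                    }
                }
            }
        }
    }
}
```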
| https://github.com/apache/dolphinscheduler/issues/5269 | https://github.com/apache/dolphinscheduler/pull/5284 | 029d3eb81404dd0fbf89716e00dda56993dd337a | b6453da298bff491b02a3b690674dc62bcb84cd3 | "2021-04-13T13:09:06Z" | java | "2021-04-15T12:15:20Z" | dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/CommandMapper.xml | <?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.CommandMapper">
<select id="getOneToRun" resultType="org.apache.dolphinscheduler.dao.entity.Command">
select cmd.id, cmd.command_type, cmd.process_definition_id, cmd.command_param, cmd.task_depend_type, cmd.failure_strategy,
cmd.warning_type, cmd.warning_group_id, cmd.schedule_time, cmd.start_time, cmd.executor_id, cmd.dependence, cmd.update_time,
cmd.process_instance_priority, cmd.worker_group
from t_ds_command cmd
join t_ds_process_definition definition on cmd.process_definition_id = definition.id
where definition.release_state = 1 AND definition.flag = 1
order by cmd.update_time asc
limit 1
</select>
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
select cmd.command_type as command_type, count(1) as count
from t_ds_command cmd, t_ds_process_definition process
where cmd.process_definition_id = process.id
<if test="projectIdArray != null and projectIdArray.length != 0">
and process.project_id in
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
<if test="startTime != null and endTime != null">
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime}
</if>
group by cmd.command_type
</select>
</mapper>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,269 | [Question] Useless property in the `Command` and `ErrorCommand` table |
**Useless property in the `Command` and `ErrorCommand` table**
I'm working on adapting the table fields to our company's database specification: none of the fields may be `null`.
So I found that the `Command` and `ErrorCommand` tables have a `dependence` column in the database, but I cannot find [the corresponding property in the Java bean](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Command.java). I found that the [t_ds_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L330) and [t_ds_error_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L378) DDL defines this field.
**Which version of DolphinScheduler:**
- dev branch
**Requirement or improvement**
- I think it should be removed. Am I missing anything?
| https://github.com/apache/dolphinscheduler/issues/5269 | https://github.com/apache/dolphinscheduler/pull/5284 | 029d3eb81404dd0fbf89716e00dda56993dd337a | b6453da298bff491b02a3b690674dc62bcb84cd3 | "2021-04-13T13:09:06Z" | java | "2021-04-15T12:15:20Z" | sql/dolphinscheduler_mysql.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for QRTZ_BLOB_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`;
CREATE TABLE `QRTZ_BLOB_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`BLOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_BLOB_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CALENDARS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CALENDARS`;
CREATE TABLE `QRTZ_CALENDARS` (
`SCHED_NAME` varchar(120) NOT NULL,
`CALENDAR_NAME` varchar(200) NOT NULL,
`CALENDAR` blob NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_CALENDARS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CRON_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`;
CREATE TABLE `QRTZ_CRON_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`CRON_EXPRESSION` varchar(120) NOT NULL,
`TIME_ZONE_ID` varchar(80) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_CRON_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_FIRED_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`;
CREATE TABLE `QRTZ_FIRED_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`ENTRY_ID` varchar(95) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`INSTANCE_NAME` varchar(200) NOT NULL,
`FIRED_TIME` bigint(13) NOT NULL,
`SCHED_TIME` bigint(13) NOT NULL,
`PRIORITY` int(11) NOT NULL,
`STATE` varchar(16) NOT NULL,
`JOB_NAME` varchar(200) DEFAULT NULL,
`JOB_GROUP` varchar(200) DEFAULT NULL,
`IS_NONCONCURRENT` varchar(1) DEFAULT NULL,
`REQUESTS_RECOVERY` varchar(1) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`),
KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`),
KEY `IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`),
KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_FIRED_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_JOB_DETAILS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`;
CREATE TABLE `QRTZ_JOB_DETAILS` (
`SCHED_NAME` varchar(120) NOT NULL,
`JOB_NAME` varchar(200) NOT NULL,
`JOB_GROUP` varchar(200) NOT NULL,
`DESCRIPTION` varchar(250) DEFAULT NULL,
`JOB_CLASS_NAME` varchar(250) NOT NULL,
`IS_DURABLE` varchar(1) NOT NULL,
`IS_NONCONCURRENT` varchar(1) NOT NULL,
`IS_UPDATE_DATA` varchar(1) NOT NULL,
`REQUESTS_RECOVERY` varchar(1) NOT NULL,
`JOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`),
KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_JOB_DETAILS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_LOCKS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_LOCKS`;
CREATE TABLE `QRTZ_LOCKS` (
`SCHED_NAME` varchar(120) NOT NULL,
`LOCK_NAME` varchar(40) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_LOCKS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`;
CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SCHEDULER_STATE
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`;
CREATE TABLE `QRTZ_SCHEDULER_STATE` (
`SCHED_NAME` varchar(120) NOT NULL,
`INSTANCE_NAME` varchar(200) NOT NULL,
`LAST_CHECKIN_TIME` bigint(13) NOT NULL,
`CHECKIN_INTERVAL` bigint(13) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_SCHEDULER_STATE
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`REPEAT_COUNT` bigint(7) NOT NULL,
`REPEAT_INTERVAL` bigint(12) NOT NULL,
`TIMES_TRIGGERED` bigint(10) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`STR_PROP_1` varchar(512) DEFAULT NULL,
`STR_PROP_2` varchar(512) DEFAULT NULL,
`STR_PROP_3` varchar(512) DEFAULT NULL,
`INT_PROP_1` int(11) DEFAULT NULL,
`INT_PROP_2` int(11) DEFAULT NULL,
`LONG_PROP_1` bigint(20) DEFAULT NULL,
`LONG_PROP_2` bigint(20) DEFAULT NULL,
`DEC_PROP_1` decimal(13,4) DEFAULT NULL,
`DEC_PROP_2` decimal(13,4) DEFAULT NULL,
`BOOL_PROP_1` varchar(1) DEFAULT NULL,
`BOOL_PROP_2` varchar(1) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_TRIGGERS`;
CREATE TABLE `QRTZ_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`JOB_NAME` varchar(200) NOT NULL,
`JOB_GROUP` varchar(200) NOT NULL,
`DESCRIPTION` varchar(250) DEFAULT NULL,
`NEXT_FIRE_TIME` bigint(13) DEFAULT NULL,
`PREV_FIRE_TIME` bigint(13) DEFAULT NULL,
`PRIORITY` int(11) DEFAULT NULL,
`TRIGGER_STATE` varchar(16) NOT NULL,
`TRIGGER_TYPE` varchar(8) NOT NULL,
`START_TIME` bigint(13) NOT NULL,
`END_TIME` bigint(13) DEFAULT NULL,
`CALENDAR_NAME` varchar(200) DEFAULT NULL,
`MISFIRE_INSTR` smallint(2) DEFAULT NULL,
`JOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`),
KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of QRTZ_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_access_token
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_access_token`;
CREATE TABLE `t_ds_access_token` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`token` varchar(64) DEFAULT NULL COMMENT 'token',
`expire_time` datetime DEFAULT NULL COMMENT 'end time of token ',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_access_token
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alert
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert`;
CREATE TABLE `t_ds_alert` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`title` varchar(64) DEFAULT NULL COMMENT 'title',
`content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)',
`alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed',
`log` text COMMENT 'log',
`alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_alert
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alertgroup`;
CREATE TABLE `t_ds_alertgroup`(
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `alert_instance_ids` varchar(255) DEFAULT NULL COMMENT 'alert instance ids',
`create_user_id` int(11) DEFAULT NULL COMMENT 'create user id',
`group_name` varchar(255) DEFAULT NULL COMMENT 'group name',
`description` varchar(255) DEFAULT NULL,
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_command`;
CREATE TABLE `t_ds_command` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group',
`schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
`start_time` datetime DEFAULT NULL COMMENT 'start time',
`executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
`dependence` varchar(255) DEFAULT NULL COMMENT 'dependence',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) COMMENT 'worker group',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_datasource
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_datasource`;
CREATE TABLE `t_ds_datasource` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(64) NOT NULL COMMENT 'data source name',
`note` varchar(256) DEFAULT NULL COMMENT 'description',
`type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark',
`user_id` int(11) NOT NULL COMMENT 'the creator id',
`connection_params` text NOT NULL COMMENT 'json connection params',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_datasource
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_error_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_error_command`;
CREATE TABLE `t_ds_error_command` (
`id` int(11) NOT NULL COMMENT 'key',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
`schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time',
`start_time` datetime DEFAULT NULL COMMENT 'start time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`dependence` text COMMENT 'dependence',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) COMMENT 'worker group',
`message` text COMMENT 'message',
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;
-- ----------------------------
-- Records of t_ds_error_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition`;
CREATE TABLE `t_ds_process_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'process definition name',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
`project_id` int(11) DEFAULT NULL COMMENT 'project id',
`user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
`process_definition_json` longtext COMMENT 'process definition json content',
`description` text,
`global_params` text COMMENT 'global parameters',
`flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`modify_by` varchar(255) DEFAULT NULL,
`resource_ids` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `process_definition_unique` (`name`,`project_id`),
KEY `process_definition_index` (`project_id`,`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_definition
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition_version`;
CREATE TABLE `t_ds_process_definition_version` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`process_definition_json` longtext COMMENT 'process definition json content',
`description` text,
`global_params` text COMMENT 'global parameters',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids',
PRIMARY KEY (`id`),
UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE,
KEY `process_definition_index` (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_definition_version
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_instance`;
CREATE TABLE `t_ds_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'process instance name',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
`recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance',
`start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
`end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
`run_times` int(11) DEFAULT NULL COMMENT 'process instance run times',
`host` varchar(135) DEFAULT NULL COMMENT 'process instance host',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes',
`max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if process success or failed',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
`schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
`command_start_time` datetime DEFAULT NULL COMMENT 'command start time',
`global_params` text COMMENT 'global parameters',
  `process_instance_json` longtext COMMENT 'process instance json (a copy of the process definition json)',
`flag` tinyint(4) DEFAULT '1' COMMENT 'flag',
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process',
`executor_id` int(11) NOT NULL COMMENT 'executor id',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`history_cmd` text COMMENT 'history commands of process instance operation',
`dependence_schedule_times` text COMMENT 'depend schedule fire time',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
`var_pool` longtext COMMENT 'var_pool',
PRIMARY KEY (`id`),
KEY `process_instance_index` (`process_definition_id`,`id`) USING BTREE,
KEY `start_time_index` (`start_time`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_project
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_project`;
CREATE TABLE `t_ds_project` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(100) DEFAULT NULL COMMENT 'project name',
`description` varchar(200) DEFAULT NULL,
`user_id` int(11) DEFAULT NULL COMMENT 'creator id',
`flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_project
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_queue
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_queue`;
CREATE TABLE `t_ds_queue` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name',
`queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_queue
-- ----------------------------
INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null);
-- ----------------------------
-- Table structure for t_ds_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_datasource_user`;
CREATE TABLE `t_ds_relation_datasource_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`datasource_id` int(11) DEFAULT NULL COMMENT 'data source id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_process_instance`;
CREATE TABLE `t_ds_relation_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent task instance id',
`process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_project_user`;
CREATE TABLE `t_ds_relation_project_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`project_id` int(11) DEFAULT NULL COMMENT 'project id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_resources_user`;
CREATE TABLE `t_ds_relation_resources_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL COMMENT 'user id',
`resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_udfs_user`;
CREATE TABLE `t_ds_relation_udfs_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
`udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_resources`;
CREATE TABLE `t_ds_resources` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`alias` varchar(64) DEFAULT NULL COMMENT 'alias',
`file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
`description` varchar(256) DEFAULT NULL,
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF',
`size` bigint(20) DEFAULT NULL COMMENT 'resource size',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`pid` int(11) DEFAULT NULL,
`full_name` varchar(64) DEFAULT NULL,
`is_directory` tinyint(4) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_resources
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_schedules
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_schedules`;
CREATE TABLE `t_ds_schedules` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`start_time` datetime NOT NULL COMMENT 'start time',
`end_time` datetime NOT NULL COMMENT 'end time',
`crontab` varchar(256) NOT NULL COMMENT 'crontab description',
`failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue',
`user_id` int(11) NOT NULL COMMENT 'user id',
`release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ',
`warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(256) DEFAULT '' COMMENT 'worker group',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime NOT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_schedules
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_session
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_session`;
CREATE TABLE `t_ds_session` (
`id` varchar(64) NOT NULL COMMENT 'key',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`ip` varchar(45) DEFAULT NULL COMMENT 'ip',
`last_login_time` datetime DEFAULT NULL COMMENT 'last login time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_session
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_task_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_instance`;
CREATE TABLE `t_ds_task_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'task name',
`task_type` varchar(64) DEFAULT NULL COMMENT 'task type',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id',
`task_json` longtext COMMENT 'task content json',
`state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
`submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
`start_time` datetime DEFAULT NULL COMMENT 'task start time',
`end_time` datetime DEFAULT NULL COMMENT 'task end time',
`host` varchar(135) DEFAULT NULL COMMENT 'host of task running on',
`execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host',
`log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
`alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert',
`retry_times` int(4) DEFAULT '0' COMMENT 'task retry times',
`pid` int(4) DEFAULT NULL COMMENT 'pid of task',
`app_link` text COMMENT 'yarn app id',
`flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
`retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ',
`max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times',
`task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group',
`executor_id` int(11) DEFAULT NULL,
`first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time',
`delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time',
`var_pool` longtext COMMENT 'var_pool',
PRIMARY KEY (`id`),
KEY `process_instance_id` (`process_instance_id`) USING BTREE,
KEY `task_instance_index` (`process_definition_id`,`process_instance_id`) USING BTREE,
CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_task_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_tenant
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_tenant`;
CREATE TABLE `t_ds_tenant` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code',
`description` varchar(256) DEFAULT NULL,
`queue_id` int(11) DEFAULT NULL COMMENT 'queue id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_tenant
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_udfs
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_udfs`;
CREATE TABLE `t_ds_udfs` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`func_name` varchar(100) NOT NULL COMMENT 'UDF function name',
`class_name` varchar(255) NOT NULL COMMENT 'class of udf',
`type` tinyint(4) NOT NULL COMMENT 'Udf function type',
`arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types',
`database` varchar(255) DEFAULT NULL COMMENT 'data base',
`description` varchar(255) DEFAULT NULL,
`resource_id` int(11) NOT NULL COMMENT 'resource id',
`resource_name` varchar(255) NOT NULL COMMENT 'resource name',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime NOT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_udfs
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_user`;
CREATE TABLE `t_ds_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
`user_name` varchar(64) DEFAULT NULL COMMENT 'user name',
`user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
`user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user',
`email` varchar(64) DEFAULT NULL COMMENT 'email',
`phone` varchar(11) DEFAULT NULL COMMENT 'phone',
`tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`queue` varchar(64) DEFAULT NULL COMMENT 'queue',
`state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable',
PRIMARY KEY (`id`),
UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_worker_group
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(256) NOT NULL COMMENT 'worker group name',
`addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
`create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
`update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_worker_group
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_version`;
CREATE TABLE `t_ds_version` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`version` varchar(200) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `version_UNIQUE` (`version`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version';
-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO `t_ds_version` VALUES ('1', '1.4.0');
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO `t_ds_user`
VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1);
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
`id` int NOT NULL AUTO_INCREMENT,
  `plugin_name` varchar(100) NOT NULL COMMENT 'the name of the plugin, e.g. email',
  `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type. alert=alert plugin, job=job plugin',
`plugin_params` text COMMENT 'plugin params',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_define_id` int NOT NULL,
  `plugin_instance_params` text COMMENT 'plugin instance params. Also contains the param values entered by the user in the web UI.',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,269 | [Question] Useless property in the `Command` and `ErrorCommand` table |
**Useless property in the `Command` and `ErrorCommand` tables**
I'm working on adapting the table fields to our company's database specification: none of the fields may be `null`.
So I found that the `Command` and `ErrorCommand` tables have a `dependence` column in the database, but I cannot find [the bean property in the Java bean](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Command.java). I found that the [t_ds_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L330) and [t_ds_error_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L378) DDL files define this field.
**Which version of DolphinScheduler:**
- dev branch
**Requirement or improvement**
- I think it should be removed. Did I miss anything?
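For illustration, a minimal cleanup sketch — the table and column names come from the DDL above, but the migration itself is hypothetical and assumes the column is confirmed unused by any mapper or upgrade script:
```sql
-- Hypothetical migration (MySQL syntax): drop the orphaned column
-- from both tables once no code path references it.
ALTER TABLE t_ds_command DROP COLUMN dependence;
ALTER TABLE t_ds_error_command DROP COLUMN dependence;
```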
| https://github.com/apache/dolphinscheduler/issues/5269 | https://github.com/apache/dolphinscheduler/pull/5284 | 029d3eb81404dd0fbf89716e00dda56993dd337a | b6453da298bff491b02a3b690674dc62bcb84cd3 | "2021-04-13T13:09:06Z" | java | "2021-04-15T12:15:20Z" | sql/dolphinscheduler_postgre.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(95) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
content text ,
alert_status int DEFAULT '0' ,
log text ,
alertgroup_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup(
id int NOT NULL,
  alert_instance_ids varchar(255) DEFAULT NULL,
create_user_id int4 DEFAULT NULL,
group_name varchar(255) DEFAULT NULL,
description varchar(255) DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
dependence varchar(255) DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(256) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
executor_id int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
dependence text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
message text ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
release_state int DEFAULT NULL ,
project_id int DEFAULT NULL ,
user_id int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
flag int DEFAULT NULL ,
locations text ,
connects text ,
warning_group_id int4 DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
update_time timestamp DEFAULT NULL ,
modify_by varchar(36) DEFAULT '' ,
resource_ids varchar(64),
PRIMARY KEY (id),
CONSTRAINT process_definition_unique UNIQUE (name, project_id)
) ;
create index process_definition_index on t_ds_process_definition (project_id,id);
--
-- Table structure for table t_ds_process_definition_version
--
DROP TABLE IF EXISTS t_ds_process_definition_version;
CREATE TABLE t_ds_process_definition_version (
id int NOT NULL ,
process_definition_id int NOT NULL ,
version int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
locations text ,
connects text ,
warning_group_id int4 DEFAULT NULL,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
locations text ,
connects text ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64) ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
var_pool text ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_id,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id),
CONSTRAINT t_ds_resources_un UNIQUE (full_name, type)
) ;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_id int NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
crontab varchar(256) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(64) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
task_json text ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link text ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
executor_id int DEFAULT NULL ,
first_submit_time timestamp DEFAULT NULL ,
delay_time int DEFAULT '0' ,
var_pool text ,
PRIMARY KEY (id),
CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE
) ;
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
comment on column t_ds_user.state is 'state 0:disable 1:enable';
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(256) NOT NULL ,
addr_list text DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id) ,
CONSTRAINT name_unique UNIQUE (name)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user, user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time)
VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup, default admin warning group
INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time)
VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_version, default version : 1.4.0
INSERT INTO t_ds_version(version) VALUES ('1.4.0');
--
-- Table structure for table t_ds_plugin_define
--
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
--
-- Table structure for table t_ds_alert_plugin_instance
--
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,269 | [Question] Useless property in the `Command` and `ErrorCommand` table |
**Useless property in the `Command` and `ErrorCommand` tables**
I'm working on adapting the table fields to our company's database specification: none of the fields may be `null`.
I found that the `Command` and `ErrorCommand` tables have a `dependence` column in the database, but I cannot find [the corresponding property in the Java bean](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Command.java), even though the [t_ds_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L330) and [t_ds_error_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L378) DDL files define this field.
**Which version of DolphinScheduler:**
- dev branch
**Requirement or improvement**
- I think it should be removed. Am I missing anything?
| https://github.com/apache/dolphinscheduler/issues/5269 | https://github.com/apache/dolphinscheduler/pull/5284 | 029d3eb81404dd0fbf89716e00dda56993dd337a | b6453da298bff491b02a3b690674dc62bcb84cd3 | "2021-04-13T13:09:06Z" | java | "2021-04-15T12:15:20Z" | sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
-- uc_dolphin_T_t_ds_user_A_state
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_user'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='state')
THEN
ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_user_A_state;
DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state;
-- uc_dolphin_T_t_ds_tenant_A_tenant_name
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_tenant'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='tenant_name')
THEN
ALTER TABLE t_ds_tenant DROP `tenant_name`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_tenant_A_tenant_name;
DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name;
-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='first_submit_time')
THEN
ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time;
-- uc_dolphin_T_t_ds_task_instance_A_delay_time
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='delay_time')
THEN
ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time';
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time;
-- uc_dolphin_T_t_ds_task_instance_A_var_pool
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_task_instance ADD `var_pool` longtext NULL;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool;
-- uc_dolphin_T_t_ds_process_instance_A_var_pool
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool;
-- ct_dolphin_T_t_ds_process_definition_version
drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version;
delimiter d//
CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version()
BEGIN
CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`process_definition_json` longtext COMMENT 'process definition json content',
`description` text,
`global_params` text COMMENT 'global parameters',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`receivers` text COMMENT 'receivers',
`receivers_cc` text COMMENT 'cc',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids',
PRIMARY KEY (`id`),
UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE,
KEY `process_definition_index` (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8;
END;
d//
delimiter ;
CALL ct_dolphin_T_t_ds_process_definition_version;
DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version;
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
`plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
`plugin_params` text COMMENT 'plugin params',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
`id` int NOT NULL AUTO_INCREMENT,
`plugin_define_id` int NOT NULL,
`plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id;
-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition_version'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id;
-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='alert_instance_ids')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='create_user_id')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`;
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
-- ----------------------------
-- These columns will not be used in the new version; if you determine that the historical data is useless, you can delete them using the SQL below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`;
-- ALTER TABLE t_ds_alertgroup DROP `group_type`;
-- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`;
-- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`;
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,269 | [Question] Useless property in the `Command` and `ErrorCommand` table |
**Useless property in the `Command` and `ErrorCommand` tables**
I'm working on adapting the table fields to our company's database specification: none of the fields may be `null`.
I found that the `Command` and `ErrorCommand` tables have a `dependence` column in the database, but I cannot find [the corresponding property in the Java bean](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Command.java), even though the [t_ds_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L330) and [t_ds_error_command](https://github.com/apache/incubator-dolphinscheduler/blob/5d264c9f208a089bc1f23be5f3077d2dab2b0f40/sql/dolphinscheduler_mysql.sql#L378) DDL files define this field.
**Which version of DolphinScheduler:**
- dev branch
**Requirement or improvement**
- I think it should be removed. Am I missing anything?
| https://github.com/apache/dolphinscheduler/issues/5269 | https://github.com/apache/dolphinscheduler/pull/5284 | 029d3eb81404dd0fbf89716e00dda56993dd337a | b6453da298bff491b02a3b690674dc62bcb84cd3 | "2021-04-13T13:09:06Z" | java | "2021-04-15T12:15:20Z" | sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- uc_dolphin_T_t_ds_user_A_state
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_user'
AND COLUMN_NAME ='state')
THEN
ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1;
comment on column t_ds_user.state is 'state 0:disable 1:enable';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_user_A_state();
DROP FUNCTION uc_dolphin_T_t_ds_user_A_state();
-- uc_dolphin_T_t_ds_tenant_A_tenant_name
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_tenant'
AND COLUMN_NAME ='tenant_name')
THEN
ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name";
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_tenant_A_tenant_name();
DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name();
-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='first_submit_time')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
-- uc_dolphin_T_t_ds_task_instance_A_delay_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='delay_time')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time();
-- uc_dolphin_T_t_ds_task_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool();
-- uc_dolphin_T_t_ds_process_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND COLUMN_NAME ='var_pool')
THEN
ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool();
-- ct_dolphin_T_t_ds_process_definition_version
delimiter d//
CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$
BEGIN
CREATE TABLE IF NOT EXISTS t_ds_process_definition_version (
id int NOT NULL ,
process_definition_id int NOT NULL ,
version int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
locations text ,
connects text ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT ct_dolphin_T_t_ds_process_definition_version();
DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version();
-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
id serial NOT NULL,
plugin_name varchar(100) NOT NULL,
plugin_type varchar(100) NOT NULL,
plugin_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);
-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
id serial NOT NULL,
plugin_define_id int4 NOT NULL,
plugin_instance_params text NULL,
create_time timestamp NULL,
update_time timestamp NULL,
instance_name varchar(200) NULL,
CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);
-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition_version'
AND COLUMN_NAME ='warning_group_id')
THEN
ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND COLUMN_NAME ='alert_instance_ids')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL;
COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_alertgroup'
AND COLUMN_NAME ='create_user_id')
THEN
ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL;
COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id';
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
-- ----------------------------
-- These columns will not be used in the new version; if you determine that the historical data is useless, you can delete them using the SQL below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type";
-- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
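A minimal sketch of the idea, not the change merged in the linked PR: a global limiter can be mounted as one more `HandlerInterceptor` in the `AppConfiguration` shown below. The `RateLimitInterceptor` class and its 300-QPS constructor argument are assumptions for illustration (a fuller sketch accompanies the `RateLimitInterceptor.java` record further down):

```java
// Hypothetical sketch: extending AppConfiguration#addInterceptors so every
// request passes the limiter first. RateLimitInterceptor(300) is an assumed
// class and default, not the merged implementation.
@Override
public void addInterceptors(InterceptorRegistry registry) {
    // rate limiting runs before i18n and login handling, on all paths
    registry.addInterceptor(new RateLimitInterceptor(300)).addPathPatterns(PATH_PATTERN);
    // i18n
    registry.addInterceptor(localeChangeInterceptor());
    registry.addInterceptor(loginInterceptor())
            .addPathPatterns(LOGIN_INTERCEPTOR_PATH_PATTERN)
            .excludePathPatterns(LOGIN_PATH_PATTERN, REGISTER_PATH_PATTERN,
                    "/swagger-resources/**", "/webjars/**", "/v2/**",
                    "/doc.html", "/swagger-ui.html", "*.html", "/ui/**");
}
```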
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/configuration/AppConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.configuration;
import org.apache.dolphinscheduler.api.interceptor.LocaleChangeInterceptor;
import org.apache.dolphinscheduler.api.interceptor.LoginHandlerInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.cors.UrlBasedCorsConfigurationSource;
import org.springframework.web.filter.CorsFilter;
import org.springframework.web.servlet.LocaleResolver;
import org.springframework.web.servlet.config.annotation.*;
import org.springframework.web.servlet.i18n.CookieLocaleResolver;
import java.util.Locale;
/**
* application configuration
*/
@Configuration
public class AppConfiguration implements WebMvcConfigurer {
public static final String LOGIN_INTERCEPTOR_PATH_PATTERN = "/**/*";
public static final String LOGIN_PATH_PATTERN = "/login";
public static final String REGISTER_PATH_PATTERN = "/users/register";
public static final String PATH_PATTERN = "/**";
public static final String LOCALE_LANGUAGE_COOKIE = "language";
@Bean
public CorsFilter corsFilter() {
CorsConfiguration config = new CorsConfiguration();
config.addAllowedOrigin("*");
config.addAllowedMethod("*");
config.addAllowedHeader("*");
UrlBasedCorsConfigurationSource configSource = new UrlBasedCorsConfigurationSource();
configSource.registerCorsConfiguration(PATH_PATTERN, config);
return new CorsFilter(configSource);
}
@Bean
public LoginHandlerInterceptor loginInterceptor() {
return new LoginHandlerInterceptor();
}
/**
     * Cookie-based locale resolver
     * @return locale resolver
*/
@Bean(name = "localeResolver")
public LocaleResolver localeResolver() {
CookieLocaleResolver localeResolver = new CookieLocaleResolver();
localeResolver.setCookieName(LOCALE_LANGUAGE_COOKIE);
// set default locale
localeResolver.setDefaultLocale(Locale.US);
// set language tag compliant
localeResolver.setLanguageTagCompliant(false);
return localeResolver;
}
@Bean
public LocaleChangeInterceptor localeChangeInterceptor() {
return new LocaleChangeInterceptor();
}
@Override
public void addInterceptors(InterceptorRegistry registry) {
// i18n
registry.addInterceptor(localeChangeInterceptor());
registry.addInterceptor(loginInterceptor())
.addPathPatterns(LOGIN_INTERCEPTOR_PATH_PATTERN)
.excludePathPatterns(LOGIN_PATH_PATTERN, REGISTER_PATH_PATTERN,
"/swagger-resources/**", "/webjars/**", "/v2/**",
"/doc.html", "/swagger-ui.html", "*.html", "/ui/**");
}
@Override
public void addResourceHandlers(ResourceHandlerRegistry registry) {
registry.addResourceHandler("/static/**").addResourceLocations("classpath:/static/");
registry.addResourceHandler("doc.html").addResourceLocations("classpath:/META-INF/resources/");
registry.addResourceHandler("swagger-ui.html").addResourceLocations("classpath:/META-INF/resources/");
registry.addResourceHandler("/webjars/**").addResourceLocations("classpath:/META-INF/resources/webjars/");
registry.addResourceHandler("/ui/**").addResourceLocations("file:ui/");
}
@Override
public void addViewControllers(ViewControllerRegistry registry) {
registry.addViewController("/ui/").setViewName("forward:/ui/index.html");
registry.addViewController("/").setViewName("forward:/ui/index.html");
}
/**
* Turn off suffix-based content negotiation
*
* @param configurer configurer
*/
@Override
public void configureContentNegotiation(final ContentNegotiationConfigurer configurer) {
configurer.favorPathExtension(false);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
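`TrafficConfiguration.java` is empty in this before-fix snapshot. As a hedged sketch only, a properties holder for the limiter could look like the following; the `traffic.control` prefix, both field names, and the defaults are assumptions, not the merged code:

```java
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

// Hypothetical sketch of a traffic-control properties holder; the prefix
// "traffic.control", the field names, and the defaults are all illustrative.
@Configuration
@ConfigurationProperties(prefix = "traffic.control")
public class TrafficConfiguration {

    /** whether the global rate limit is enabled; off by default */
    private boolean globalSwitch = false;

    /** maximum number of requests accepted per second across the whole API server */
    private int maxGlobalQpsRate = 300;

    public boolean isGlobalSwitch() {
        return globalSwitch;
    }

    public void setGlobalSwitch(boolean globalSwitch) {
        this.globalSwitch = globalSwitch;
    }

    public int getMaxGlobalQpsRate() {
        return maxGlobalQpsRate;
    }

    public void setMaxGlobalQpsRate(int maxGlobalQpsRate) {
        this.maxGlobalQpsRate = maxGlobalQpsRate;
    }
}
```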
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/configuration/TrafficConfiguration.java | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
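`RateLimitInterceptor.java` is likewise empty here. One plausible sketch, built on Guava's `RateLimiter` (Guava is already a dependency in the project's pom); the class shape, the QPS constructor, and the 429 handling are assumptions, not the merged implementation:

```java
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.springframework.http.HttpStatus;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;

import com.google.common.util.concurrent.RateLimiter;

// Hypothetical sketch, not the merged code: one global token-bucket limiter
// shared by all requests handled by the API server.
public class RateLimitInterceptor extends HandlerInterceptorAdapter {

    private final RateLimiter globalRateLimiter;

    public RateLimitInterceptor(double permitsPerSecond) {
        // token bucket refilled at the configured rate
        this.globalRateLimiter = RateLimiter.create(permitsPerSecond);
    }

    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
        // tryAcquire() never blocks: reject immediately when no permit is left
        if (!globalRateLimiter.tryAcquire()) {
            response.setStatus(HttpStatus.TOO_MANY_REQUESTS.value());
            return false;
        }
        return true;
    }
}
```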
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/RateLimitInterceptor.java | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | dolphinscheduler-api/src/main/resources/application-api.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# server port
server.port=12345
# session config
server.servlet.session.timeout=7200
# servlet config
server.servlet.context-path=/dolphinscheduler/
# time zone
spring.jackson.time-zone=GMT+8
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
# enable response compression
server.compression.enabled=true
server.compression.mime-types=text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml
# max http post content size
server.jetty.max-http-form-post-size=5000000
# i18n
spring.messages.encoding=UTF-8
# i18n classpath folder; file prefix "messages"; if there are multiple files, use "," as the separator
spring.messages.basename=i18n/messages
# Authentication types (supported types: PASSWORD)
security.authentication.type=PASSWORD
#============================================================================
# LDAP Config
# mock ldap server from https://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
#============================================================================
# admin userId
#security.authentication.ldap.user.admin=read-only-admin
# ldap server config
#ldap.urls=ldap://ldap.forumsys.com:389/
#ldap.base.dn=dc=example,dc=com
#ldap.username=cn=read-only-admin,dc=example,dc=com
#ldap.password=password
#ldap.user.identity.attribute=uid
#ldap.user.email.attribute=mail
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
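`TrafficConfigurationTest.java` is empty in this snapshot as well. A minimal JUnit 4 sketch against the assumed `TrafficConfiguration` fields from the earlier sketch (illustrative names and defaults, not the merged test):

```java
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

import org.junit.Test;

// Hypothetical sketch mirroring the assumed TrafficConfiguration above.
public class TrafficConfigurationTest {

    @Test
    public void testDefaults() {
        TrafficConfiguration configuration = new TrafficConfiguration();
        // the limiter should stay off unless enabled in application-api.properties
        assertFalse(configuration.isGlobalSwitch());
        assertEquals(300, configuration.getMaxGlobalQpsRate());
    }
}
```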
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/configuration/TrafficConfigurationTest.java | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
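`RateLimitInterceptorTest.java` is also empty here. A hedged JUnit 4 sketch, reusing the assumed QPS constructor from the interceptor sketch above together with Spring's mock servlet objects (spring-test is already a managed dependency):

```java
import static org.junit.Assert.assertFalse;

import org.junit.Test;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;

// Hypothetical sketch: at 2 permits/second, a burst of 10 back-to-back
// requests must see at least one rejection.
public class RateLimitInterceptorTest {

    @Test
    public void testBurstIsRejected() throws Exception {
        RateLimitInterceptor interceptor = new RateLimitInterceptor(2);
        boolean lastAllowed = true;
        for (int i = 0; i < 10 && lastAllowed; i++) {
            lastAllowed = interceptor.preHandle(new MockHttpServletRequest(),
                    new MockHttpServletResponse(), null);
        }
        // the loop exits early on the first refused request
        assertFalse(lastAllowed);
    }
}
```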
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/interceptor/RateLimitInterceptorTest.java | |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,235 | [Improvement][ApiServer] Traffic limit | **Describe the question**
After supporting OpenAPI, it is necessary to control traffic; otherwise the service may break down.
**What are the current deficiencies and the benefits of improvement**
- Maybe we can add a global request rate limit. In the future, we could consider supporting tenant-level rate control.
**Which version of DolphinScheduler:**
- [1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5235 | https://github.com/apache/dolphinscheduler/pull/5307 | 07e612c8777447349ed074ec7bb4384c647df1d8 | 7843ed40873bcb4b7f2e1b05f5773e1fc2efa51f | "2021-04-08T13:44:34Z" | java | "2021-04-16T14:41:02Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>dev@dolphinscheduler.apache.org</post>
<subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<revision>1.3.6-SNAPSHOT</revision>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.10.5</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.9.4</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>24.1-jre</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugin.version>2.2.0</rpm-maven-plugin.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<swagger-models.version>1.5.24</swagger-models.version>
<guava-retry.version>2.0.0</guava-retry.version>
<dep.airlift.version>0.184</dep.airlift.version>
<dep.packaging.version>${dep.airlift.version}</dep.packaging.version>
<protostuff.version>1.7.2</protostuff.version>
<reflections.version>0.9.12</reflections.version>
<byte-buddy.version>1.9.16</byte-buddy.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-spi</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<!--protostuff-->
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-core</artifactId>
<version>${protostuff.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime -->
<dependency>
<groupId>io.protostuff</groupId>
<artifactId>protostuff-runtime</artifactId>
<version>${protostuff.version}</version>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
</dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
<version>${reflections.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
                            <groupId>com.sun.jersey</groupId>
                            <artifactId>jersey-json</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-models</artifactId>
<version>${swagger-models.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
<dependency>
<groupId>org.sonatype.aether</groupId>
<artifactId>aether-api</artifactId>
<version>1.13.1</version>
</dependency>
<dependency>
<groupId>io.airlift.resolver</groupId>
<artifactId>resolver</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>6.2.1</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
<version>1.6.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<version>1.0.0</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<version>1.0.4</version>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugin.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-maven-plugin</artifactId>
<extensions>true</extensions>
<!--<configuration>-->
<!--<allowedProvidedDependencies>-->
<!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>-->
<!--</allowedProvidedDependencies>-->
<!--</configuration>-->
</plugin>
<plugin>
<groupId>ca.vanzyl.maven.plugins</groupId>
<artifactId>provisio-maven-plugin</artifactId>
<extensions>true</extensions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
<include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LocaleChangeInterceptorTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include>
<include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigLDAPTest.java</include>
<include>**/api/security/SecurityConfigPasswordTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/AlertPluginInstanceServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UiPluginServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqlParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/utils/HiveConfUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/DolphinSchedulerPluginLoaderTest.java</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/datasource/MySQLDataSourceTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/command/alert/AlertSendRequestCommandTest.java</include>
<include>**/remote/command/alert/AlertSendResponseCommandTest.java</include>
<include>**/remote/command/future/ResponseFutureTest.java</include>
<include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/utils/HostTest.java</include>
<include>**/remote/utils/NettyUtilTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/rpc/RpcTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/LoggerRequestProcessorTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/config/MasterConfigTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/registry/ServerNodeManagerTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/master/zk/ZKMasterClientTest.java</include>
<include>**/server/register/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/MapReduceArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/processor/TaskExecuteProcessorTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<include>**/server/worker/task/datax/DataxTaskTest.java</include>
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/task/AbstractCommandExecutorTest.java</include>
<include>**/server/worker/task/ShellTaskReturnTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/server/worker/runner/WorkerManagerThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/zk/RegisterOperatorTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include>
<include>**/service/log/LogClientServiceTest.java</include>
<include>**/service/alert/AlertClientServiceTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include>
<include>**/dao/mapper/PluginDefineTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/email/EmailAlertChannelTest.java</include>
<include>**/plugin/alert/email/ExcelUtilsTest.java</include>
<include>**/plugin/alert/email/MailUtilsTest.java</include>
<include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include>
<include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/wechat/WeChatSenderTest.java</include>
<include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ProcessUtilsTest.java</include>
<include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/script/ScriptSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include>
<include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include>
<include>**/plugin/alert/feishu/FeiShuSenderTest.java</include>
<include>**/plugin/alert/http/HttpAlertPluginTest.java</include>
<include>**/plugin/alert/http/HttpSenderTest.java</include>
<include>**/spi/params/PluginParamsTransferTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/alert/plugin/AlertPluginManagerTest.java</include>
<include>**/alert/plugin/DolphinPluginLoaderTest.java</include>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/processor/AlertRequestProcessorTest.java</include>
<include>**/alert/runner/AlertSenderTest.java</include>
<include>**/alert/AlertServerTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-alert-plugin</module>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-spi</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,266 | [Bug][master-server] If a process instance is started from a failed node or a stopped node, the params will be re-initialized. | If a process instance is started from a failed node or a stopped node, the params will be re-initialized.
If execution starts from a failed node or a paused node, parameters already processed by completed nodes will be re-initialized. | https://github.com/apache/dolphinscheduler/issues/5266 | https://github.com/apache/dolphinscheduler/pull/5267 | 94a08c8f327bed2b53240de3f6ba9018fed74fa9 | 6dbbf6d55d105485b04d2b12533dbbc6e4c9a68f | "2021-04-13T07:22:38Z" | java | "2021-04-16T15:14:22Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.CycleDependency;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.cronutils.model.Cron;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * process related dao operations, aggregating the mappers used below.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
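    // task instance states regarded as unfinished: submitted, running, delayed, ready to pause, ready to stop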
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
*
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = Exception.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
// cannot construct process instance, return null
if (processInstance == null) {
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
if (!checkThreadNum(command, validThreadNum)) {
logger.info("there is not enough thread for this command: {}", command);
return setWaitingThreadProcess(command, processInstance);
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
delCommandById(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
*
* @param command command
* @param message message
*/
@Transactional(rollbackFor = Exception.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
delCommandById(command.getId());
}
/**
* set process waiting thread
*
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITTING_THREAD);
if (command.getCommandType() != CommandType.RECOVER_WAITTING_THREAD) {
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
*
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionId());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
*
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null) {
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
*
* @return command
*/
public Command findOneCommand() {
return commandMapper.getOneToRun();
}
/**
* check the input command exists in queue list
*
* @param command command
* @return create command result
*/
public boolean verifyIsNeedCreateCommand(Command command) {
boolean isNeedCreate = true;
EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
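        // these recovery style commands should exist at most once per recovered process instance, so check for duplicates below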
CommandType commandType = command.getCommandType();
if (cmdTypeMap.containsKey(commandType)) {
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand : commands) {
if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId) {
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
*/
public List<TaskNode> getTaskNodeListByDefinitionId(Integer defineId) {
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.info("process define not exists");
return null;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
return new ArrayList<>();
}
return processData.getTasks();
}
/**
* find process instance by id
*
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId) {
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
*
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* delete work process instance by id
*
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId) {
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
*
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for (Integer subId : subProcessIdList) {
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
*
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId) {
LogClientService logClient = null;
try {
logClient = new LogClientService();
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)) {
return;
}
for (TaskInstance taskInstance : taskInstanceList) {
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())) {
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
} catch (Exception e) {
// compatible old version
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip, port, taskLogPath);
}
} finally {
if (logClient != null) {
logClient.close();
}
}
}
/**
 * calculate the number of sub processes in the process definition.
*
* @param processDefinitionId processDefinitionId
* @return process thread num count
*/
private Integer workProcessThreadNumCount(Integer processDefinitionId) {
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinitionId, ids);
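        // plus one for the parent process itself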
return ids.size() + 1;
}
/**
* recursive query sub process definition id by parent id.
*
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
ProcessDefinition processDefinition = processDefineMapper.selectById(parentId);
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
if (taskNodeList != null && !taskNodeList.isEmpty()) {
for (TaskNode taskNode : taskNodeList) {
String parameter = taskNode.getParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
}
}
}
}
/**
* create recovery waiting thread command when thread pool is not enough for the process instance.
 * sub work process instances do not need to create recovery commands.
 * create the recovery waiting thread command and delete the origin command at the same time.
 * if the recovery command already exists, only update the update_time field
*
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // sub process does not need to create wait command
if (processInstance.getIsSubProcess() == Flag.YES) {
if (originCommand != null) {
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if (originCommand == null) {
Command command = new Command(
CommandType.RECOVER_WAITTING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinitionId(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getWorkerGroup(),
processInstance.getProcessInstancePriority()
);
saveCommand(command);
return;
}
        // update the command time if the current command is recovered from the waiting thread state
if (originCommand.getCommandType() == CommandType.RECOVER_WAITTING_THREAD) {
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
} else {
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITTING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
*
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
Date scheduleTime = command.getScheduleTime();
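        // fall back to the complement data start date when the command itself carries no schedule time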
if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
*
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam) {
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if (scheduleTime != null) {
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
processInstance.setConnects(processDefinition.getConnects());
        // reset global params when start parameters are provided
        setGlobalParamIfCommanded(processDefinition, cmdParam);
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
//copy process define json to process instance
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
// get start params from command param
Map<String, String> startParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
startParamMap = JSONUtils.toMap(startParamJson);
}
Map<String, String> fatherParamMap = new HashMap<>();
if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
fatherParamMap = JSONUtils.toMap(fatherParamJson);
}
startParamMap.putAll(fatherParamMap);
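        // on key collision the parent process params take precedence over the start params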
// set start param into global params
if (startParamMap.size() > 0
&& processDefinition.getGlobalParamMap() != null) {
for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
String val = startParamMap.get(param.getKey());
if (val != null) {
param.setValue(val);
}
}
}
}
/**
* get process tenant
 * if there is a tenant id in the definition, use the tenant of the definition.
 * if there is no tenant id in the definition or the tenant does not exist,
 * use the definition creator's tenant.
*
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId) {
Tenant tenant = null;
if (tenantId >= 0) {
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0) {
return null;
}
if (tenant == null) {
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* check command parameters is valid
*
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
if (cmdParam == null
|| !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
*
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host) {
ProcessInstance processInstance = null;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = null;
if (command.getProcessDefinitionId() != 0) {
processDefinition = processDefineMapper.selectById(command.getProcessDefinitionId());
if (processDefinition == null) {
logger.error("cannot find the work process define! define id : {}", command.getProcessDefinitionId());
return null;
}
}
if (cmdParam != null) {
Integer processInstanceId = 0;
// recover from failure or pause tasks
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
} else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
// sub process map
String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
} else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
// waiting thread command
String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
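            // processInstanceId == 0 means there is no existing instance to recover, so build a brand new one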
if (processInstanceId == 0) {
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
} else {
processInstance = this.findProcessInstanceDetailById(processInstanceId);
CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
                // reset global params from cmdParam when the resolved command type is repeat running
if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
}
// Recalculate global parameters after rerun.
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
commandTypeIfComplement,
processInstance.getScheduleTime()));
}
processDefinition = processDefineMapper.selectById(processInstance.getProcessDefinitionId());
processInstance.setProcessDefinition(processDefinition);
//reset command parameter
if (processInstance.getCommandParam() != null) {
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
if (!cmdParam.containsKey(entry.getKey())) {
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
processInstance.setCommandParam(command.getCommandParam());
}
} else {
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
logger.error("command parameter check failed!");
return null;
}
if (command.getScheduleTime() != null) {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType) {
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for (Integer taskId : failedList) {
initTaskInstance(this.findTaskInstanceById(taskId));
}
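                // record the re-initialized task ids so that execution resumes from these nodes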
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITTING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
                // find paused tasks and init their state
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for (Integer taskId : suspendedNodeList) {
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime + 1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
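                // tasks are soft deleted (flag = NO) so the history of the previous run is kept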
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance taskInstance : validTaskList) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime + 1);
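                // a no-op unless this repeat run is itself a complement data run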
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
 * return COMPLEMENT_DATA if the process started with complement data
*
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
return CommandType.COMPLEMENT_DATA;
} else {
return command.getCommandType();
}
}
/**
* initialize complement data parameters
*
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if (!processInstance.isComplementData()) {
return;
}
Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE),
YYYY_MM_DD_HH_MM_SS);
if (Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(startComplementTime);
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters.
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters
*
* @param subProcessInstance subProcessInstance
* @return process instance
*/
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance) {
String cmdParam = subProcessInstance.getCommandParam();
if (StringUtils.isEmpty(cmdParam)) {
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
&& CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
paramMap.remove(CMD_PARAM_SUB_PROCESS);
paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
        // copy parent instance user defined params to sub process.
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if (StringUtils.isNotEmpty(parentInstanceId)) {
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if (parentInstance != null) {
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
} else {
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return subProcessInstance;
}
/**
* join parent global params into sub process.
 * only keys that do not exist in the sub process global params are joined.
*
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for (Property parent : parentPropertyList) {
if (!subMap.containsKey(parent.getProp())) {
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
/**
* initialize task instance
*
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance) {
if (!taskInstance.isSubProcess()
&& (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
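        // otherwise reset the task to submitted success so that it will be dispatched again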
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* submit task to db
* submit sub process to command
*
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if (task == null) {
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if (!task.getState().typeIsFinished()) {
createSubWorkProcess(processInstance, task);
}
logger.info("end submit task to db successfully:{} state:{} complete, instance id:{} state: {} ",
taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
 * consider that repeat running does not generate a new sub process instance
* set map {parent instance id, task instance id, 0(child instance id)}
*
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if (processMap != null) {
return processMap;
}
if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
// update current task id to map
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if (processMap != null) {
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
*
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for (TaskInstance task : preTaskList) {
if (task.getName().equals(parentTask.getName())) {
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if (map != null) {
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
*
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
if (!task.isSubProcess()) {
return;
}
// check whether the sub workflow needs to be created first
ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
// fault-tolerant recovery does not create a new command when the sub command has already been created
return;
}
instanceMap = setProcessInstanceMap(parentProcessInstance, task);
ProcessInstance childInstance = null;
if (instanceMap.getProcessInstanceId() != 0) {
childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
}
Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionId());
initSubInstanceState(childInstance);
createCommand(subProcessCommand);
logger.info("sub process command created: {} ", subProcessCommand);
}
/**
* complement data needs to transform the parent's parameters to the child.
*/
private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if (parentProcessInstance.isComplementData()) {
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
if (!fatherParams.isEmpty()) {
cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
processMapStr = JSONUtils.toJsonString(cmdParam);
}
return processMapStr;
}
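// Minimal sketch of the command param produced above (key/value shapes are
// assumptions about the json serialization):
//   {"parentProcessInstanceId":"1","parentTaskInstanceId":"10","processInstanceId":"0",
//    "complementStartDate":"2021-01-01 00:00:00","complementEndDate":"2021-01-07 00:00:00",
//    "fatherParams":"{\"bizDate\":\"20210101\"}"}
// i.e. the instance map fields plus, for complement runs, the parent's date window,
// plus the father params serialized as a nested json string.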
public Map<String, String> getGlobalParamMap(String globalParams) {
List<Property> propList;
Map<String, String> globalParamMap = new HashMap<>();
if (StringUtils.isNotEmpty(globalParams)) {
propList = JSONUtils.toList(globalParams, Property.class);
globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
}
return globalParamMap;
}
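// Example (editor's sketch): getGlobalParamMap("[{\"prop\":\"bizDate\",\"value\":\"20210101\"}]")
// returns {bizDate=20210101}; a null or empty input yields an empty map.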
/**
* create sub work process command
*/
public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
ProcessInstance childInstance,
ProcessInstanceMap instanceMap,
TaskInstance task) {
CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
TaskNode taskNode = JSONUtils.parseObject(task.getTaskJson(), TaskNode.class);
Map<String, Object> subProcessParam = JSONUtils.toMap(taskNode.getParams(), String.class, Object.class);
Integer childDefineId = Integer.parseInt(String.valueOf(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID)));
Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
Map<String, String> fatherParams = new HashMap<>();
if (CollectionUtils.isNotEmpty(allParam)) {
for (Property info : allParam) {
fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
}
}
String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
return new Command(
commandType,
TaskDependType.TASK_POST,
parentProcessInstance.getFailureStrategy(),
parentProcessInstance.getExecutorId(),
childDefineId,
processParam,
parentProcessInstance.getWarningType(),
parentProcessInstance.getWarningGroupId(),
parentProcessInstance.getScheduleTime(),
task.getWorkerGroup(),
parentProcessInstance.getProcessInstancePriority()
);
}
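// Sketch of the father-param hand-off (the task json shape is an assumption): if the
// SUB_PROCESS node declares localParams [{"prop":"bizDate","direct":"IN",...}] and the
// parent's globalParams contain bizDate=20210101, fatherParams becomes {bizDate=20210101}
// and rides along in the child command's param string; keys absent from the parent's
// global params are mapped to null here.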
/**
* initialize sub work flow state
* child instance state would be initialized when 'recovery from pause/stop/failure'
*/
private void initSubInstanceState(ProcessInstance childInstance) {
if (childInstance != null) {
childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
updateProcessInstance(childInstance);
}
}
/**
* get sub work flow command type
* child instance exist: child command = fatherCommand
* child instance not exists: child command = fatherCommand[0]
*/
private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
CommandType commandType = parentProcessInstance.getCommandType();
if (childInstance == null) {
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
return commandType;
}
/**
* update sub process definition
*
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionId childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, int childDefinitionId) {
ProcessDefinition fatherDefinition = this.findProcessDefineById(parentProcessInstance.getProcessDefinitionId());
ProcessDefinition childDefinition = this.findProcessDefineById(childDefinitionId);
if (childDefinition != null && fatherDefinition != null) {
childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
processDefineMapper.updateById(childDefinition);
}
}
/**
* submit task instance to the database
*
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
ExecutionStatus processInstanceState = processInstance.getState();
if (taskInstance.getState().typeIsFailure()) {
if (taskInstance.isSubProcess()) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
} else {
if (processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE) {
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
// create new task instance
if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if (!saveResult) {
return null;
}
return taskInstance;
}
/**
* get submit task instance state by the work process state
* cannot modify the task state when it is running/killed/submitted successfully,
* or when this task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
* if all of above are not satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
ExecutionStatus state = taskInstance.getState();
// running, delayed or killed
// the task already exists in task queue
// return state
if (
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
) {
return state;
}
// return pause/stop if process instance state is ready pause/stop,
// or return submit success
if (processInstanceState == ExecutionStatus.READY_PAUSE) {
state = ExecutionStatus.PAUSE;
} else if (processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
} else {
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
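// Decision sketch, derived from the branches above and checkProcessStrategy below:
//   task already RUNNING / DELAY_EXECUTION / KILL          -> keep current task state
//   process READY_PAUSE                                    -> PAUSE
//   process READY_STOP, or strategy END with a failed task -> KILL
//   otherwise                                              -> SUBMITTED_SUCCESS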
/**
* check process instance strategy
*
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance) {
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if (failureStrategy == FailureStrategy.CONTINUE) {
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for (TaskInstance task : taskInstances) {
if (task.getState() == ExecutionStatus.FAILURE) {
return false;
}
}
return true;
}
/**
* create a new process instance
*
* @param processInstance processInstance
*/
public void createProcessInstance(ProcessInstance processInstance) {
if (processInstance != null) {
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update work process instance to data base
*
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance) {
if (processInstance == null) {
logger.error("save error, process instance is null!");
return;
}
if (processInstance.getId() != 0) {
processInstanceMapper.updateById(processInstance);
} else {
createProcessInstance(processInstance);
}
}
/**
* insert or update command
*
* @param command command
* @return save command result
*/
public int saveCommand(Command command) {
if (command.getId() != 0) {
return commandMapper.updateById(command);
} else {
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
*
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance) {
if (taskInstance.getId() != 0) {
return updateTaskInstance(taskInstance);
} else {
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
*
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
*
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* delete a command by id
*
* @param id id
*/
public void delCommandById(int id) {
commandMapper.deleteById(id);
}
/**
* find task instance by id
*
* @param taskId task id
* @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId) {
return taskInstanceMapper.selectById(taskId);
}
/**
* package task instance, associate processInstance and processDefine
*
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if (taskInstance == null) {
return taskInstance;
}
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefineById(taskInstance.getProcessDefinitionId());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
return taskInstance;
}
/**
* get id list by task state
*
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
*
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
*
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
*
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
if (processInstanceMap == null) {
return 0;
}
return processInstanceMapMapper.insert(processInstanceMap);
}
/**
* find work process map by parent process id and parent task id.
*
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
*
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
*
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
*
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
*
* @param taskInstance taskInstance
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
*
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance) {
return processInstanceMapper.updateById(processInstance);
}
/**
* update the process instance
*
* @param processInstanceId processInstanceId
* @param processJson processJson
* @param globalParams globalParams
* @param scheduleTime scheduleTime
* @param flag flag
* @param locations locations
* @param connects connects
* @return update process instance result
*/
public int updateProcessInstance(Integer processInstanceId, String processJson,
String globalParams, Date scheduleTime, Flag flag,
String locations, String connects) {
ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
if (processInstance != null) {
processInstance.setProcessInstanceJson(processJson);
processInstance.setGlobalParams(globalParams);
processInstance.setScheduleTime(scheduleTime);
processInstance.setLocations(locations);
processInstance.setConnects(connects);
return processInstanceMapper.updateById(processInstance);
}
return 0;
}
/**
* change task state
*
* @param taskInstance taskInstance
* @param state state
* @param endTime endTime
* @param processId processId
* @param appIds appIds
* @param taskInstId taskInstId
* @param varPool varPool
* @param result result
*/
public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool,
String result) {
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
changeOutParam(result, taskInstance);
saveTaskInstance(taskInstance);
}
public void changeOutParam(String result, TaskInstance taskInstance) {
if (StringUtils.isEmpty(result)) {
return;
}
List<Map<String, String>> workerResultParam = getListMapByString(result);
if (CollectionUtils.isEmpty(workerResultParam)) {
return;
}
// if the result has more than one line, just take the first one.
Map<String, String> row = workerResultParam.get(0);
if (row == null || row.isEmpty()) {
return;
}
TaskNode taskNode = JSONUtils.parseObject(taskInstance.getTaskJson(), TaskNode.class);
Map<String, Object> taskParams = JSONUtils.toMap(taskNode.getParams(), String.class, Object.class);
Object localParams = taskParams.get(LOCAL_PARAMS);
if (localParams == null) {
return;
}
ProcessInstance processInstance = this.processInstanceMapper.queryDetailById(taskInstance.getProcessInstanceId());
List<Property> params4Property = JSONUtils.toList(processInstance.getGlobalParams(), Property.class);
Map<String, Property> allParamMap = params4Property.stream().collect(Collectors.toMap(Property::getProp, property -> property));
List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
for (Property info : allParam) {
if (info.getDirect() == Direct.OUT) {
String paramName = info.getProp();
Property property = allParamMap.get(paramName);
if (property == null) {
continue;
}
String value = String.valueOf(row.get(paramName));
if (StringUtils.isNotEmpty(value)) {
property.setValue(value);
info.setValue(value);
}
}
}
taskParams.put(LOCAL_PARAMS, allParam);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
// task instance node json
taskInstance.setTaskJson(JSONUtils.toJsonString(taskNode));
String params4ProcessString = JSONUtils.toJsonString(params4Property);
int updateCount = this.processInstanceMapper.updateGlobalParamsById(params4ProcessString, processInstance.getId());
logger.info("updateCount:{}, params4Process:{}, processInstanceId:{}", updateCount, params4ProcessString, processInstance.getId());
}
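// Worked example (editor's sketch, json shapes are assumptions): a worker result of
//   [{"total":"100"}]
// combined with a task-node localParams entry {"prop":"total","direct":"OUT",...} and a
// process-level global param named "total" rewrites both the task json and the process
// instance's globalParams with value "100"; OUT params without a matching global param
// are skipped (property == null above).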
public List<Map<String, String>> getListMapByString(String json) {
List<Map<String, String>> allParams = new ArrayList<>();
ArrayNode paramsByJson = JSONUtils.parseArray(json);
Iterator<JsonNode> listIterator = paramsByJson.iterator();
while (listIterator.hasNext()) {
Map<String, String> param = JSONUtils.toMap(listIterator.next().toString(), String.class, String.class);
allParams.add(param);
}
return allParams;
}
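// Example (editor's sketch): getListMapByString("[{\"k\":\"v\"},{\"a\":\"b\"}]") yields
// [{k=v}, {a=b}]; each array element is re-parsed as a String -> String map.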
/**
* convert integer list to string list
*
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList) {
if (intList == null) {
return new ArrayList<>();
}
List<String> result = new ArrayList<>(intList.size());
for (Integer intVar : intList) {
result.add(String.valueOf(intVar));
}
return result;
}
/**
* query schedule by id
*
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionId
*
* @param processDefinitionId processDefinitionId
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionId(int processDefinitionId) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
}
/**
* query need failover process instance
*
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
*
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
//1 update processInstance host is null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
//2 insert into recover command
Command cmd = new Command();
cmd.setProcessDefinitionId(processInstance.getProcessDefinitionId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
*
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host) {
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
*
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id) {
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
*
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
*
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if (taskInstance != null) {
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
*
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByIds(int[] ids) {
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
*
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName, ResourceType resourceType) {
// normalize the resource name so the tenant code can be queried successfully even when the name comes from an older version
String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName);
List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
return StringUtils.EMPTY;
}
int userId = resourceList.get(0).getUserId();
User user = userMapper.selectById(userId);
if (Objects.isNull(user)) {
return StringUtils.EMPTY;
}
Tenant tenant = tenantMapper.selectById(user.getTenantId());
if (Objects.isNull(tenant)) {
return StringUtils.EMPTY;
}
return tenant.getTenantCode();
}
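// Normalization sketch: a legacy name "udfs/func.jar" is queried as "/udfs/func.jar",
// while "/udfs/func.jar" is used as-is; the tenant code is then resolved through the
// first matching resource's owner (resource -> user -> tenant), falling back to "" at
// each missing link.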
/**
* find schedule list by process define id.
*
* @param ids ids
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineId(int[] ids) {
return scheduleMapper.selectAllByProcessDefineArray(
ids);
}
/**
* get dependency cycle by work process define id and scheduler fire time
*
* @param masterId masterId
* @param processDefinitionId processDefinitionId
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency
* @throws Exception if error throws Exception
*/
public CycleDependency getCycleDependency(int masterId, int processDefinitionId, Date scheduledFireTime) throws Exception {
List<CycleDependency> list = getCycleDependencies(masterId, new int[]{processDefinitionId}, scheduledFireTime);
return !list.isEmpty() ? list.get(0) : null;
}
/**
* get dependency cycle list by work process define id list and scheduler fire time
*
* @param masterId masterId
* @param ids ids
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency list
* @throws Exception if error throws Exception
*/
public List<CycleDependency> getCycleDependencies(int masterId, int[] ids, Date scheduledFireTime) throws Exception {
List<CycleDependency> cycleDependencyList = new ArrayList<>();
if (null == ids || ids.length == 0) {
logger.warn("ids[] is empty!is invalid!");
return cycleDependencyList;
}
if (scheduledFireTime == null) {
logger.warn("scheduledFireTime is null!is invalid!");
return cycleDependencyList;
}
String strCrontab = "";
CronExpression depCronExpression;
Cron depCron;
List<Date> list;
List<Schedule> schedules = this.selectAllByProcessDefineId(ids);
// for all scheduling information
for (Schedule depSchedule : schedules) {
strCrontab = depSchedule.getCrontab();
depCronExpression = CronUtils.parse2CronExpression(strCrontab);
depCron = CronUtils.parse2Cron(strCrontab);
CycleEnum cycleEnum = CronUtils.getMiniCycle(depCron);
if (cycleEnum == null) {
logger.error("{} is not valid", strCrontab);
continue;
}
Calendar calendar = Calendar.getInstance();
switch (cycleEnum) {
case HOUR:
calendar.add(Calendar.HOUR, -25);
break;
case DAY:
case WEEK:
calendar.add(Calendar.DATE, -32);
break;
case MONTH:
calendar.add(Calendar.MONTH, -13);
break;
default:
String cycleName = cycleEnum.name();
logger.warn("Dependent process definition's cycleEnum is {},not support!!", cycleName);
continue;
}
Date start = calendar.getTime();
if (depSchedule.getProcessDefinitionId() == masterId) {
list = CronUtils.getSelfFireDateList(start, scheduledFireTime, depCronExpression);
} else {
list = CronUtils.getFireDateList(start, scheduledFireTime, depCronExpression);
}
if (!list.isEmpty()) {
start = list.get(list.size() - 1);
CycleDependency dependency = new CycleDependency(depSchedule.getProcessDefinitionId(), start, CronUtils.getExpirationTime(start, cycleEnum), cycleEnum);
cycleDependencyList.add(dependency);
}
}
return cycleDependencyList;
}
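// Lookback sketch, derived from the switch above: the search window ends at
// scheduledFireTime and starts 25 hours back for HOUR crons, 32 days back for DAY/WEEK
// crons and 13 months back for MONTH crons; the last fire date inside that window
// becomes the dependency's start time.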
/**
* find last scheduler process instance in the date interval
*
* @param definitionId definitionId
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
*
* @param definitionId process definition id
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
*
* @param definitionId process definition id
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(int definitionId, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionId,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
*
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId) {
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if (executor != null) {
queue = executor.getQueue();
}
return queue;
}
/**
* query project name and user name by processInstanceId.
*
* @param processInstanceId processInstanceId
* @return projectName and userName
*/
public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) {
return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId);
}
/**
* get task worker group
*
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if (StringUtils.isNotBlank(workerGroup)) {
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if (processInstance != null) {
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
*
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId) {
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if (createProjects == null) {
createProjects = new ArrayList<>();
}
if (authedProjects != null) {
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* get have perm project ids
*
* @param userId userId
* @return project ids
*/
public List<Integer> getProjectIdListHavePerm(int userId) {
List<Integer> projectIdList = new ArrayList<>();
for (Project project : getProjectListHavePerm(userId)) {
projectIdList.add(project.getId());
}
return projectIdList;
}
/**
* list unauthorized entities by authorization type
*
* @param userId user id
* @param needChecks the ids or names that need to be checked
* @param authorizationType authorization type
* @return unauthorized entity list
*/
public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) {
List<T> resultList = new ArrayList<>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks));
switch (authorizationType) {
case RESOURCE_FILE_ID:
case UDF_FILE:
List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks);
addAuthorizedResources(ownUdfResources, userId);
Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks);
addAuthorizedResources(ownResources, userId);
Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
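// Usage sketch (ids are hypothetical):
//   List<Integer> denied = listUnauthorized(userId, new Integer[]{1, 2, 3}, AuthorizationType.DATASOURCE);
// returns the subset of {1, 2, 3} the user may NOT access: authorized ids are removed
// from the requested set and whatever remains is unauthorized.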
/**
* get user by user id
*
* @param userId user id
* @return User
*/
public User getUserById(int userId) {
return userMapper.selectById(userId);
}
/**
* get resource by resource id
*
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId) {
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
*
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds) {
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessDefinition definition = this.findProcessDefineById(taskInstance.getProcessDefinitionId());
ProcessInstance processInstanceById = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if (definition == null || processInstanceById == null) {
return "";
}
return String.format("%s_%s_%s",
definition.getId(),
processInstanceById.getId(),
taskInstance.getId());
}
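// Example (hypothetical ids): for definition 1, process instance 10 and task 100 this
// returns "1_10_100"; if either the definition or the instance cannot be found it
// returns "".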
/**
* solve the branch rename bug
*
* @param processData processData
* @param oldJson oldJson
* @return the new process definition json with renamed branch nodes
*/
public String changeJson(ProcessData processData, String oldJson) {
ProcessData oldProcessData = JSONUtils.parseObject(oldJson, ProcessData.class);
HashMap<String, String> oldNameTaskId = new HashMap<>();
List<TaskNode> oldTasks = oldProcessData.getTasks();
for (int i = 0; i < oldTasks.size(); i++) {
TaskNode taskNode = oldTasks.get(i);
String oldName = taskNode.getName();
String oldId = taskNode.getId();
oldNameTaskId.put(oldName, oldId);
}
// take the process definition json saved this time, and then record the task id and name
HashMap<String, String> newNameTaskId = new HashMap<>();
List<TaskNode> newTasks = processData.getTasks();
for (int i = 0; i < newTasks.size(); i++) {
TaskNode taskNode = newTasks.get(i);
String newId = taskNode.getId();
String newName = taskNode.getName();
newNameTaskId.put(newId, newName);
}
// replace the previous conditionResult with a new one
List<TaskNode> tasks = processData.getTasks();
for (int i = 0; i < tasks.size(); i++) {
TaskNode taskNode = newTasks.get(i);
String type = taskNode.getType();
if (TaskType.CONDITIONS.getDescp().equalsIgnoreCase(type)) {
ConditionsParameters conditionsParameters = JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
String oldSuccessNodeName = conditionsParameters.getSuccessNode().get(0);
String oldFailedNodeName = conditionsParameters.getFailedNode().get(0);
String newSuccessNodeName = newNameTaskId.get(oldNameTaskId.get(oldSuccessNodeName));
String newFailedNodeName = newNameTaskId.get(oldNameTaskId.get(oldFailedNodeName));
if (newSuccessNodeName != null) {
ArrayList<String> successNode = new ArrayList<>();
successNode.add(newSuccessNodeName);
conditionsParameters.setSuccessNode(successNode);
}
if (newFailedNodeName != null) {
ArrayList<String> failedNode = new ArrayList<>();
failedNode.add(newFailedNodeName);
conditionsParameters.setFailedNode(failedNode);
}
String conditionResultStr = conditionsParameters.getConditionResult();
taskNode.setConditionResult(conditionResultStr);
tasks.set(i, taskNode);
}
}
return JSONUtils.toJsonString(processData);
}
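// Rename sketch (hypothetical nodes): if a CONDITIONS node's successNode was "shell-old"
// and the task with the same id is now named "shell-new", the old name is mapped id-wise
// (oldName -> id -> newName) and the conditionResult is rewritten to point at
// "shell-new"; names that cannot be mapped leave the branch untouched.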
/**
* add authorized resources
* @param ownResources own resources
* @param userId userId
*/
private void addAuthorizedResources(List<Resource> ownResources, int userId) {
List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
ownResources.addAll(relationResources);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,309 | [Bug][Common] memoryUsage is `-33%` | **To Reproduce**
`memoryUsage` is `-33%`
**Expected behavior**
Bug fixed
**Screenshots**
![image](https://user-images.githubusercontent.com/4902714/115050583-779bf480-9f0e-11eb-8eed-4abcc8df3fc4.png)
**Which version of Dolphin Scheduler:**
- [1.3.x]
- [dev]
| https://github.com/apache/dolphinscheduler/issues/5309 | https://github.com/apache/dolphinscheduler/pull/5312 | 5ef1f731b7f3b48915859b7a9595ec42df614c48 | e92e29ef9a126d1d5660011545056d4dd806d105 | "2021-04-16T15:51:04Z" | java | "2021-04-19T02:24:54Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import java.util.regex.Pattern;
/**
* Constants
*/
public final class Constants {
private Constants() {
throw new UnsupportedOperationException("Construct Constants");
}
/**
* quartz config
*/
public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId";
public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon";
public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties";
public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class";
public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount";
public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons";
public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority";
public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class";
public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix";
public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered";
public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold";
public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval";
public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock";
public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource";
public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class";
/**
* quartz config default value
*/
public static final String QUARTZ_TABLE_PREFIX = "QRTZ_";
public static final String QUARTZ_MISFIRETHRESHOLD = "60000";
public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000";
public static final String QUARTZ_DATASOURCE = "myDs";
public static final String QUARTZ_THREADCOUNT = "25";
public static final String QUARTZ_THREADPRIORITY = "5";
public static final String QUARTZ_INSTANCENAME = "DolphinScheduler";
public static final String QUARTZ_INSTANCEID = "AUTO";
public static final String QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true";
/**
* common properties path
*/
public static final String COMMON_PROPERTIES_PATH = "/common.properties";
/**
* fs.defaultFS
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
* yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
public static final String YARN_RESOURCEMANAGER_HA_XX = "xx";
/**
* yarn.application.status.address
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* yarn.job.history.status.address
*/
public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs/s3 configuration
* resource.upload.path
*/
public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";
/**
* data basedir path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* dolphinscheduler.env.path
*/
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
/**
* environment properties default path
*/
public static final String ENV_PATH = "env/dolphinscheduler_env.sh";
/**
* python home
*/
public static final String PYTHON_HOME = "PYTHON_HOME";
/**
* resource.view.suffixs
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true";
/**
* sudo enable
*/
public static final String SUDO_ENABLE = "sudo.enable";
/**
* string true
*/
public static final String STRING_TRUE = "true";
/**
* string false
*/
public static final String STRING_FALSE = "false";
/**
* resource storage type
*/
public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
/**
* all servers directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers";
/**
* MasterServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
/**
* MasterServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters";
/**
* WorkerServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers";
/**
* MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* slash /
*/
public static final String SLASH = "/";
/**
* COLON :
*/
public static final String COLON = ":";
/**
* SPACE " "
*/
public static final String SPACE = " ";
/**
* SINGLE_SLASH /
*/
public static final String SINGLE_SLASH = "/";
/**
* DOUBLE_SLASH //
*/
public static final String DOUBLE_SLASH = "//";
/**
* SINGLE_QUOTES "'"
*/
public static final String SINGLE_QUOTES = "'";
/**
* DOUBLE_QUOTES "\""
*/
public static final String DOUBLE_QUOTES = "\"";
/**
* SEMICOLON ;
*/
public static final String SEMICOLON = ";";
/**
* EQUAL SIGN
*/
public static final String EQUAL_SIGN = "=";
/**
* AT SIGN
*/
public static final String AT_SIGN = "@";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory";
public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg";
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMddHHmmss
*/
public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
/**
* date format of yyyyMMddHHmmssSSS
*/
public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS";
/**
* http connect time out
*/
public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;
/**
* http connect request time out
*/
public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;
/**
* httpclient socket timeout
*/
public static final int SOCKET_TIMEOUT = 60 * 1000;
/**
* http header
*/
public static final String HTTP_HEADER_UNKNOWN = "unKnown";
/**
* http X-Forwarded-For
*/
public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
/**
* http X-Real-IP
*/
public static final String HTTP_X_REAL_IP = "X-Real-IP";
/**
* UTF-8
*/
public static final String UTF_8 = "UTF-8";
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
/**
* default display rows
*/
public static final int DEFAULT_DISPLAY_ROWS = 10;
/**
* read permission
*/
public static final int READ_PERMISSION = 2 * 1;
/**
* write permission
*/
public static final int WRITE_PERMISSION = 2 * 2;
/**
* execute permission
*/
public static final int EXECUTE_PERMISSION = 1;
/**
* default admin permission
*/
public static final int DEFAULT_ADMIN_PERMISSION = 7;
/**
* all permissions
*/
public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;
/**
* max task timeout
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* master cpu load
*/
public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* master reserved memory
*/
public static final double DEFAULT_MASTER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker cpu load
*/
public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* worker reserved memory
*/
public static final double DEFAULT_WORKER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker host weight
*/
public static final int DEFAULT_WORKER_HOST_WEIGHT = 100;
/**
* default log cache rows num, output when the number is reached
*/
public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;
/**
* log flush interval, output when the interval is reached
*/
public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;
/**
* time unit secong to minutes
*/
public static final int SEC_2_MINUTES_TIME_UNIT = 60;
/**
* rpc port
*/
public static final int RPC_PORT = 50051;
/**
* alert rpc port
*/
public static final int ALERT_RPC_PORT = 50052;
/**
* forbid running task
*/
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* datasource configuration path
*/
public static final String DATASOURCE_PROPERTIES = "/datasource.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
public static final String TASK_RECORD_FLAG = "task.record.flag";
public static final String TASK_RECORD_USER = "task.record.datasource.username";
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static final String NULL = "NULL";
public static final String THREAD_NAME_MASTER_SERVER = "Master-Server";
public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server";
public static final String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static final String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
/**
* command parameter keys
*/
public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId";
public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId";
public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0";
public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList";
public static final String CMD_PARAM_START_PARAMS = "StartParams";
public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams";
/**
* complement data start date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";
/**
* complement data end date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";
/**
* hadoop configuration
*/
public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";
/**
* data source config
*/
public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";
public static final String DEVELOPMENT = "development";
public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";
/**
* sleep time
*/
public static final int SLEEP_TIME_MILLIS = 1000;
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10;
public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11;
/**
* jar
*/
public static final String JAR = "jar";
/**
* hadoop
*/
public static final String HADOOP = "hadoop";
/**
* -D <property>=<value>
*/
public static final String D = "-D";
/**
* -D mapreduce.job.name=name
*/
public static final String MR_NAME = "mapreduce.job.name";
/**
* -D mapreduce.job.queuename=queuename
*/
public static final String MR_QUEUE = "mapreduce.job.queuename";
/**
* spark params constant
*/
public static final String MASTER = "--master";
public static final String DEPLOY_MODE = "--deploy-mode";
/**
* --class CLASS_NAME
*/
public static final String MAIN_CLASS = "--class";
/**
* --driver-cores NUM
*/
public static final String DRIVER_CORES = "--driver-cores";
/**
* --driver-memory MEM
*/
public static final String DRIVER_MEMORY = "--driver-memory";
/**
* --num-executors NUM
*/
public static final String NUM_EXECUTORS = "--num-executors";
/**
* --executor-cores NUM
*/
public static final String EXECUTOR_CORES = "--executor-cores";
/**
* --executor-memory MEM
*/
public static final String EXECUTOR_MEMORY = "--executor-memory";
/**
* --name NAME
*/
public static final String SPARK_NAME = "--name";
/**
* --queue QUEUE
*/
public static final String SPARK_QUEUE = "--queue";
/**
* exit code success
*/
public static final int EXIT_CODE_SUCCESS = 0;
/**
* exit code kill
*/
public static final int EXIT_CODE_KILL = 137;
/**
* exit code failure
*/
public static final int EXIT_CODE_FAILURE = -1;
/**
* date format of yyyyMMdd
*/
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/**
* system date(yyyyMMddHHmmss)
*/
public static final String PARAMETER_DATETIME = "system.datetime";
/**
* system date(yyyymmdd) today
*/
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/**
* system date(yyyymmdd) yesterday
*/
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/**
* ACCEPTED
*/
public static final String ACCEPTED = "ACCEPTED";
/**
* SUCCEEDED
*/
public static final String SUCCEEDED = "SUCCEEDED";
/**
* NEW
*/
public static final String NEW = "NEW";
/**
* NEW_SAVING
*/
public static final String NEW_SAVING = "NEW_SAVING";
/**
* SUBMITTED
*/
public static final String SUBMITTED = "SUBMITTED";
/**
* FAILED
*/
public static final String FAILED = "FAILED";
/**
* KILLED
*/
public static final String KILLED = "KILLED";
/**
* RUNNING
*/
public static final String RUNNING = "RUNNING";
/**
* underline "_"
*/
public static final String UNDERLINE = "_";
/**
* quartz job prefix
*/
public static final String QUARTZ_JOB_PRIFIX = "job";
/**
* quartz job group prefix
*/
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/**
* projectId
*/
public static final String PROJECT_ID = "projectId";
/**
* processId
*/
public static final String SCHEDULE_ID = "scheduleId";
/**
* schedule
*/
public static final String SCHEDULE = "schedule";
/**
* application regex
*/
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = OSUtils.isWindows() ? "handle" : "pid";
/**
* month_begin
*/
public static final String MONTH_BEGIN = "month_begin";
/**
* add_months
*/
public static final String ADD_MONTHS = "add_months";
/**
* month_end
*/
public static final String MONTH_END = "month_end";
/**
* week_begin
*/
public static final String WEEK_BEGIN = "week_begin";
/**
* week_end
*/
public static final String WEEK_END = "week_end";
/**
* timestamp
*/
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String LOCAL_PARAMS_LIST = "localParamsList";
public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance";
public static final String TASK_TYPE = "taskType";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
public static final String QUEUE = "queue";
public static final String QUEUE_NAME = "queueName";
public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0;
public static final int LOG_QUERY_LIMIT = 4096;
/**
* master/worker server use for zk
*/
public static final String MASTER_TYPE = "master";
public static final String WORKER_TYPE = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
/**
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* kerberos
*/
public static final String KERBEROS = "kerberos";
/**
* kerberos expire time
*/
public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
 * hadoop.security.authentication.startup.state
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* com.amazonaws.services.s3.enableV4
*/
public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
 * default worker id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
/**
* task log info format
*/
public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";
/**
* hive conf
*/
public static final String HIVE_CONF = "hiveconf:";
/**
* flink
*/
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
public static final String FLINK_APP_NAME = "-ynm";
public static final String FLINK_QUEUE = "-yqu";
public static final String FLINK_TASK_MANAGE = "-yn";
public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
public static final String FLINK_MAIN_CLASS = "-c";
public static final String FLINK_PARALLELISM = "-p";
public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae";
public static final int[] NOT_TERMINATED_STATES = new int[] {
ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.WAITTING_THREAD.ordinal(),
ExecutionStatus.WAITTING_DEPEND.ordinal()
};
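// Illustrative intent (an assumption about call sites, not stated in this file):
// these ordinals back queries that select process/task instances still in flight,
// e.g. so a failover pass can recover anything not yet in a terminal state.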
/**
* status
*/
public static final String STATUS = "status";
/**
* message
*/
public static final String MSG = "msg";
/**
* data total
*/
public static final String COUNT = "count";
/**
* page size
*/
public static final String PAGE_SIZE = "pageSize";
/**
* current page no
*/
public static final String PAGE_NUMBER = "pageNo";
/**
 * data list
*/
public static final String DATA_LIST = "data";
public static final String TOTAL_LIST = "totalList";
public static final String CURRENT_PAGE = "currentPage";
public static final String TOTAL_PAGE = "totalPage";
public static final String TOTAL = "total";
/**
* workflow
*/
public static final String WORKFLOW_LIST = "workFlowList";
public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList";
/**
* session user
*/
public static final String SESSION_USER = "session.user";
public static final String SESSION_ID = "sessionId";
public static final String PASSWORD_DEFAULT = "******";
/**
* locale
*/
public static final String LOCALE_LANGUAGE = "language";
/**
* driver
*/
public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver";
/**
* database type
*/
public static final String MYSQL = "MYSQL";
public static final String POSTGRESQL = "POSTGRESQL";
public static final String HIVE = "HIVE";
public static final String SPARK = "SPARK";
public static final String CLICKHOUSE = "CLICKHOUSE";
public static final String ORACLE = "ORACLE";
public static final String SQLSERVER = "SQLSERVER";
public static final String DB2 = "DB2";
public static final String PRESTO = "PRESTO";
/**
* jdbc url
*/
public static final String JDBC_MYSQL = "jdbc:mysql://";
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
public static final String JDBC_PRESTO = "jdbc:presto://";
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf";
public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername";
public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath";
/**
* session timeout
*/
public static final int SESSION_TIME_OUT = 7200;
public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
public static final String UDF = "UDF";
public static final String CLASS = "class";
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
/**
* dataSource sensitive param
*/
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
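// Illustrative match (example input is hypothetical): applied to the JSON fragment
// {"password":"123456","user":"root"}, the lookbehind/lookahead pair selects
// exactly 123456, so the value can be masked before the connection info is logged.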
/**
* default worker group
*/
public static final String DEFAULT_WORKER_GROUP = "default";
public static final Integer TASK_INFO_LENGTH = 5;
/**
 * schedule time
*/
public static final String PARAMETER_SHECDULE_TIME = "schedule.time";
/**
* authorize writable perm
*/
public static final int AUTHORIZE_WRITABLE_PERM = 7;
/**
* authorize readable perm
*/
public static final int AUTHORIZE_READABLE_PERM = 4;
/**
* plugin configurations
*/
public static final String PLUGIN_JAR_SUFFIX = ".jar";
public static final int NORMAL_NODE_STATUS = 0;
public static final int ABNORMAL_NODE_STATUS = 1;
public static final String START_TIME = "start time";
public static final String END_TIME = "end time";
public static final String START_END_DATE = "startDate,endDate";
/**
* system line separator
*/
public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");
/**
 * network system properties
*/
public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred";
public static final String EXCEL_SUFFIX_XLS = ".xls";
/**
* datasource encryption salt
*/
public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";
/**
 * network IP selection priority; the default strategy prefers inner addresses, then outer
*/
public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";
/**
* exec shell scripts
*/
public static final String SH = "sh";
/**
 * pstree, get pid and sub pids
*/
public static final String PSTREE = "pstree";
/**
* docker & kubernetes
*/
public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER"));
public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,309 | [Bug][Common] memoryUsage is `-33%` | **To Reproduce**
`memoryUsage` is `-33%`
**Expected behavior**
Bug fixed
**Screenshots**
![image](https://user-images.githubusercontent.com/4902714/115050583-779bf480-9f0e-11eb-8eed-4abcc8df3fc4.png)
**Which version of Dolphin Scheduler:**
-[1.3.x]
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5309 | https://github.com/apache/dolphinscheduler/pull/5312 | 5ef1f731b7f3b48915859b7a9595ec42df614c48 | e92e29ef9a126d1d5660011545056d4dd806d105 | "2021-04-16T15:51:04Z" | java | "2021-04-19T02:24:54Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.shell.ShellExecutor;
import org.apache.commons.configuration.Configuration;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.math.RoundingMode;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.StringTokenizer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
/**
* os utils
*/
public class OSUtils {
private static final Logger logger = LoggerFactory.getLogger(OSUtils.class);
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private static final SystemInfo SI = new SystemInfo();
public static final String TWO_DECIMAL = "0.00";
/**
 * return -1 when the function cannot get hardware env info
* e.g {@link OSUtils#loadAverage()} {@link OSUtils#cpuUsage()}
*/
public static final double NEGATIVE_ONE = -1;
private static HardwareAbstractionLayer hal = SI.getHardware();
private OSUtils() {
throw new UnsupportedOperationException("Construct OSUtils");
}
/**
 * Compile the whitespace pattern once up front: precompiling avoids the cost of
 * repeated regex compilation and any thread-safety concerns under concurrent use
*/
private static final Pattern PATTERN = Pattern.compile("\\s+");
/**
* get memory usage
 * keep two decimal places
*
* @return percent %
*/
public static double memoryUsage() {
GlobalMemory memory = hal.getMemory();
double memoryUsage = (memory.getTotal() - memory.getAvailable() - memory.getSwapUsed()) * 0.1 / memory.getTotal() * 10;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(memoryUsage));
}
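// Note on issue #5309 above: whenever swap in use exceeds (total - available),
// the numerator of the expression goes negative, which is exactly how readings
// like -33% arise; the same getSwapUsed() term also inflates
// availablePhysicalMemorySize() below. A plausible correction (an assumption
// here, not the verbatim patch from the linked PR) is to drop the swap term:
// double memoryUsage = (memory.getTotal() - memory.getAvailable()) * 1.0 / memory.getTotal();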
/**
* get available physical memory size
* <p>
 * keep two decimal places
*
* @return available Physical Memory Size, unit: G
*/
public static double availablePhysicalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = (memory.getAvailable() + memory.getSwapUsed()) / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* get total physical memory size
* <p>
 * keep two decimal places
 *
 * @return total physical memory size, unit: G
 */
public static double totalMemorySize() {
GlobalMemory memory = hal.getMemory();
double totalPhysicalMemorySize = memory.getTotal() / 1024.0 / 1024 / 1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(totalPhysicalMemorySize));
}
/**
* load average
*
* @return load average
*/
public static double loadAverage() {
double loadAverage;
try {
OperatingSystemMXBean osBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
loadAverage = osBean.getSystemLoadAverage();
} catch (Exception e) {
logger.error("get operation system load average exception, try another method ", e);
loadAverage = hal.getProcessor().getSystemLoadAverage();
if (Double.isNaN(loadAverage)) {
return NEGATIVE_ONE;
}
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(loadAverage));
}
/**
* get cpu usage
*
* @return cpu usage
*/
public static double cpuUsage() {
CentralProcessor processor = hal.getProcessor();
double cpuUsage = processor.getSystemCpuLoad();
if (Double.isNaN(cpuUsage)) {
return NEGATIVE_ONE;
}
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(cpuUsage));
}
public static List<String> getUserList() {
try {
if (isMacOS()) {
return getUserListFromMac();
} else if (isWindows()) {
return getUserListFromWindows();
} else {
return getUserListFromLinux();
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return Collections.emptyList();
}
/**
* get user list from linux
*
* @return user list
*/
private static List<String> getUserListFromLinux() throws IOException {
List<String> userList = new ArrayList<>();
try (BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(new FileInputStream("/etc/passwd")))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
if (line.contains(":")) {
String[] userInfo = line.split(":");
userList.add(userInfo[0]);
}
}
}
return userList;
}
/**
* get user list from mac
*
* @return user list
*/
private static List<String> getUserListFromMac() throws IOException {
String result = exeCmd("dscl . list /users");
if (StringUtils.isNotEmpty(result)) {
return Arrays.asList(result.split("\n"));
}
return Collections.emptyList();
}
/**
* get user list from windows
*
* @return user list
*/
private static List<String> getUserListFromWindows() throws IOException {
String result = exeCmd("net user");
String[] lines = result.split("\n");
int startPos = 0;
int endPos = lines.length - 2;
for (int i = 0; i < lines.length; i++) {
if (lines[i].isEmpty()) {
continue;
}
int count = 0;
if (lines[i].charAt(0) == '-') {
for (int j = 0; j < lines[i].length(); j++) {
if (lines[i].charAt(j) == '-') {
count++;
}
}
}
if (count == lines[i].length()) {
startPos = i + 1;
break;
}
}
List<String> users = new ArrayList<>();
while (startPos <= endPos) {
users.addAll(Arrays.asList(PATTERN.split(lines[startPos])));
startPos++;
}
return users;
}
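// The parser above assumes the usual "net user" layout (typical Windows output,
// not taken from this source), e.g.:
//
//   User accounts for \\HOST
//   -----------------------------------------------------------
//   Administrator       DefaultAccount      Guest
//   The command completed successfully.
//
// User names begin on the line after the all-dash separator, and the two footer
// lines are why endPos is lines.length - 2.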
/**
* create user
*
* @param userName user name
*/
public static void createUserIfAbsent(String userName) {
// create the user if it does not exist yet
taskLoggerThreadLocal.set(taskLoggerThreadLocal.get());
if (!getUserList().contains(userName)) {
boolean isSuccess = createUser(userName);
String infoLog = String.format("create user %s %s", userName, isSuccess ? "success" : "fail");
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog);
}
taskLoggerThreadLocal.remove();
}
/**
* create user
*
* @param userName user name
* @return true if creation was successful, otherwise false
*/
public static boolean createUser(String userName) {
try {
String userGroup = getGroup();
if (StringUtils.isEmpty(userGroup)) {
String errorLog = String.format("%s group does not exist for this operating system.", userGroup);
LoggerUtils.logError(Optional.ofNullable(logger), errorLog);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), errorLog);
return false;
}
if (isMacOS()) {
createMacUser(userName, userGroup);
} else if (isWindows()) {
createWindowsUser(userName, userGroup);
} else {
createLinuxUser(userName, userGroup);
}
return true;
} catch (Exception e) {
LoggerUtils.logError(Optional.ofNullable(logger), e);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), e);
}
return false;
}
/**
* create linux user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createLinuxUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create linux os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String cmd = String.format("sudo useradd -g %s %s", userGroup, userName);
String infoLog2 = String.format("execute cmd : %s", cmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
exeCmd(cmd);
}
/**
* create mac user (Supports Mac OSX 10.10+)
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createMacUser(String userName, String userGroup) throws IOException {
Optional<Logger> optionalLogger = Optional.ofNullable(logger);
Optional<Logger> optionalTaskLogger = Optional.ofNullable(taskLoggerThreadLocal.get());
String infoLog1 = String.format("create mac os user : %s", userName);
LoggerUtils.logInfo(optionalLogger, infoLog1);
LoggerUtils.logInfo(optionalTaskLogger, infoLog1);
String createUserCmd = String.format("sudo sysadminctl -addUser %s -password %s", userName, userName);
String infoLog2 = String.format("create user command : %s", createUserCmd);
LoggerUtils.logInfo(optionalLogger, infoLog2);
LoggerUtils.logInfo(optionalTaskLogger, infoLog2);
exeCmd(createUserCmd);
String appendGroupCmd = String.format("sudo dseditgroup -o edit -a %s -t user %s", userName, userGroup);
String infoLog3 = String.format("append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(optionalLogger, infoLog3);
LoggerUtils.logInfo(optionalTaskLogger, infoLog3);
exeCmd(appendGroupCmd);
}
/**
* create windows user
*
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createWindowsUser(String userName, String userGroup) throws IOException {
String infoLog1 = String.format("create windows os user : %s", userName);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog1);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog1);
String userCreateCmd = String.format("net user \"%s\" /add", userName);
String infoLog2 = String.format("execute create user command : %s", userCreateCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog2);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog2);
exeCmd(userCreateCmd);
String appendGroupCmd = String.format("net localgroup \"%s\" \"%s\" /add", userGroup, userName);
String infoLog3 = String.format("execute append user to group : %s", appendGroupCmd);
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog3);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog3);
exeCmd(appendGroupCmd);
}
/**
* get system group information
*
* @return system group info
* @throws IOException errors
*/
public static String getGroup() throws IOException {
if (isWindows()) {
String currentProcUserName = System.getProperty("user.name");
String result = exeCmd(String.format("net user \"%s\"", currentProcUserName));
String line = result.split("\n")[22];
String group = PATTERN.split(line)[1];
if (group.charAt(0) == '*') {
return group.substring(1);
} else {
return group;
}
} else {
String result = exeCmd("groups");
if (StringUtils.isNotEmpty(result)) {
String[] groupInfo = result.split(" ");
return groupInfo[0];
}
}
return null;
}
/**
* get sudo command
*
* @param tenantCode tenantCode
* @param command command
* @return result of sudo execute command
*/
public static String getSudoCmd(String tenantCode, String command) {
if (!CommonUtils.isSudoEnable() || StringUtils.isEmpty(tenantCode)) {
return command;
}
return String.format("sudo -u %s %s", tenantCode, command);
}
/**
* Execute the corresponding command of Linux or Windows
*
* @param command command
* @return result of execute command
* @throws IOException errors
*/
public static String exeCmd(String command) throws IOException {
StringTokenizer st = new StringTokenizer(command);
String[] cmdArray = new String[st.countTokens()];
for (int i = 0; st.hasMoreTokens(); i++) {
cmdArray[i] = st.nextToken();
}
return exeShell(cmdArray);
}
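// Usage sketch (hypothetical invocation, not from this source):
// exeCmd("sudo useradd -g hadoop tenant1") is tokenized on whitespace into
// {"sudo", "useradd", "-g", "hadoop", "tenant1"} before execution, so a quoted
// argument that contains spaces would be split apart by this tokenizer.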
/**
* Execute the shell
*
* @param command command
* @return result of execute the shell
* @throws IOException errors
*/
public static String exeShell(String[] command) throws IOException {
return ShellExecutor.execCommand(command);
}
/**
* get process id
*
* @return process id
*/
public static int getProcessID() {
RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
return Integer.parseInt(runtimeMXBean.getName().split("@")[0]);
}
/**
* whether is macOS
*
* @return true if mac
*/
public static boolean isMacOS() {
return getOSName().startsWith("Mac");
}
/**
* whether is windows
*
* @return true if windows
*/
public static boolean isWindows() {
return getOSName().startsWith("Windows");
}
/**
* get current OS name
*
* @return current OS name
*/
public static String getOSName() {
return System.getProperty("os.name");
}
/**
* check memory and cpu usage
*
* @param systemCpuLoad systemCpuLoad
* @param systemReservedMemory systemReservedMemory
* @return check memory and cpu usage
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory) {
// system load average
double loadAverage = loadAverage();
// system available physical memory
double availablePhysicalMemorySize = availablePhysicalMemorySize();
if (loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory) {
logger.warn("load is too high or availablePhysicalMemorySize(G) is too low, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize, loadAverage);
return false;
} else {
return true;
}
}
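// Worked example (illustrative thresholds, not the real defaults): with
// systemCpuLoad = 4.0 and systemReservedMemory = 0.3 (G), a host whose load
// average exceeds 4.0 or whose available memory drops below 0.3 G fails the check.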
/**
* check memory and cpu usage
*
* @param conf conf
* @param isMaster is master
* @return check memory and cpu usage
*/
public static Boolean checkResource(Configuration conf, Boolean isMaster) {
double systemCpuLoad;
double systemReservedMemory;
if (Boolean.TRUE.equals(isMaster)) {
systemCpuLoad = conf.getDouble(Constants.MASTER_MAX_CPULOAD_AVG, Constants.DEFAULT_MASTER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.MASTER_RESERVED_MEMORY, Constants.DEFAULT_MASTER_RESERVED_MEMORY);
} else {
systemCpuLoad = conf.getDouble(Constants.WORKER_MAX_CPULOAD_AVG, Constants.DEFAULT_WORKER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.WORKER_RESERVED_MEMORY, Constants.DEFAULT_WORKER_RESERVED_MEMORY);
}
return checkResource(systemCpuLoad, systemReservedMemory);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,309 | [Bug][Common] memoryUsage is `-33%` | **To Reproduce**
`memoryUsage` is `-33%`
**Expected behavior**
Bug fixed
**Screenshots**
![image](https://user-images.githubusercontent.com/4902714/115050583-779bf480-9f0e-11eb-8eed-4abcc8df3fc4.png)
**Which version of Dolphin Scheduler:**
-[1.3.x]
-[dev]
| https://github.com/apache/dolphinscheduler/issues/5309 | https://github.com/apache/dolphinscheduler/pull/5312 | 5ef1f731b7f3b48915859b7a9595ec42df614c48 | e92e29ef9a126d1d5660011545056d4dd806d105 | "2021-04-16T15:51:04Z" | java | "2021-04-19T02:24:54Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/os/OSUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.os;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.hardware.GlobalMemory;
import java.math.RoundingMode;
import java.text.DecimalFormat;
/**
* OSUtilsTest
*/
public class OSUtilsTest {
private static Logger logger = LoggerFactory.getLogger(OSUtilsTest.class);
@Test
public void memoryUsage() {
logger.info("memoryUsage : {}", OSUtils.memoryUsage());// 0.3361799418926239
}
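// A regression sketch for issue #5309 (assumed bounds, not part of the original
// test): once fixed, the reported usage should stay within [0, 1]:
// double usage = OSUtils.memoryUsage();
// Assert.assertTrue(usage >= 0.0 && usage <= 1.0);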
@Test
public void availablePhysicalMemorySize() {
logger.info("availablePhysicalMemorySize : {}", OSUtils.availablePhysicalMemorySize());
logger.info("availablePhysicalMemorySize : {}", OSUtils.totalMemorySize() / 10);
}
@Test
public void loadAverage() {
logger.info("memoryUsage : {}", OSUtils.loadAverage());
}
private void printMemory(GlobalMemory memory) {
logger.info("memoryUsage : {} %" , (memory.getTotal() - memory.getAvailable()) * 100 / memory.getTotal() );
}
@Test
public void cpuUsage() throws Exception {
logger.info("cpuUsage : {}", OSUtils.cpuUsage());
Thread.sleep(1000L);
logger.info("cpuUsage : {}", OSUtils.cpuUsage());
double cpuUsage = OSUtils.cpuUsage();
DecimalFormat df = new DecimalFormat("0.00");
df.setRoundingMode(RoundingMode.HALF_UP);
logger.info("cpuUsage1 : {}", df.format(cpuUsage));
}
}
|