Dataset schema (column, dtype, observed values or length range):

status            stringclasses   1 value
repo_name         stringclasses   31 values
repo_url          stringclasses   31 values
issue_id          int64           1 to 104k
title             stringlengths   4 to 369
body              stringlengths   0 to 254k
issue_url         stringlengths   37 to 56
pull_url          stringlengths   37 to 54
before_fix_sha    stringlengths   40 to 40
after_fix_sha     stringlengths   40 to 40
report_datetime   unknown
language          stringclasses   5 values
commit_datetime   unknown
updated_file      stringlengths   4 to 188
file_content      stringlengths   0 to 5.12M
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5229
title: [Feature][server port] server port custom config
body: **Describe the feature** All applications should support a custom-configured server port.
issue_url: https://github.com/apache/dolphinscheduler/issues/5229
pull_url: https://github.com/apache/dolphinscheduler/pull/6947
before_fix_sha: 100a9ba3fc21764158593bd33e2b261b0ef39982
after_fix_sha: 0dcff1425a28da0ce0004fc3e594b97d080c5fd9
report_datetime: 2021-04-07T09:22:45Z
language: java
commit_datetime: 2021-11-30T16:21:03Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/log/LoggerServer.java
file_content:
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.log;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * logger server
 */
public class LoggerServer {

    private static final Logger logger = LoggerFactory.getLogger(LoggerServer.class);

    /**
     * netty server
     */
    private final NettyRemotingServer server;

    /**
     * netty server config
     */
    private final NettyServerConfig serverConfig;

    /**
     * logger request processor
     */
    private final LoggerRequestProcessor requestProcessor;

    public LoggerServer() {
        this.serverConfig = new NettyServerConfig();
        this.serverConfig.setListenPort(Constants.RPC_PORT);
        this.server = new NettyRemotingServer(serverConfig);
        this.requestProcessor = new LoggerRequestProcessor();
        this.server.registerProcessor(CommandType.GET_LOG_BYTES_REQUEST, requestProcessor, requestProcessor.getExecutor());
        this.server.registerProcessor(CommandType.ROLL_VIEW_LOG_REQUEST, requestProcessor, requestProcessor.getExecutor());
        this.server.registerProcessor(CommandType.VIEW_WHOLE_LOG_REQUEST, requestProcessor, requestProcessor.getExecutor());
        this.server.registerProcessor(CommandType.REMOVE_TAK_LOG_REQUEST, requestProcessor, requestProcessor.getExecutor());
    }

    /**
     * main launches the server from the command line.
     *
     * @param args arguments
     */
    public static void main(String[] args) {
        final LoggerServer server = new LoggerServer();
        server.start();
    }

    /**
     * server start
     */
    public void start() {
        this.server.start();
        logger.info("logger server started, listening on port : {}", Constants.RPC_PORT);
        Runtime.getRuntime().addShutdownHook(new Thread(LoggerServer.this::stop));
    }

    /**
     * stop
     */
    public void stop() {
        this.server.close();
        logger.info("logger server shut down");
    }
}
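Note: the hardcoded Constants.RPC_PORT in the constructor above is exactly what this issue asks to make configurable. Below is a minimal sketch of one way to do that, assuming a PropertyUtils-style lookup with a fallback default; the class name and the "server.listen.port" property key are illustrative assumptions, not taken from the actual fix in the linked pull request.

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;

public class ConfigurableLoggerServer {

    private final NettyRemotingServer server;

    public ConfigurableLoggerServer() {
        NettyServerConfig serverConfig = new NettyServerConfig();
        // read the port from configuration, falling back to the compiled-in
        // default when no override is present; the property key is hypothetical
        int listenPort = PropertyUtils.getInt("server.listen.port", Constants.RPC_PORT);
        serverConfig.setListenPort(listenPort);
        this.server = new NettyRemotingServer(serverConfig);
    }
}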
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5229
title: [Feature][server port] server port custom config
body: **Describe the feature** All applications should support a custom-configured server port.
issue_url: https://github.com/apache/dolphinscheduler/issues/5229
pull_url: https://github.com/apache/dolphinscheduler/pull/6947
before_fix_sha: 100a9ba3fc21764158593bd33e2b261b0ef39982
after_fix_sha: 0dcff1425a28da0ce0004fc3e594b97d080c5fd9
report_datetime: 2021-04-07T09:22:45Z
language: java
commit_datetime: 2021-11-30T16:21:03Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/ProcessUtils.java
file_content:
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.utils;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.SystemUtils;

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * mainly used to get the start command line of a process.
 */
public class ProcessUtils {

    private static final Logger logger = LoggerFactory.getLogger(ProcessUtils.class);

    /**
     * Pre-compiled pattern: compiling once up front avoids per-call
     * compilation cost and multi-thread safety issues.
     */
    private static final Pattern MACPATTERN = Pattern.compile("-[+|-]-\\s(\\d+)");

    /**
     * Pattern for recognizing PIDs on Windows.
     */
    private static final Pattern WINDOWSATTERN = Pattern.compile("\\w+\\((\\d+)\\)");

    /**
     * kill yarn application.
     *
     * @param appIds app id list
     * @param logger logger
     * @param tenantCode tenant code
     * @param executePath execute path
     */
    public static void cancelApplication(List<String> appIds, Logger logger, String tenantCode, String executePath) {
        if (appIds == null || appIds.isEmpty()) {
            return;
        }
        for (String appId : appIds) {
            try {
                ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
                if (!applicationStatus.typeIsFinished()) {
                    String commandFile = String.format("%s/%s.kill", executePath, appId);
                    String cmd = getKerberosInitCommand() + "yarn application -kill " + appId;
                    execYarnKillCommand(logger, tenantCode, appId, commandFile, cmd);
                }
            } catch (Exception e) {
                logger.error("Get yarn application app id [{}] status failed", appId, e);
            }
        }
    }

    /**
     * get kerberos init command
     */
    static String getKerberosInitCommand() {
        logger.info("get kerberos init command");
        StringBuilder kerberosCommandBuilder = new StringBuilder();
        boolean hadoopKerberosState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false);
        if (hadoopKerberosState) {
            kerberosCommandBuilder.append("export KRB5_CONFIG=")
                    .append(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH))
                    .append("\n\n")
                    .append(String.format("kinit -k -t %s %s || true",
                            PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH),
                            PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)))
                    .append("\n\n");
            logger.info("kerberos init command: {}", kerberosCommandBuilder);
        }
        return kerberosCommandBuilder.toString();
    }

    /**
     * build kill command for yarn application
     *
     * @param logger logger
     * @param tenantCode tenant code
     * @param appId app id
     * @param commandFile command file
     * @param cmd cmd
     */
    private static void execYarnKillCommand(Logger logger, String tenantCode, String appId, String commandFile, String cmd) {
        try {
            StringBuilder sb = new StringBuilder();
            sb.append("#!/bin/sh\n");
            sb.append("BASEDIR=$(cd `dirname $0`; pwd)\n");
            sb.append("cd $BASEDIR\n");
            if (CommonUtils.getSystemEnvPath() != null) {
                sb.append("source ").append(CommonUtils.getSystemEnvPath()).append("\n");
            }
            sb.append("\n\n");
            sb.append(cmd);

            File f = new File(commandFile);
            if (!f.exists()) {
                org.apache.commons.io.FileUtils.writeStringToFile(new File(commandFile), sb.toString(), StandardCharsets.UTF_8);
            }

            String runCmd = String.format("%s %s", Constants.SH, commandFile);
            runCmd = OSUtils.getSudoCmd(tenantCode, runCmd);
            logger.info("kill cmd:{}", runCmd);
            OSUtils.exeCmd(runCmd);
        } catch (Exception e) {
            logger.error(String.format("Kill yarn application app id [%s] failed: [%s]", appId, e.getMessage()));
        }
    }

    /**
     * get pids str.
     *
     * @param processId process id
     * @return pids pid String
     * @throws Exception exception
     */
    public static String getPidsStr(int processId) throws Exception {
        List<String> pidList = new ArrayList<>();
        Matcher mat = null;
        // pstree pid get sub pids
        if (SystemUtils.IS_OS_MAC) {
            String pids = OSUtils.exeCmd(String.format("%s -sp %d", Constants.PSTREE, processId));
            if (null != pids) {
                mat = MACPATTERN.matcher(pids);
            }
        } else {
            String pids = OSUtils.exeCmd(String.format("%s -p %d", Constants.PSTREE, processId));
            mat = WINDOWSATTERN.matcher(pids);
        }
        if (null != mat) {
            while (mat.find()) {
                pidList.add(mat.group(1));
            }
        }
        if (CommonUtils.isSudoEnable() && !pidList.isEmpty()) {
            pidList = pidList.subList(1, pidList.size());
        }
        return String.join(" ", pidList).trim();
    }

    /**
     * find logs and kill yarn tasks.
     *
     * @param taskExecutionContext taskExecutionContext
     * @return yarn application ids
     */
    public static List<String> killYarnJob(TaskExecutionContext taskExecutionContext) {
        try {
            Thread.sleep(Constants.SLEEP_TIME_MILLIS);
            String log;
            try (LogClientService logClient = new LogClientService()) {
                log = logClient.viewLog(Host.of(taskExecutionContext.getHost()).getIp(),
                        Constants.RPC_PORT,
                        taskExecutionContext.getLogPath());
            }
            if (!StringUtils.isEmpty(log)) {
                if (StringUtils.isEmpty(taskExecutionContext.getExecutePath())) {
                    taskExecutionContext.setExecutePath(FileUtils.getProcessExecDir(
                            taskExecutionContext.getProjectCode(),
                            taskExecutionContext.getProcessDefineCode(),
                            taskExecutionContext.getProcessDefineVersion(),
                            taskExecutionContext.getProcessInstanceId(),
                            taskExecutionContext.getTaskInstanceId()));
                }
                FileUtils.createWorkDirIfAbsent(taskExecutionContext.getExecutePath());
                List<String> appIds = LoggerUtils.getAppIds(log, logger);
                if (CollectionUtils.isNotEmpty(appIds)) {
                    cancelApplication(appIds, logger, taskExecutionContext.getTenantCode(), taskExecutionContext.getExecutePath());
                    return appIds;
                }
            }
        } catch (Exception e) {
            logger.error("kill yarn job failure", e);
        }
        return Collections.emptyList();
    }
}
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5229
title: [Feature][server port] server port custom config
body: **Describe the feature** All applications should support a custom-configured server port.
issue_url: https://github.com/apache/dolphinscheduler/issues/5229
pull_url: https://github.com/apache/dolphinscheduler/pull/6947
before_fix_sha: 100a9ba3fc21764158593bd33e2b261b0ef39982
after_fix_sha: 0dcff1425a28da0ce0004fc3e594b97d080c5fd9
report_datetime: 2021-04-07T09:22:45Z
language: java
commit_datetime: 2021-11-30T16:21:03Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java
file_content:
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.IStoppable;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.plugin.TaskPluginManager;
import org.apache.dolphinscheduler.server.worker.processor.DBTaskAckProcessor;
import org.apache.dolphinscheduler.server.worker.processor.DBTaskResponseProcessor;
import org.apache.dolphinscheduler.server.worker.processor.HostUpdateProcessor;
import org.apache.dolphinscheduler.server.worker.processor.TaskExecuteProcessor;
import org.apache.dolphinscheduler.server.worker.processor.TaskKillProcessor;
import org.apache.dolphinscheduler.server.worker.registry.WorkerRegistryClient;
import org.apache.dolphinscheduler.server.worker.runner.RetryReportTaskStatusThread;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

import java.util.Set;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.transaction.annotation.EnableTransactionManagement;

/**
 * worker server
 */
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
    @ComponentScan.Filter(type = FilterType.REGEX, pattern = {
        "org.apache.dolphinscheduler.server.master.*",
        "org.apache.dolphinscheduler.server.monitor.*",
        "org.apache.dolphinscheduler.server.log.*",
        "org.apache.dolphinscheduler.alert.*"
    })
})
@EnableTransactionManagement
public class WorkerServer implements IStoppable {

    /**
     * logger
     */
    private static final Logger logger = LoggerFactory.getLogger(WorkerServer.class);

    /**
     * netty remote server
     */
    private NettyRemotingServer nettyRemotingServer;

    /**
     * worker config
     */
    @Autowired
    private WorkerConfig workerConfig;

    /**
     * spring application context
     * only use it for initialization
     */
    @Autowired
    private SpringApplicationContext springApplicationContext;

    /**
     * alert model netty remote server
     */
    private AlertClientService alertClientService;

    @Autowired
    private RetryReportTaskStatusThread retryReportTaskStatusThread;

    @Autowired
    private WorkerManagerThread workerManagerThread;

    /**
     * worker registry
     */
    @Autowired
    private WorkerRegistryClient workerRegistryClient;

    @Autowired
    private TaskPluginManager taskPluginManager;

    /**
     * worker server startup; does not use a web service
     *
     * @param args arguments
     */
    public static void main(String[] args) {
        Thread.currentThread().setName(Constants.THREAD_NAME_WORKER_SERVER);
        new SpringApplicationBuilder(WorkerServer.class)
                .profiles("worker")
                .run(args);
    }

    /**
     * worker server run
     */
    @PostConstruct
    public void run() {
        // alert-server client registry
        alertClientService = new AlertClientService(workerConfig.getAlertListenHost(), Constants.ALERT_RPC_PORT);

        // init remoting server
        NettyServerConfig serverConfig = new NettyServerConfig();
        serverConfig.setListenPort(workerConfig.getListenPort());
        this.nettyRemotingServer = new NettyRemotingServer(serverConfig);
        this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_REQUEST, new TaskExecuteProcessor(alertClientService, taskPluginManager));
        this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_REQUEST, new TaskKillProcessor());
        this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_ACK, new DBTaskAckProcessor());
        this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_RESPONSE, new DBTaskResponseProcessor());
        this.nettyRemotingServer.registerProcessor(CommandType.PROCESS_HOST_UPDATE_REQUEST, new HostUpdateProcessor());
        this.nettyRemotingServer.start();

        // worker registry
        try {
            this.workerRegistryClient.registry();
            this.workerRegistryClient.setRegistryStoppable(this);
            Set<String> workerZkPaths = this.workerRegistryClient.getWorkerZkPaths();
            this.workerRegistryClient.handleDeadServer(workerZkPaths, NodeType.WORKER, Constants.DELETE_OP);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e);
        }

        // task execute manager
        this.workerManagerThread.start();

        // retry report task status
        this.retryReportTaskStatusThread.start();

        /*
         * registry hooks, which are called before the process exits
         */
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            if (Stopper.isRunning()) {
                close("shutdownHook");
            }
        }));
    }

    public void close(String cause) {
        try {
            // execute only once
            if (Stopper.isStopped()) {
                return;
            }
            logger.info("worker server is stopping ..., cause : {}", cause);

            // set the stop signal
            Stopper.stop();
            try {
                // sleep 3 seconds so running threads can quietly stop
                Thread.sleep(3000L);
            } catch (Exception e) {
                logger.warn("thread sleep exception", e);
            }

            // close
            this.nettyRemotingServer.close();
            this.workerRegistryClient.unRegistry();
            this.alertClientService.close();
            this.springApplicationContext.close();
        } catch (Exception e) {
            logger.error("worker server stop exception ", e);
        }
    }

    @Override
    public void stop(String cause) {
        close(cause);
    }
}
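Note: unlike LoggerServer, WorkerServer already takes its listen port from WorkerConfig (workerConfig.getListenPort()) rather than a hardcoded constant. A sketch of what such a Spring-bound config holder can look like follows; the class name and the "worker.listen.port" property key are illustrative assumptions, not the actual WorkerConfig source, which is not part of this record.

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class ExampleWorkerConfig {

    // hypothetical property key; falls back to 1234 when no override is configured
    @Value("${worker.listen.port:1234}")
    private int listenPort;

    public int getListenPort() {
        return listenPort;
    }
}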
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5229
title: [Feature][server port] server port custom config
body: **Describe the feature** All applications should support a custom-configured server port.
issue_url: https://github.com/apache/dolphinscheduler/issues/5229
pull_url: https://github.com/apache/dolphinscheduler/pull/6947
before_fix_sha: 100a9ba3fc21764158593bd33e2b261b0ef39982
after_fix_sha: 0dcff1425a28da0ce0004fc3e594b97d080c5fd9
report_datetime: 2021-04-07T09:22:45Z
language: java
commit_datetime: 2021-11-30T16:21:03Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
file_content:
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.processor;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand;
import org.apache.dolphinscheduler.remote.command.TaskKillResponseCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRemoteChannel;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.Pair;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.spi.task.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;

import org.apache.commons.lang.StringUtils;

import java.util.Collections;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;

import io.netty.channel.Channel;

/**
 * task kill processor
 */
public class TaskKillProcessor implements NettyRequestProcessor {

    private final Logger logger = LoggerFactory.getLogger(TaskKillProcessor.class);

    /**
     * worker config
     */
    private final WorkerConfig workerConfig;

    /**
     * task callback service
     */
    private final TaskCallbackService taskCallbackService;

    /**
     * task execute manager
     */
    private final WorkerManagerThread workerManager;

    public TaskKillProcessor() {
        this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class);
        this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class);
        this.workerManager = SpringApplicationContext.getBean(WorkerManagerThread.class);
    }

    /**
     * task kill process
     *
     * @param channel channel
     * @param command command
     */
    @Override
    public void process(Channel channel, Command command) {
        Preconditions.checkArgument(CommandType.TASK_KILL_REQUEST == command.getType(), String.format("invalid command type : %s", command.getType()));
        TaskKillRequestCommand killCommand = JSONUtils.parseObject(command.getBody(), TaskKillRequestCommand.class);
        logger.info("received kill command : {}", killCommand);

        Pair<Boolean, List<String>> result = doKill(killCommand);

        taskCallbackService.addRemoteChannel(killCommand.getTaskInstanceId(),
                new NettyRemoteChannel(channel, command.getOpaque()));

        TaskKillResponseCommand taskKillResponseCommand = buildKillTaskResponseCommand(killCommand, result);
        taskCallbackService.sendResult(taskKillResponseCommand.getTaskInstanceId(), taskKillResponseCommand.convert2Command());
        TaskExecutionContextCacheManager.removeByTaskInstanceId(taskKillResponseCommand.getTaskInstanceId());
    }

    /**
     * do kill
     *
     * @return kill result
     */
    private Pair<Boolean, List<String>> doKill(TaskKillRequestCommand killCommand) {
        boolean processFlag = true;
        List<String> appIds = Collections.emptyList();
        int taskInstanceId = killCommand.getTaskInstanceId();
        TaskRequest taskRequest = TaskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId);
        TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(JSONUtils.toJsonString(taskRequest), TaskExecutionContext.class);
        try {
            Integer processId = taskExecutionContext.getProcessId();
            if (processId.equals(0)) {
                workerManager.killTaskBeforeExecuteByInstanceId(taskInstanceId);
                TaskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
                logger.info("the task has not been executed and has been cancelled, task id:{}", taskInstanceId);
                return Pair.of(true, appIds);
            }

            String pidsStr = ProcessUtils.getPidsStr(taskExecutionContext.getProcessId());
            if (!StringUtils.isEmpty(pidsStr)) {
                String cmd = String.format("kill -9 %s", pidsStr);
                cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
                logger.info("process id:{}, cmd:{}", taskExecutionContext.getProcessId(), cmd);
                OSUtils.exeCmd(cmd);
            }
        } catch (Exception e) {
            processFlag = false;
            logger.error("kill task error", e);
        }
        // find log and kill yarn job
        Pair<Boolean, List<String>> yarnResult = killYarnJob(Host.of(taskExecutionContext.getHost()).getIp(),
                taskExecutionContext.getLogPath(),
                taskExecutionContext.getExecutePath(),
                taskExecutionContext.getTenantCode());
        return Pair.of(processFlag && yarnResult.getLeft(), yarnResult.getRight());
    }

    /**
     * build TaskKillResponseCommand
     *
     * @param killCommand kill command
     * @param result exe result
     * @return build TaskKillResponseCommand
     */
    private TaskKillResponseCommand buildKillTaskResponseCommand(TaskKillRequestCommand killCommand, Pair<Boolean, List<String>> result) {
        TaskKillResponseCommand taskKillResponseCommand = new TaskKillResponseCommand();
        taskKillResponseCommand.setStatus(result.getLeft() ? ExecutionStatus.SUCCESS.getCode() : ExecutionStatus.FAILURE.getCode());
        taskKillResponseCommand.setAppIds(result.getRight());
        TaskRequest taskRequest = TaskExecutionContextCacheManager.getByTaskInstanceId(killCommand.getTaskInstanceId());
        if (taskRequest == null) {
            return taskKillResponseCommand;
        }
        TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(JSONUtils.toJsonString(taskRequest), TaskExecutionContext.class);
        if (taskExecutionContext != null) {
            taskKillResponseCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
            taskKillResponseCommand.setHost(taskExecutionContext.getHost());
            taskKillResponseCommand.setProcessId(taskExecutionContext.getProcessId());
        }
        return taskKillResponseCommand;
    }

    /**
     * kill yarn job
     *
     * @param host host
     * @param logPath logPath
     * @param executePath executePath
     * @param tenantCode tenantCode
     * @return Pair<Boolean, List<String>> yarn kill result
     */
    private Pair<Boolean, List<String>> killYarnJob(String host, String logPath, String executePath, String tenantCode) {
        try (LogClientService logClient = new LogClientService()) {
            logger.info("view log host : {},logPath : {}", host, logPath);
            String log = logClient.viewLog(host, Constants.RPC_PORT, logPath);
            List<String> appIds = Collections.emptyList();
            if (!StringUtils.isEmpty(log)) {
                appIds = LoggerUtils.getAppIds(log, logger);
                if (StringUtils.isEmpty(executePath)) {
                    logger.error("task instance execute path is empty");
                    throw new RuntimeException("task instance execute path is empty");
                }
                if (appIds.size() > 0) {
                    ProcessUtils.cancelApplication(appIds, logger, tenantCode, executePath);
                }
            }
            return Pair.of(true, appIds);
        } catch (Exception e) {
            logger.error("kill yarn job error", e);
        }
        return Pair.of(false, Collections.emptyList());
    }
}
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5229
title: [Feature][server port] server port custom config
body: **Describe the feature** All applications should support a custom-configured server port.
issue_url: https://github.com/apache/dolphinscheduler/issues/5229
pull_url: https://github.com/apache/dolphinscheduler/pull/6947
before_fix_sha: 100a9ba3fc21764158593bd33e2b261b0ef39982
after_fix_sha: 0dcff1425a28da0ce0004fc3e594b97d080c5fd9
report_datetime: 2021-04-07T09:22:45Z
language: java
commit_datetime: 2021-11-30T16:21:03Z
updated_file: dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/log/LoggerServerTest.java
file_content:
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.log;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.service.log.LogClientService;

import org.apache.commons.lang.StringUtils;

import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class LoggerServerTest {

    private LoggerServer loggerServer;

    private LogClientService logClientService;

    @Before
    public void startServerAndClient() {
        this.loggerServer = new LoggerServer();
        this.loggerServer.start();
        this.logClientService = new LogClientService();
    }

    @Test
    public void testRollViewLog() throws IOException {
        String expectedTmpDemoString = "testRolloViewLog";
        org.apache.commons.io.FileUtils.writeStringToFile(new File("/tmp/demo.txt"), expectedTmpDemoString, Charset.defaultCharset());

        String resultTmpDemoString = this.logClientService.rollViewLog(
                "localhost", Constants.RPC_PORT, "/tmp/demo.txt", 0, 1000);

        Assert.assertEquals(expectedTmpDemoString, resultTmpDemoString.replaceAll("[\r|\n|\t]", StringUtils.EMPTY));

        FileUtils.deleteFile("/tmp/demo.txt");
    }

    @Test
    public void testRemoveTaskLog() throws IOException {
        String expectedTmpRemoveString = "testRemoveTaskLog";
        org.apache.commons.io.FileUtils.writeStringToFile(new File("/tmp/remove.txt"), expectedTmpRemoveString, Charset.defaultCharset());

        Boolean b = this.logClientService.removeTaskLog("localhost", Constants.RPC_PORT, "/tmp/remove.txt");

        Assert.assertTrue(b);

        // the removed log should now read back empty
        String result = this.logClientService.viewLog("localhost", Constants.RPC_PORT, "/tmp/remove.txt");

        Assert.assertEquals(StringUtils.EMPTY, result);
    }

    @After
    public void stopServerAndClient() {
        this.loggerServer.stop();
        this.logClientService.close();
    }
}
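Note: the test above shows the client-side pattern for talking to the logger server, again pinned to Constants.RPC_PORT. A condensed usage sketch of the same LogClientService calls outside JUnit; the host, port, and log path values are illustrative only.

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.service.log.LogClientService;

public class LogClientExample {

    public static void main(String[] args) {
        // connects to an already-running LoggerServer; the file path is illustrative
        try (LogClientService logClient = new LogClientService()) {
            String log = logClient.viewLog("localhost", Constants.RPC_PORT, "/tmp/demo.txt");
            System.out.println(log);
        }
    }
}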
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5229
title: [Feature][server port] server port custom config
body: **Describe the feature** All applications should support a custom-configured server port.
issue_url: https://github.com/apache/dolphinscheduler/issues/5229
pull_url: https://github.com/apache/dolphinscheduler/pull/6947
before_fix_sha: 100a9ba3fc21764158593bd33e2b261b0ef39982
after_fix_sha: 0dcff1425a28da0ce0004fc3e594b97d080c5fd9
report_datetime: 2021-04-07T09:22:45Z
language: java
commit_datetime: 2021-11-30T16:21:03Z
updated_file: dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
file_content:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static java.util.stream.Collectors.toSet; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils.CodeGenerateException; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import 
org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand; import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.cache.processor.TenantCacheProcessor; import org.apache.dolphinscheduler.service.cache.processor.UserCacheProcessor; import org.apache.dolphinscheduler.service.exceptions.ServiceException; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import com.facebook.presto.jdbc.internal.guava.collect.Lists; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.node.ObjectNode; /** * process relative dao that some mappers in this. */ @Component public class ProcessService { private final Logger logger = LoggerFactory.getLogger(getClass()); private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal()}; @Autowired private UserCacheProcessor userCacheProcessor; @Autowired private ProcessDefinitionMapper processDefineMapper; @Autowired private ProcessDefinitionLogMapper processDefineLogMapper; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private DataSourceMapper dataSourceMapper; @Autowired private ProcessInstanceMapMapper processInstanceMapMapper; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private CommandMapper commandMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private UdfFuncMapper udfFuncMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired private TenantCacheProcessor tenantCacheProcessor; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired StateEventCallbackService stateEventCallbackService; @Autowired private EnvironmentMapper environmentMapper; /** * handle Command (construct ProcessInstance from Command) , wrapped in transaction * * @param logger logger * @param host host * @param command found command * @return process instance */ @Transactional public ProcessInstance handleCommand(Logger logger, String host, Command command, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) { ProcessInstance processInstance = constructProcessInstance(command, host, processDefinitionCacheMaps); // cannot construct process instance, return null if (processInstance == null) { logger.error("scan command, command parameter is error: {}", command); moveToErrorCommand(command, "process instance is null"); return null; } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); //if the processDefination is serial ProcessDefinition processDefinition = this.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (processDefinition.getExecutionType().typeIsSerial()) { saveSerialProcess(processInstance, processDefinition); if (processInstance.getState() != ExecutionStatus.SUBMITTED_SUCCESS) { setSubProcessParam(processInstance); deleteCommandWithCheck(command.getId()); return null; } } else { saveProcessInstance(processInstance); } setSubProcessParam(processInstance); deleteCommandWithCheck(command.getId()); return processInstance; } private void saveSerialProcess(ProcessInstance processInstance, ProcessDefinition 
processDefinition) { processInstance.setState(ExecutionStatus.SERIAL_WAIT); saveProcessInstance(processInstance); //serial wait //when we get the running instance(or waiting instance) only get the priority instance(by id) if (processDefinition.getExecutionType().typeIsSerialWait()) { while (true) { List<ProcessInstance> runningProcessInstances = this.processInstanceMapper.queryByProcessDefineCodeAndStatusAndNextId(processInstance.getProcessDefinitionCode(), Constants.RUNNING_PROCESS_STATE, processInstance.getId()); if (CollectionUtils.isEmpty(runningProcessInstances)) { processInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); saveProcessInstance(processInstance); return; } ProcessInstance runningProcess = runningProcessInstances.get(0); if (this.processInstanceMapper.updateNextProcessIdById(processInstance.getId(), runningProcess.getId())) { return; } } } else if (processDefinition.getExecutionType().typeIsSerialDiscard()) { List<ProcessInstance> runningProcessInstances = this.processInstanceMapper.queryByProcessDefineCodeAndStatusAndNextId(processInstance.getProcessDefinitionCode(), Constants.RUNNING_PROCESS_STATE, processInstance.getId()); if (CollectionUtils.isEmpty(runningProcessInstances)) { processInstance.setState(ExecutionStatus.STOP); saveProcessInstance(processInstance); } } else if (processDefinition.getExecutionType().typeIsSerialPriority()) { List<ProcessInstance> runningProcessInstances = this.processInstanceMapper.queryByProcessDefineCodeAndStatusAndNextId(processInstance.getProcessDefinitionCode(), Constants.RUNNING_PROCESS_STATE, processInstance.getId()); if (CollectionUtils.isNotEmpty(runningProcessInstances)) { for (ProcessInstance info : runningProcessInstances) { info.setCommandType(CommandType.STOP); info.addHistoryCmd(CommandType.STOP); info.setState(ExecutionStatus.READY_STOP); int update = updateProcessInstance(info); // determine whether the process is normal if (update > 0) { String host = info.getHost(); String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand( info.getId(), 0, info.getState(), info.getId(), 0 ); try { stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command()); } catch (Exception e) { logger.error("sendResultError"); } } } } } } /** * save error command, and delete original command * * @param command command * @param message message */ public void moveToErrorCommand(Command command, String message) { ErrorCommand errorCommand = new ErrorCommand(command, message); this.errorCommandMapper.insert(errorCommand); this.commandMapper.deleteById(command.getId()); } /** * set process waiting thread * * @param command command * @param processInstance processInstance * @return process instance */ private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) { processInstance.setState(ExecutionStatus.WAITING_THREAD); if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) { processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * get command page */ public List<Command> 
findCommandPage(int pageSize, int pageNumber) { return commandMapper.queryCommandPage(pageSize, pageNumber * pageSize); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinition(long defineCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(defineCode); if (processDefinition == null) { logger.error("process define not exists"); return new ArrayList<>(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return new ArrayList<>(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. 
* * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } int port = Constants.RPC_PORT; String ip = ""; try { ip = Host.of(taskInstance.getHost()).getIp(); } catch (Exception e) { // compatible old version ip = taskInstance.getHost(); } // remove task log from loggerserver logClient.removeTaskLog(ip, port, taskLogPath); } } } /** * recursive query sub process definition id by parent id. * * @param parentCode parentCode * @param ids ids */ public void recurseFindSubProcess(long parentCode, List<Long> ids) { List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinition(parentCode); if (taskNodeList != null && !taskNodeList.isEmpty()) { for (TaskDefinition taskNode : taskNodeList) { String parameter = taskNode.getTaskParams(); ObjectNode parameterJson = JSONUtils.parseObject(parameter); if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_CODE) != null) { SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class); ids.add(subProcessParam.getProcessDefinitionCode()); recurseFindSubProcess(subProcessParam.getProcessDefinitionCode(), ids); } } } } /** * create recovery waiting thread command when thread pool is not enough for the process instance. * sub work process instance need not to create recovery command. * create recovery waiting thread command and delete origin command at the same time. 
* if the recovery command is exists, only update the field update_time * * @param originCommand originCommand * @param processInstance processInstance */ public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) { // sub process doesnot need to create wait command if (processInstance.getIsSubProcess() == Flag.YES) { if (originCommand != null) { commandMapper.deleteById(originCommand.getId()); } return; } Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId())); // process instance quit by "waiting thread" state if (originCommand == null) { Command command = new Command( CommandType.RECOVER_WAITING_THREAD, processInstance.getTaskDependType(), processInstance.getFailureStrategy(), processInstance.getExecutorId(), processInstance.getProcessDefinition().getCode(), JSONUtils.toJsonString(cmdParam), processInstance.getWarningType(), processInstance.getWarningGroupId(), processInstance.getScheduleTime(), processInstance.getWorkerGroup(), processInstance.getEnvironmentCode(), processInstance.getProcessInstancePriority(), processInstance.getDryRun(), processInstance.getId(), processInstance.getProcessDefinitionVersion() ); saveCommand(command); return; } // update the command time if current command if recover from waiting if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) { originCommand.setUpdateTime(new Date()); saveCommand(originCommand); } else { // delete old command and create new waiting thread command commandMapper.deleteById(originCommand.getId()); originCommand.setId(0); originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); originCommand.setUpdateTime(new Date()); originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam)); originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority()); saveCommand(originCommand); } } /** * get schedule time from command * * @param command command * @param cmdParam cmdParam map * @return date */ private Date getScheduleTime(Command command, Map<String, String> cmdParam) { Date scheduleTime = command.getScheduleTime(); if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> schedules = queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode()); List<Date> complementDateList = CronUtils.getSelfFireDateList(start, end, schedules); if (complementDateList.size() > 0) { scheduleTime = complementDateList.get(0); } else { logger.error("set scheduler time error: complement date list is empty, command: {}", command.toString()); } } return scheduleTime; } /** * generate a new work process instance from command. 
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); processInstance.setDryRun(command.getDryRun()); if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
-1 : command.getEnvironmentCode()); processInstance.setTimeout(processDefinition.getTimeout()); processInstance.setTenantId(processDefinition.getTenantId()); return processInstance; } private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) { // get start params from command param Map<String, String> startParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) { String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS); startParamMap = JSONUtils.toMap(startParamJson); } Map<String, String> fatherParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) { String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS); fatherParamMap = JSONUtils.toMap(fatherParamJson); } startParamMap.putAll(fatherParamMap); // set start param into global params if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) { for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) { String val = startParamMap.get(param.getKey()); if (val != null) { param.setValue(val); } } } } /** * get process tenant * there is tenant id in definition, use the tenant of the definition. * if there is not tenant id in the definiton or the tenant not exist * use definition creator's tenant. * * @param tenantId tenantId * @param userId userId * @return tenant */ public Tenant getTenantForProcess(int tenantId, int userId) { Tenant tenant = null; if (tenantId >= 0) { tenant = tenantCacheProcessor.queryById(tenantId); } if (userId == 0) { return null; } if (tenant == null) { User user = userCacheProcessor.selectById(userId); tenant = tenantCacheProcessor.queryById(user.getTenantId()); } return tenant; } /** * get an environment * use the code of the environment to find a environment. * * @param environmentCode environmentCode * @return Environment */ public Environment findEnvironmentByCode(Long environmentCode) { Environment environment = null; if (environmentCode >= 0) { environment = environmentMapper.queryByEnvironmentCode(environmentCode); } return environment; } /** * check command parameters is valid * * @param command command * @param cmdParam cmdParam map * @return whether command param is valid */ private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) { if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) { if (cmdParam == null || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODES) || cmdParam.get(Constants.CMD_PARAM_START_NODES).isEmpty()) { logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType()); return false; } } return true; } /** * construct process instance according to one command. 
     *
     * @param command command
     * @param host host
     * @return process instance
     */
    private ProcessInstance constructProcessInstance(Command command, String host, HashMap<String, ProcessDefinition> processDefinitionCacheMaps) {
        ProcessInstance processInstance;
        ProcessDefinition processDefinition;
        CommandType commandType = command.getCommandType();

        String key = String.format("%d-%d", command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
        if (processDefinitionCacheMaps.containsKey(key)) {
            processDefinition = processDefinitionCacheMaps.get(key);
        } else {
            processDefinition = this.findProcessDefinition(command.getProcessDefinitionCode(), command.getProcessDefinitionVersion());
            if (processDefinition != null) {
                processDefinitionCacheMaps.put(key, processDefinition);
            }
        }
        if (processDefinition == null) {
            logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
            return null;
        }
        Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
        int processInstanceId = command.getProcessInstanceId();
        if (processInstanceId == 0) {
            processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
        } else {
            processInstance = this.findProcessInstanceDetailById(processInstanceId);
            if (processInstance == null) {
                return processInstance;
            }
        }
        if (cmdParam != null) {
            CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
            // reset global params while repeat running is needed by cmdParam
            if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
                setGlobalParamIfCommanded(processDefinition, cmdParam);
            }
            // Recalculate global parameters after rerun.
            processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
                    processDefinition.getGlobalParamMap(),
                    processDefinition.getGlobalParamList(),
                    commandTypeIfComplement,
                    processInstance.getScheduleTime()));
            processInstance.setProcessDefinition(processDefinition);
        }
        //reset command parameter
        if (processInstance.getCommandParam() != null) {
            Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
            for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
                if (!cmdParam.containsKey(entry.getKey())) {
                    cmdParam.put(entry.getKey(), entry.getValue());
                }
            }
        }
        // reset command parameter if sub process
        if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
            processInstance.setCommandParam(command.getCommandParam());
        }
        if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
            logger.error("command parameter check failed!");
            return null;
        }
        if (command.getScheduleTime() != null) {
            processInstance.setScheduleTime(command.getScheduleTime());
        }
        processInstance.setHost(host);
        ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
        int runTime = processInstance.getRunTimes();
        switch (commandType) {
            case START_PROCESS:
                break;
            case START_FAILURE_TASK_PROCESS:
                // find failed tasks and init these tasks
                List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
                List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
                List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
                cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
                failedList.addAll(killedList);
                failedList.addAll(toleranceList);
                for (Integer taskId : failedList) {
                    initTaskInstance(this.findTaskInstanceById(taskId));
                }
                cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
                        String.join(Constants.COMMA, convertIntListToString(failedList)));
                processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
                processInstance.setRunTimes(runTime + 1);
                break;
            case START_CURRENT_TASK_PROCESS:
                break;
            case RECOVER_WAITING_THREAD:
                break;
            case RECOVER_SUSPENDED_PROCESS:
                // find pause tasks and init task's state
                cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
                List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
                List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
                suspendedNodeList.addAll(stopNodeList);
                for (Integer taskId : suspendedNodeList) {
                    // initialize the pause state
                    initTaskInstance(this.findTaskInstanceById(taskId));
                }
                cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
                        String.join(",", convertIntListToString(suspendedNodeList)));
                processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
                processInstance.setRunTimes(runTime + 1);
                break;
            case RECOVER_TOLERANCE_FAULT_PROCESS:
                // recover tolerance fault process
                processInstance.setRecovery(Flag.YES);
                runStatus = processInstance.getState();
                break;
            case COMPLEMENT_DATA:
                // delete all the valid tasks when complement data if id is not null
                if (processInstance.getId() != 0) {
                    List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
                    for (TaskInstance taskInstance : taskInstanceList) {
                        taskInstance.setFlag(Flag.NO);
                        this.updateTaskInstance(taskInstance);
                    }
                }
                break;
            case REPEAT_RUNNING:
                // delete the recover task names from command parameter
                if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
                    cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
                    processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
                }
                // delete all the valid tasks when repeat running
                List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
                for (TaskInstance taskInstance : validTaskList) {
                    taskInstance.setFlag(Flag.NO);
                    updateTaskInstance(taskInstance);
                }
                processInstance.setStartTime(new Date());
                processInstance.setEndTime(null);
                processInstance.setRunTimes(runTime + 1);
                initComplementDataParam(processDefinition, processInstance, cmdParam);
                break;
            case SCHEDULER:
                break;
            default:
                break;
        }
        processInstance.setState(runStatus);
        return processInstance;
    }

    /**
     * get process definition by command
     * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
     * Otherwise, get the latest version of ProcessDefinition
     *
     * @return ProcessDefinition
     */
    private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
        if (cmdParam != null) {
            int processInstanceId = 0;
            if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
                processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
            } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
                processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
            } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
                processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
            }

            if (processInstanceId != 0) {
                ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
                if (processInstance == null) {
                    return null;
                }
                return
processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); } } return processDefineMapper.queryByCode(processDefinitionCode); } /** * return complement data if the process start with complement data * * @param processInstance processInstance * @param command command * @return command type */ private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) { if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) { return CommandType.COMPLEMENT_DATA; } else { return command.getCommandType(); } } /** * initialize complement data parameters * * @param processDefinition processDefinition * @param processInstance processInstance * @param cmdParam cmdParam */ private void initComplementDataParam(ProcessDefinition processDefinition, ProcessInstance processInstance, Map<String, String> cmdParam) { if (!processInstance.isComplementData()) { return; } Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> listSchedules = queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); List<Date> complementDate = CronUtils.getSelfFireDateList(start, end, listSchedules); if (complementDate.size() > 0 && Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(complementDate.get(0)); } processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); } /** * set sub work process parameters. * handle sub work process instance, update relation table and command parameters * set sub work process flag, extends parent work process command parameters * * @param subProcessInstance subProcessInstance */ public void setSubProcessParam(ProcessInstance subProcessInstance) { String cmdParam = subProcessInstance.getCommandParam(); if (StringUtils.isEmpty(cmdParam)) { return; } Map<String, String> paramMap = JSONUtils.toMap(cmdParam); // write sub process id into cmd param. if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS) && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) { paramMap.remove(CMD_PARAM_SUB_PROCESS); paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId())); subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap)); subProcessInstance.setIsSubProcess(Flag.YES); this.saveProcessInstance(subProcessInstance); } // copy parent instance user def params to sub process.. 
String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID); if (StringUtils.isNotEmpty(parentInstanceId)) { ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId)); if (parentInstance != null) { subProcessInstance.setGlobalParams( joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams())); this.saveProcessInstance(subProcessInstance); } else { logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam); } } ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class); if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) { return; } // update sub process id to process map table processInstanceMap.setProcessInstanceId(subProcessInstance.getId()); this.updateWorkProcessInstanceMap(processInstanceMap); } /** * join parent global params into sub process. * only the keys doesn't in sub process global would be joined. * * @param parentGlobalParams parentGlobalParams * @param subGlobalParams subGlobalParams * @return global params join */ private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) { List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class); List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class); Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); for (Property parent : parentPropertyList) { if (!subMap.containsKey(parent.getProp())) { subPropertyList.add(parent); } } return JSONUtils.toJsonString(subPropertyList); } /** * initialize task instance * * @param taskInstance taskInstance */ private void initTaskInstance(TaskInstance taskInstance) { if (!taskInstance.isSubProcess() && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); return; } taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); updateTaskInstance(taskInstance); } /** * retry submit task to db */ public TaskInstance submitTaskWithRetry(ProcessInstance processInstance, TaskInstance taskInstance, int commitRetryTimes, int commitInterval) { int retryTimes = 1; TaskInstance task = null; while (retryTimes <= commitRetryTimes) { try { // submit task to db task = SpringApplicationContext.getBean(ProcessService.class).submitTask(processInstance, taskInstance); if (task != null && task.getId() != 0) { break; } logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes); Thread.sleep(commitInterval); } catch (Exception e) { logger.error("task commit to mysql failed", e); } retryTimes += 1; } return task; } /** * submit task to db * submit sub process to command * * @param processInstance processInstance * @param taskInstance taskInstance * @return task instance */ @Transactional(rollbackFor = Exception.class) public TaskInstance submitTask(ProcessInstance processInstance, TaskInstance taskInstance) { logger.info("start submit task : {}, instance id:{}, state: {}", taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState()); //submit to db TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance); if (task == null) { logger.error("end submit task to db error, task name:{}, process id:{} state: {} ", taskInstance.getName(), taskInstance.getProcessInstance(), 
processInstance.getState()); return null; } if (!task.getState().typeIsFinished()) { createSubWorkProcess(processInstance, task); } logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ", taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState()); return task; } /** * set work process instance map * consider o * repeat running does not generate new sub process instance * set map {parent instance id, task instance id, 0(child instance id)} * * @param parentInstance parentInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) { ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId()); if (processMap != null) { return processMap; } if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) { // update current task id to map processMap = findPreviousTaskProcessMap(parentInstance, parentTask); if (processMap != null) { processMap.setParentTaskInstanceId(parentTask.getId()); updateWorkProcessInstanceMap(processMap); return processMap; } } // new task processMap = new ProcessInstanceMap(); processMap.setParentProcessInstanceId(parentInstance.getId()); processMap.setParentTaskInstanceId(parentTask.getId()); createWorkProcessInstanceMap(processMap); return processMap; } /** * find previous task work process map. * * @param parentProcessInstance parentProcessInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) { Integer preTaskId = 0; List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId()); for (TaskInstance task : preTaskList) { if (task.getName().equals(parentTask.getName())) { preTaskId = task.getId(); ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId); if (map != null) { return map; } } } logger.info("sub process instance is not found,parent task:{},parent instance:{}", parentTask.getId(), parentProcessInstance.getId()); return null; } /** * create sub work process command * * @param parentProcessInstance parentProcessInstance * @param task task */ public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) { if (!task.isSubProcess()) { return; } //check create sub work flow firstly ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId()); if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) { // recover failover tolerance would not create a new command when the sub command already have been created return; } instanceMap = setProcessInstanceMap(parentProcessInstance, task); ProcessInstance childInstance = null; if (instanceMap.getProcessInstanceId() != 0) { childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId()); } Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task); updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode()); initSubInstanceState(childInstance); createCommand(subProcessCommand); logger.info("sub process command created: {} ", subProcessCommand); } /** * complement data needs transform parent parameter to child. 
     */
    private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
        // set sub work process command
        String processMapStr = JSONUtils.toJsonString(instanceMap);
        Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
        if (parentProcessInstance.isComplementData()) {
            Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
            String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
            String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
            processMapStr = JSONUtils.toJsonString(cmdParam);
        }
        if (fatherParams.size() != 0) {
            cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
            processMapStr = JSONUtils.toJsonString(cmdParam);
        }
        return processMapStr;
    }

    public Map<String, String> getGlobalParamMap(String globalParams) {
        List<Property> propList;
        Map<String, String> globalParamMap = new HashMap<>();
        if (StringUtils.isNotEmpty(globalParams)) {
            propList = JSONUtils.toList(globalParams, Property.class);
            globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
        }
        return globalParamMap;
    }

    /**
     * create sub work process command
     */
    public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
                                           ProcessInstance childInstance,
                                           ProcessInstanceMap instanceMap,
                                           TaskInstance task) {
        CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
        Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
        long childDefineCode = 0L;
        if (subProcessParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE)) {
            childDefineCode = Long.parseLong(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE));
        }
        ProcessDefinition subProcessDefinition = processDefineMapper.queryByCode(childDefineCode);

        Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
        List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
        Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
        Map<String, String> fatherParams = new HashMap<>();
        if (CollectionUtils.isNotEmpty(allParam)) {
            for (Property info : allParam) {
                fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
            }
        }
        String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);
        int subProcessInstanceId = childInstance == null ?
0 : childInstance.getId(); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), subProcessDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority(), parentProcessInstance.getDryRun(), subProcessInstanceId, subProcessDefinition.getVersion() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = parentProcessInstance.getHistoryCmd(); commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]); } return commandType; } /** * update sub process definition * * @param parentProcessInstance parentProcessInstance * @param childDefinitionCode childDefinitionId */ private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) { ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(), parentProcessInstance.getProcessDefinitionVersion()); ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode); if (childDefinition != null && fatherDefinition != null) { childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId()); processDefineMapper.updateById(childDefinition); } } /** * submit task to mysql * * @param taskInstance taskInstance * @param processInstance processInstance * @return task instance */ public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus processInstanceState = processInstance.getState(); if (taskInstance.getState().typeIsFailure()) { if (taskInstance.isSubProcess()) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } else { if (processInstanceState != ExecutionStatus.READY_STOP && processInstanceState != ExecutionStatus.READY_PAUSE) { // failure task set invalid taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); // crate new task instance if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } taskInstance.setSubmitTime(null); taskInstance.setLogPath(null); taskInstance.setExecutePath(null); taskInstance.setStartTime(null); taskInstance.setEndTime(null); taskInstance.setFlag(Flag.YES); taskInstance.setHost(null); taskInstance.setId(0); } } } taskInstance.setExecutorId(processInstance.getExecutorId()); taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority()); taskInstance.setState(getSubmitTaskState(taskInstance, processInstance)); if (taskInstance.getSubmitTime() == null) { taskInstance.setSubmitTime(new Date()); } if (taskInstance.getFirstSubmitTime() == null) { 
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime()); } boolean saveResult = saveTaskInstance(taskInstance); if (!saveResult) { return null; } return taskInstance; } /** * get submit task instance state by the work process state * cannot modify the task state when running/kill/submit success, or this * task instance is already exists in task queue . * return pause if work process state is ready pause * return stop if work process state is ready stop * if all of above are not satisfied, return submit success * * @param taskInstance taskInstance * @param processInstance processInstance * @return process instance state */ public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus state = taskInstance.getState(); // running, delayed or killed // the task already exists in task queue // return state if ( state == ExecutionStatus.RUNNING_EXECUTION || state == ExecutionStatus.DELAY_EXECUTION || state == ExecutionStatus.KILL ) { return state; } //return pasue /stop if process instance state is ready pause / stop // or return submit success if (processInstance.getState() == ExecutionStatus.READY_PAUSE) { state = ExecutionStatus.PAUSE; } else if (processInstance.getState() == ExecutionStatus.READY_STOP || !checkProcessStrategy(taskInstance, processInstance)) { state = ExecutionStatus.KILL; } else { state = ExecutionStatus.SUBMITTED_SUCCESS; } return state; } /** * check process instance strategy * * @param taskInstance taskInstance * @return check strategy result */ private boolean checkProcessStrategy(TaskInstance taskInstance, ProcessInstance processInstance) { FailureStrategy failureStrategy = processInstance.getFailureStrategy(); if (failureStrategy == FailureStrategy.CONTINUE) { return true; } List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId()); for (TaskInstance task : taskInstances) { if (task.getState() == ExecutionStatus.FAILURE && task.getRetryTimes() >= task.getMaxRetryTimes()) { return false; } } return true; } /** * insert or update work process instance to data base * * @param processInstance processInstance */ public void saveProcessInstance(ProcessInstance processInstance) { if (processInstance == null) { logger.error("save error, process instance is null!"); return; } if (processInstance.getId() != 0) { processInstanceMapper.updateById(processInstance); } else { processInstanceMapper.insert(processInstance); } } /** * insert or update command * * @param command command * @return save command result */ public int saveCommand(Command command) { if (command.getId() != 0) { return commandMapper.updateById(command); } else { return commandMapper.insert(command); } } /** * insert or update task instance * * @param taskInstance taskInstance * @return save task instance result */ public boolean saveTaskInstance(TaskInstance taskInstance) { if (taskInstance.getId() != 0) { return updateTaskInstance(taskInstance); } else { return createTaskInstance(taskInstance); } } /** * insert task instance * * @param taskInstance taskInstance * @return create task instance result */ public boolean createTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.insert(taskInstance); return count > 0; } /** * update task instance * * @param taskInstance taskInstance * @return update task instance result */ public boolean updateTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.updateById(taskInstance); return count > 0; } /** * find task instance by 
id * * @param taskId task id * @return task intance */ public TaskInstance findTaskInstanceById(Integer taskId) { return taskInstanceMapper.selectById(taskId); } /** * package task instance */ public void packageTaskInstance(TaskInstance taskInstance, ProcessInstance processInstance) { taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processInstance.getProcessDefinition()); TaskDefinition taskDefinition = this.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); this.updateTaskDefinitionResources(taskDefinition); taskInstance.setTaskDefine(taskDefinition); } /** * Update {@link ResourceInfo} information in {@link TaskDefinition} * * @param taskDefinition the given {@link TaskDefinition} */ public void updateTaskDefinitionResources(TaskDefinition taskDefinition) { Map<String, Object> taskParameters = JSONUtils.parseObject( taskDefinition.getTaskParams(), new TypeReference<Map<String, Object>>() { }); if (taskParameters != null) { // if contains mainJar field, query resource from database // Flink, Spark, MR if (taskParameters.containsKey("mainJar")) { Object mainJarObj = taskParameters.get("mainJar"); ResourceInfo mainJar = JSONUtils.parseObject( JSONUtils.toJsonString(mainJarObj), ResourceInfo.class); ResourceInfo resourceInfo = updateResourceInfo(mainJar); if (resourceInfo != null) { taskParameters.put("mainJar", resourceInfo); } } // update resourceList information if (taskParameters.containsKey("resourceList")) { String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList")); List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class); List<ResourceInfo> updatedResourceInfos = resourceInfos .stream() .map(this::updateResourceInfo) .filter(Objects::nonNull) .collect(Collectors.toList()); taskParameters.put("resourceList", updatedResourceInfos); } // set task parameters taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters)); } } /** * update {@link ResourceInfo} by given original ResourceInfo * * @param res origin resource info * @return {@link ResourceInfo} */ private ResourceInfo updateResourceInfo(ResourceInfo res) { ResourceInfo resourceInfo = null; // only if mainJar is not null and does not contains "resourceName" field if (res != null) { int resourceId = res.getId(); if (resourceId <= 0) { logger.error("invalid resourceId, {}", resourceId); return null; } resourceInfo = new ResourceInfo(); // get resource from database, only one resource should be returned Resource resource = getResourceById(resourceId); resourceInfo.setId(resourceId); resourceInfo.setRes(resource.getFileName()); resourceInfo.setResourceName(resource.getFullName()); if (logger.isInfoEnabled()) { logger.info("updated resource info {}", JSONUtils.toJsonString(resourceInfo)); } } return resourceInfo; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance states */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process definition id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * 
@return task instance list */ public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. * * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param varPool varPool */ public void changeTaskState(TaskInstance 
taskInstance, ExecutionStatus state, Date endTime, int processId, String appIds, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for show in page of taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } //if the result more than one line,just get the first . Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() { }); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { //1 update processInstance host is null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); //2 insert into recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setProcessDefinitionVersion(processDefinition.getVersion()); cmd.setProcessInstanceId(processInstance.getId()); cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task 
instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userCacheProcessor.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantCacheProcessor.queryById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. 
* * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance * * @param processInstance processInstance * @return queue */ public String queryUserQueueByProcessInstance(ProcessInstance processInstance) { String queue = ""; if (processInstance == null) { return queue; } User executor = userCacheProcessor.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userCacheProcessor.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinitionLog); if (result <= 0) { return Constants.DEFINITION_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); return processTaskRelationMapper.batchInsert(processTaskRelationLogList); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(CodeGenerateUtils.getInstance().genCode()); } catch (CodeGenerateException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } newTaskDefinitionLogs.add(taskDefinitionLog); } int insertResult = 0; int updateResult = 0; for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate); } } if (!newTaskDefinitionLogs.isEmpty()) { updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); } return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); boolean result = CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet); if (result) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
    }

    public boolean isTaskOnline(long taskCode) {
        List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
        if (!processTaskRelationList.isEmpty()) {
            Set<Long> processDefinitionCodes = processTaskRelationList
                    .stream()
                    .map(ProcessTaskRelation::getProcessDefinitionCode)
                    .collect(Collectors.toSet());
            List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes);
            // check process definition is already online
            for (ProcessDefinition processDefinition : processDefinitionList) {
                if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Generate the DAG Graph based on the process definition id
     *
     * @param processDefinition process definition
     * @return dag graph
     */
    public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
        List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
        List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList());
        ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations));
        // Generate concrete Dag to be executed
        return DagHelper.buildDagGraph(processDag);
    }

    /**
     * generate DagData
     */
    public DagData genDagData(ProcessDefinition processDefinition) {
        List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
        List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations);
        List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream()
                .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class))
                .collect(Collectors.toList());
        return new DagData(processDefinition, processTaskRelations, taskDefinitions);
    }

    public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) {
        Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
        for (ProcessTaskRelation processTaskRelation : processTaskRelations) {
            if (processTaskRelation.getPreTaskCode() > 0) {
                taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
            }
            if (processTaskRelation.getPostTaskCode() > 0) {
                taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
            }
        }
        return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
    }

    /**
     * find task definition by code and version
     */
    public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) {
        return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion);
    }

    /**
     * find process task relation list by projectCode and processDefinitionCode
     */
    public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) {
        return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode);
    }

    /**
     * add authorized resources
     *
     * @param ownResources own resources
     * @param userId userId
     */
    private void addAuthorizedResources(List<Resource> ownResources, int userId) {
        List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7);
        List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>();
        ownResources.addAll(relationResources);
    }

    /**
     * Use temporarily before refactoring taskNode
     */
    public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<Long, List<Long>> taskCodeMap = new HashMap<>();
        for (ProcessTaskRelation processTaskRelation : taskRelationList) {
            taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> {
                if (v == null) {
                    v = new ArrayList<>();
                }
                if (processTaskRelation.getPreTaskCode() != 0L) {
                    v.add(processTaskRelation.getPreTaskCode());
                }
                return v;
            });
        }
        if (CollectionUtils.isEmpty(taskDefinitionLogs)) {
            taskDefinitionLogs = genTaskDefineList(taskRelationList);
        }
        Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream()
                .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
        List<TaskNode> taskNodeList = new ArrayList<>();
        for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) {
            TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey());
            if (taskDefinitionLog != null) {
                TaskNode taskNode = new TaskNode();
                taskNode.setCode(taskDefinitionLog.getCode());
                taskNode.setVersion(taskDefinitionLog.getVersion());
                taskNode.setName(taskDefinitionLog.getName());
                taskNode.setDesc(taskDefinitionLog.getDescription());
                taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase());
                taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
                taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes());
                taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval());
                Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams());
                taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
                taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT)));
                taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE)));
                taskParamsMap.remove(Constants.CONDITION_RESULT);
                taskParamsMap.remove(Constants.DEPENDENCE);
                taskNode.setParams(JSONUtils.toJsonString(taskParamsMap));
                taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority());
                taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup());
                taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode());
                taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN,
                        taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout())));
                taskNode.setDelayTime(taskDefinitionLog.getDelayTime());
                taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList())));
                taskNodeList.add(taskNode);
            }
        }
        return taskNodeList;
    }

    public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) {
        HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>();
        //find sub tasks
        ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId);
        if (processInstanceMap == null) {
            return processTaskMap;
        }
        ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
        TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
        if (fatherProcess != null) {
            processTaskMap.put(fatherProcess, fatherTask);
        }
        return processTaskMap;
    }

    public ProcessInstance loadNextProcess4Serial(long code, int state) {
        return this.processInstanceMapper.loadNextProcess4Serial(code, state);
    }

    private void deleteCommandWithCheck(int commandId) {
        int delete = this.commandMapper.deleteById(commandId);
        if (delete != 1) {
            throw new ServiceException("delete command fail, id:" + commandId);
        }
    }
}
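The transformTask method in the service fragment above collects each task's predecessor codes with Map.compute before converting task definitions into TaskNode objects. A minimal, self-contained sketch of that pattern, with hypothetical relation data and 0 as the "no predecessor" marker, exactly as the service code treats it:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PredecessorMapSketch {
    public static void main(String[] args) {
        // {preTaskCode, postTaskCode} pairs; preTaskCode 0 marks a head task (hypothetical data)
        long[][] relations = {{0L, 2L}, {1L, 3L}, {2L, 3L}};
        Map<Long, List<Long>> taskCodeMap = new HashMap<>();
        for (long[] relation : relations) {
            long pre = relation[0];
            taskCodeMap.compute(relation[1], (k, v) -> {
                if (v == null) {
                    v = new ArrayList<>();
                }
                if (pre != 0L) {
                    v.add(pre); // only real predecessors are recorded
                }
                return v;
            });
        }
        // prints {2=[], 3=[1, 2]}: task 2 is a head task, task 3 depends on 1 and 2
        System.out.println(taskCodeMap);
    }
}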
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,809
[Bug] [dolphinscheduler-task-sql] NullPointerException happened when send alert.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

NullPointerException happened when run sql task. -[2.0.0-release-prepare]

### What you expected to happen

Run sql task with alert successful.

### How to reproduce

[ERROR] 2021-11-11 14:31:19.462 org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread:[210] - task scheduler failure
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread.sendAlert(TaskExecuteThread.java:225)
at org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread.run(TaskExecuteThread.java:196)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
at org.apache.dolphinscheduler.plugin.task.sql.SqlTask.sendAttachment(SqlTask:280);

`private void sendAttachment(int groupId, String title, String content) {`
`setNeedAlert(Boolean.TRUE);`
`TaskAlertInfo taskAlertInfo = new TaskAlertInfo();`
`taskAlertInfo.setAlertGroupId(groupId);`
`taskAlertInfo.setContent(content);`
`taskAlertInfo.setTitle(title);`
`}`

Forget to setTaskAlertInfo?

`private void sendAttachment(int groupId, String title, String content) {`
`setNeedAlert(Boolean.TRUE);`
`TaskAlertInfo taskAlertInfo = new TaskAlertInfo();`
`taskAlertInfo.setAlertGroupId(groupId);`
`taskAlertInfo.setContent(content);`
`taskAlertInfo.setTitle(title);`
`setTaskAlertInfo(taskAlertInfo);`
`}`

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6809
https://github.com/apache/dolphinscheduler/pull/7089
2ed3c0c3a76dd6ff631b0e63f6328d32fc1c81b3
463fc76bb7bf83d2826c680cc7531b102a7abe65
"2021-11-11T11:52:11Z"
java
"2021-12-01T06:08:40Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-sql/src/main/java/org/apache/dolphinscheduler/plugin/task/sql/SqlTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.plugin.task.sql;

import org.apache.dolphinscheduler.plugin.datasource.api.plugin.DataSourceClientProvider;
import org.apache.dolphinscheduler.plugin.datasource.api.utils.CommonUtils;
import org.apache.dolphinscheduler.plugin.datasource.api.utils.DatasourceUtil;
import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor;
import org.apache.dolphinscheduler.plugin.task.api.TaskException;
import org.apache.dolphinscheduler.plugin.task.util.MapUtils;
import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam;
import org.apache.dolphinscheduler.spi.enums.DbType;
import org.apache.dolphinscheduler.spi.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.spi.task.AbstractParameters;
import org.apache.dolphinscheduler.spi.task.Direct;
import org.apache.dolphinscheduler.spi.task.Property;
import org.apache.dolphinscheduler.spi.task.TaskAlertInfo;
import org.apache.dolphinscheduler.spi.task.TaskConstants;
import org.apache.dolphinscheduler.spi.task.paramparser.ParamUtils;
import org.apache.dolphinscheduler.spi.task.paramparser.ParameterUtils;
import org.apache.dolphinscheduler.spi.task.request.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;
import org.apache.dolphinscheduler.spi.task.request.UdfFuncRequest;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
import org.apache.dolphinscheduler.spi.utils.StringUtils;

import org.apache.commons.collections.CollectionUtils;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.slf4j.Logger;

import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class SqlTask extends AbstractTaskExecutor {

    /**
     * taskExecutionContext
     */
    private TaskRequest taskExecutionContext;

    /**
     * sql parameters
     */
    private SqlParameters sqlParameters;

    /**
     * base datasource
     */
    private BaseConnectionParam baseConnectionParam;

    /**
     * create function format
     */
    private static final String CREATE_FUNCTION_FORMAT = "create temporary function {0} as ''{1}''";

    /**
     * default query sql limit
     */
    private static final int QUERY_LIMIT = 10000;

    /**
     * Abstract Yarn Task
     *
     * @param taskRequest taskRequest
     */
    public SqlTask(TaskRequest taskRequest) {
        super(taskRequest);
        this.taskExecutionContext = taskRequest;
        this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class);

        assert sqlParameters != null;
        if (!sqlParameters.checkParameters()) {
            throw new RuntimeException("sql task params is not valid");
        }
    }

    @Override
    public AbstractParameters getParameters() {
        return sqlParameters;
    }

    @Override
    public void handle() throws Exception {
        // set the name of the current thread
        String threadLoggerInfoName = String.format(TaskConstants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId());
        Thread.currentThread().setName(threadLoggerInfoName);

        logger.info("Full sql parameters: {}", sqlParameters);
        logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {},varPool : {} ,query max result limit {}",
                sqlParameters.getType(),
                sqlParameters.getDatasource(),
                sqlParameters.getSql(),
                sqlParameters.getLocalParams(),
                sqlParameters.getUdfs(),
                sqlParameters.getShowType(),
                sqlParameters.getConnParams(),
                sqlParameters.getVarPool(),
                sqlParameters.getLimit());
        try {
            SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext();

            // get datasource
            baseConnectionParam = (BaseConnectionParam) DatasourceUtil.buildConnectionParams(
                    DbType.valueOf(sqlParameters.getType()),
                    sqlTaskExecutionContext.getConnectionParams());

            // ready to execute SQL and parameter entity Map
            SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql());
            List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements())
                    .orElse(new ArrayList<>())
                    .stream()
                    .map(this::getSqlAndSqlParamsMap)
                    .collect(Collectors.toList());
            List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements())
                    .orElse(new ArrayList<>())
                    .stream()
                    .map(this::getSqlAndSqlParamsMap)
                    .collect(Collectors.toList());
            List<String> createFuncs = createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(),
                    sqlTaskExecutionContext.getDefaultFS(), logger);

            // execute sql task
            executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs);

            setExitStatusCode(TaskConstants.EXIT_CODE_SUCCESS);
        } catch (Exception e) {
            setExitStatusCode(TaskConstants.EXIT_CODE_FAILURE);
            logger.error("sql task error: {}", e.toString());
            throw e;
        }
    }

    /**
     * execute function and sql
     *
     * @param mainSqlBinds main sql binds
     * @param preStatementsBinds pre statements binds
     * @param postStatementsBinds post statements binds
     * @param createFuncs create functions
     */
    public void executeFuncAndSql(SqlBinds mainSqlBinds,
                                  List<SqlBinds> preStatementsBinds,
                                  List<SqlBinds> postStatementsBinds,
                                  List<String> createFuncs) throws Exception {
        Connection connection = null;
        PreparedStatement stmt = null;
        ResultSet resultSet = null;
        try {
            // create connection
            connection = DataSourceClientProvider.getInstance().getConnection(DbType.valueOf(sqlParameters.getType()), baseConnectionParam);
            // create temp function
            if (CollectionUtils.isNotEmpty(createFuncs)) {
                createTempFunction(connection, createFuncs);
            }

            // pre sql
            preSql(connection, preStatementsBinds);
            stmt = prepareStatementAndBind(connection, mainSqlBinds);

            String result = null;
            // decide whether to executeQuery or executeUpdate based on sqlType
            if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) {
                // query statements need to be convert to JsonArray and inserted into Alert to send
                resultSet = stmt.executeQuery();
                result = resultProcess(resultSet);
            } else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) {
                // non query statement
                String updateResult = String.valueOf(stmt.executeUpdate());
                result = setNonQuerySqlReturn(updateResult, sqlParameters.getLocalParams());
            }
            //deal out params
            sqlParameters.dealOutParam(result);
            postSql(connection, postStatementsBinds);
        } catch (Exception e) {
            logger.error("execute sql error: {}", e.getMessage());
            throw e;
        } finally {
            close(resultSet, stmt, connection);
        }
    }

    private String setNonQuerySqlReturn(String updateResult, List<Property> properties) {
        String result = null;
        for (Property info : properties) {
            if (Direct.OUT == info.getDirect()) {
                List<Map<String, String>> updateRL = new ArrayList<>();
                Map<String, String> updateRM = new HashMap<>();
                updateRM.put(info.getProp(), updateResult);
                updateRL.add(updateRM);
                result = JSONUtils.toJsonString(updateRL);
                break;
            }
        }
        return result;
    }

    /**
     * result process
     *
     * @param resultSet resultSet
     * @throws Exception Exception
     */
    private String resultProcess(ResultSet resultSet) throws Exception {
        ArrayNode resultJSONArray = JSONUtils.createArrayNode();
        if (resultSet != null) {
            ResultSetMetaData md = resultSet.getMetaData();
            int num = md.getColumnCount();

            int rowCount = 0;
            int limit = sqlParameters.getLimit() == 0 ? QUERY_LIMIT : sqlParameters.getLimit();

            while (rowCount < limit && resultSet.next()) {
                ObjectNode mapOfColValues = JSONUtils.createObjectNode();
                for (int i = 1; i <= num; i++) {
                    mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i)));
                }
                resultJSONArray.add(mapOfColValues);
                rowCount++;
            }

            int displayRows = sqlParameters.getDisplayRows() > 0 ? sqlParameters.getDisplayRows() : TaskConstants.DEFAULT_DISPLAY_ROWS;
            displayRows = Math.min(displayRows, resultJSONArray.size());
            logger.info("display sql result {} rows as follows:", displayRows);
            for (int i = 0; i < displayRows; i++) {
                String row = JSONUtils.toJsonString(resultJSONArray.get(i));
                logger.info("row {} : {}", i + 1, row);
            }
            if (resultSet.next()) {
                logger.info("sql result limit : {} exceeding results are filtered", limit);
                String log = String.format("sql result limit : %d exceeding results are filtered", limit);
                resultJSONArray.add(JSONUtils.toJsonNode(log));
            }
        }
        String result = JSONUtils.toJsonString(resultJSONArray);
        if (sqlParameters.getSendEmail() == null || sqlParameters.getSendEmail()) {
            sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle())
                    ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets", result);
        }
        logger.debug("execute sql result : {}", result);
        return result;
    }

    /**
     * send alert as an attachment
     *
     * @param title title
     * @param content content
     */
    private void sendAttachment(int groupId, String title, String content) {
        setNeedAlert(Boolean.TRUE);
        TaskAlertInfo taskAlertInfo = new TaskAlertInfo();
        taskAlertInfo.setAlertGroupId(groupId);
        taskAlertInfo.setContent(content);
        taskAlertInfo.setTitle(title);
    }

    /**
     * pre sql
     *
     * @param connection connection
     * @param preStatementsBinds preStatementsBinds
     */
    private void preSql(Connection connection,
                        List<SqlBinds> preStatementsBinds) throws Exception {
        for (SqlBinds sqlBind : preStatementsBinds) {
            try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) {
                int result = pstmt.executeUpdate();
                logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql());
            }
        }
    }

    /**
     * post sql
     *
     * @param connection connection
     * @param postStatementsBinds postStatementsBinds
     */
    private void postSql(Connection connection,
                         List<SqlBinds> postStatementsBinds) throws Exception {
        for (SqlBinds sqlBind : postStatementsBinds) {
            try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) {
                int result = pstmt.executeUpdate();
                logger.info("post statement execute result: {},for sql: {}", result, sqlBind.getSql());
            }
        }
    }

    /**
     * create temp function
     *
     * @param connection connection
     * @param createFuncs createFuncs
     */
    private void createTempFunction(Connection connection,
                                    List<String> createFuncs) throws Exception {
        try (Statement funcStmt = connection.createStatement()) {
            for (String createFunc : createFuncs) {
                logger.info("hive create function sql: {}", createFunc);
                funcStmt.execute(createFunc);
            }
        }
    }

    /**
     * close jdbc resource
     *
     * @param resultSet resultSet
     * @param pstmt pstmt
     * @param connection connection
     */
    private void close(ResultSet resultSet,
                       PreparedStatement pstmt,
                       Connection connection) {
        if (resultSet != null) {
            try {
                resultSet.close();
            } catch (SQLException e) {
                logger.error("close result set error : {}", e.getMessage(), e);
            }
        }

        if (pstmt != null) {
            try {
                pstmt.close();
            } catch (SQLException e) {
                logger.error("close prepared statement error : {}", e.getMessage(), e);
            }
        }

        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException e) {
                logger.error("close connection error : {}", e.getMessage(), e);
            }
        }
    }

    /**
     * preparedStatement bind
     *
     * @param connection connection
     * @param sqlBinds sqlBinds
     * @return PreparedStatement
     * @throws Exception Exception
     */
    private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) {
        // is the timeout set
        boolean timeoutFlag = taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED
                || taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED;
        try {
            PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql());
            if (timeoutFlag) {
                stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout());
            }
            Map<Integer, Property> params = sqlBinds.getParamsMap();
            if (params != null) {
                for (Map.Entry<Integer, Property> entry : params.entrySet()) {
                    Property prop = entry.getValue();
                    ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue());
                }
            }
            logger.info("prepare statement replace sql : {} ", stmt);
            return stmt;
        } catch (Exception exception) {
            throw new TaskException("SQL task prepareStatementAndBind error", exception);
        }
    }

    /**
     * regular expressions match the contents between two specified strings
     *
     * @param content content
     * @param rgex rgex
     * @param sqlParamsMap sql params map
     * @param paramsPropsMap params props map
     */
    public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) {
        Pattern pattern = Pattern.compile(rgex);
        Matcher m = pattern.matcher(content);
        int index = 1;
        while (m.find()) {
            String paramName = m.group(1);
            Property prop = paramsPropsMap.get(paramName);
            if (prop == null) {
                logger.error("setSqlParamsMap: No Property with paramName: {} is found in paramsPropsMap of task instance"
                        + " with id: {}. So couldn't put Property in sqlParamsMap.", paramName, taskExecutionContext.getTaskInstanceId());
            } else {
                sqlParamsMap.put(index, prop);
                index++;
                logger.info("setSqlParamsMap: Property with paramName: {} put in sqlParamsMap of content {} successfully.", paramName, content);
            }
        }
    }

    /**
     * print replace sql
     *
     * @param content content
     * @param formatSql format sql
     * @param rgex rgex
     * @param sqlParamsMap sql params map
     */
    private void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) {
        //parameter print style
        logger.info("after replace sql , preparing : {}", formatSql);
        StringBuilder logPrint = new StringBuilder("replaced sql , parameters:");
        if (sqlParamsMap == null) {
            logger.info("printReplacedSql: sqlParamsMap is null.");
        } else {
            for (int i = 1; i <= sqlParamsMap.size(); i++) {
                logPrint.append(sqlParamsMap.get(i).getValue()).append("(").append(sqlParamsMap.get(i).getType()).append(")");
            }
        }
        logger.info("Sql Params are {}", logPrint);
    }

    /**
     * ready to execute SQL and parameter entity Map
     *
     * @return SqlBinds
     */
    private SqlBinds getSqlAndSqlParamsMap(String sql) {
        Map<Integer, Property> sqlParamsMap = new HashMap<>();
        StringBuilder sqlBuilder = new StringBuilder();

        // combining local and global parameters
        Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext, getParameters());

        // spell SQL according to the final user-defined variable
        if (paramsMap == null) {
            sqlBuilder.append(sql);
            return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
        }

        if (StringUtils.isNotEmpty(sqlParameters.getTitle())) {
            String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(),
                    ParamUtils.convert(paramsMap));
            logger.info("SQL title : {}", title);
            sqlParameters.setTitle(title);
        }

        //new
        //replace variable TIME with $[YYYYmmddd...] in sql when history run job and batch complement job
        sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime());
        // special characters need to be escaped, ${} needs to be escaped
        String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*";
        setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap);
        //Replace the original value in sql !{...} ,Does not participate in precompilation
        String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*";
        sql = replaceOriginalValue(sql, rgexo, paramsMap);
        // replace the ${} of the SQL statement with the Placeholder
        String formatSql = sql.replaceAll(rgex, "?");
        sqlBuilder.append(formatSql);

        // print replace sql
        printReplacedSql(sql, formatSql, rgex, sqlParamsMap);
        return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
    }

    private String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) {
        Pattern pattern = Pattern.compile(rgex);
        while (true) {
            Matcher m = pattern.matcher(content);
            if (!m.find()) {
                break;
            }
            String paramName = m.group(1);
            String paramValue = sqlParamsMap.get(paramName).getValue();
            content = m.replaceFirst(paramValue);
        }
        return content;
    }

    /**
     * create function list
     *
     * @param udfFuncTenantCodeMap key is udf function,value is tenant code
     * @param logger logger
     * @return create function list
     */
    public static List<String> createFuncs(Map<UdfFuncRequest, String> udfFuncTenantCodeMap, String defaultFS, Logger logger) {

        if (MapUtils.isEmpty(udfFuncTenantCodeMap)) {
            logger.info("can't find udf function resource");
            return null;
        }
        List<String> funcList = new ArrayList<>();

        // build jar sql
        buildJarSql(funcList, udfFuncTenantCodeMap, defaultFS);

        // build temp function sql
        buildTempFuncSql(funcList, new ArrayList<>(udfFuncTenantCodeMap.keySet()));
        return funcList;
    }

    /**
     * build temp function sql
     *
     * @param sqls sql list
     * @param udfFuncRequests udf function list
     */
    private static void buildTempFuncSql(List<String> sqls, List<UdfFuncRequest> udfFuncRequests) {
        if (CollectionUtils.isNotEmpty(udfFuncRequests)) {
            for (UdfFuncRequest udfFuncRequest : udfFuncRequests) {
                sqls.add(MessageFormat
                        .format(CREATE_FUNCTION_FORMAT, udfFuncRequest.getFuncName(), udfFuncRequest.getClassName()));
            }
        }
    }

    /**
     * build jar sql
     * @param sqls sql list
     * @param udfFuncTenantCodeMap key is udf function,value is tenant code
     */
    private static void buildJarSql(List<String> sqls, Map<UdfFuncRequest,String> udfFuncTenantCodeMap, String defaultFS) {
        String resourceFullName;
        Set<Entry<UdfFuncRequest, String>> entries = udfFuncTenantCodeMap.entrySet();
        for (Map.Entry<UdfFuncRequest, String> entry : entries) {
            String prefixPath = defaultFS.startsWith("file://") ? "file://" : defaultFS;
            String uploadPath = CommonUtils.getHdfsUdfDir(entry.getValue());
            resourceFullName = entry.getKey().getResourceName();
            resourceFullName = resourceFullName.startsWith("/") ? resourceFullName : String.format("/%s", resourceFullName);
            sqls.add(String.format("add jar %s%s%s", prefixPath, uploadPath, resourceFullName));
        }
    }
}
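Note that the file above is the pre-fix snapshot: sendAttachment constructs a TaskAlertInfo but never attaches it to the task, which is the null that TaskExecuteThread.sendAlert later dereferences. The corrected method, as proposed in the issue body itself (setTaskAlertInfo is the setter the reporter points to, not an API invented here):

    // The fix proposed in the issue: attach the constructed TaskAlertInfo to the task,
    // so the worker's sendAlert reads it instead of dereferencing null.
    private void sendAttachment(int groupId, String title, String content) {
        setNeedAlert(Boolean.TRUE);
        TaskAlertInfo taskAlertInfo = new TaskAlertInfo();
        taskAlertInfo.setAlertGroupId(groupId);
        taskAlertInfo.setContent(content);
        taskAlertInfo.setTitle(title);
        setTaskAlertInfo(taskAlertInfo); // the line the original method omits
    }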
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,095
[Bug] [Create Warning Instance] Incomplete form editing information is displayed.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

After clicking the edit button, the linkage form information is not displayed completely.

![image](https://user-images.githubusercontent.com/19239641/144181670-30dbf46c-f1dc-4046-8812-25d1a8ec16bf.png)

### What you expected to happen

Some parameters of `form-create` dynamically created form are `null`.

### How to reproduce

Click the edit button.

### Anything else

_No response_

### Version

dev

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7095
https://github.com/apache/dolphinscheduler/pull/7096
463fc76bb7bf83d2826c680cc7531b102a7abe65
b3efcdfeba06867e620829638ff27c3069aeac23
"2021-12-01T06:13:53Z"
java
"2021-12-01T06:41:14Z"
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/warningInstance/_source/createWarningInstance.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <m-popover ref="popover" :ok-text="item ? $t('Edit') : $t('Submit')" @ok="_ok" @close="close">
    <template slot="content">
      <div class="create-warning-model">
        <m-list-box-f>
          <template slot="name"><strong>*</strong>{{$t('Alarm instance name')}}</template>
          <template slot="content">
            <el-input
              type="input"
              v-model="instanceName"
              maxlength="60"
              size="small"
              :placeholder="$t('Please enter group name')">
            </el-input>
          </template>
        </m-list-box-f>
        <m-list-box-f>
          <template slot="name"><strong>*</strong>{{$t('Select plugin')}}</template>
          <template slot="content">
            <el-select v-model="pluginDefineId" size="small" style="width: 100%" @change="changePlugin" disabled="true" v-if="item.id">
              <el-option
                v-for="items in pluginInstance"
                :key="items.id"
                :value="items.id"
                :label="items.pluginName">
              </el-option>
            </el-select>
            <el-select v-model="pluginDefineId" size="small" style="width: 100%" @change="changePlugin" v-else>
              <el-option
                v-for="items in pluginInstance"
                :key="items.id"
                :value="items.id"
                :label="items.pluginName">
              </el-option>
            </el-select>
          </template>
        </m-list-box-f>
        <div class="alertForm">
          <template>
            <form-create v-model="$f" :rule="rule" :option="{submitBtn:false}" size="mini"></form-create>
          </template>
        </div>
      </div>
    </template>
  </m-popover>
</template>
<script>
  import i18n from '@/module/i18n'
  import store from '@/conf/home/store'
  import mPopover from '@/module/components/popup/popover'
  import mListBoxF from '@/module/components/listBoxF/listBoxF'

  export default {
    name: 'create-warning',
    data () {
      return {
        store,
        instanceName: '',
        pluginDefineId: null,
        $f: {},
        rule: []
      }
    },
    props: {
      item: Object,
      pluginInstance: Array
    },
    methods: {
      _ok () {
        if (this._verification()) {
          // The name is not verified
          if (this.item && this.item.instanceName === this.instanceName) {
            this._submit()
            return
          }
          // Verify username
          this.store.dispatch('security/verifyName', {
            type: 'alarmInstance',
            instanceName: this.instanceName
          }).then(res => {
            this._submit()
          }).catch(e => {
            this.$message.error(e.msg || '')
          })
        }
      },
      _verification () {
        // group name
        if (!this.instanceName.replace(/\s*/g, '')) {
          this.$message.warning(`${i18n.$t('Please enter group name')}`)
          return false
        }
        if (!this.pluginDefineId) {
          this.$message.warning(`${i18n.$t('Select Alarm plugin')}`)
          return false
        }
        return true
      },
      // Select plugin
      changePlugin () {
        this.store.dispatch('security/getUiPluginById', {
          pluginId: this.pluginDefineId
        }).then(res => {
          this.rule = JSON.parse(res.pluginParams)
          this.rule.forEach(item => {
            if (item.title.indexOf('$t') !== -1) {
              item.title = this.$t(item.field)
            }
            // fix null pointer exception
            if (!item.props) {
              item.props = {}
            }
          })
        }).catch(e => {
          this.$message.error(e.msg || '')
        })
      },
      _submit () {
        this.$f.validate((valid) => {
          if (valid) {
            this.$f.rule.forEach(item => {
              item.title = item.name
            })
            let param = {
              instanceName: this.instanceName,
              pluginDefineId: this.pluginDefineId,
              pluginInstanceParams: JSON.stringify(this.$f.rule)
            }
            if (this.item) {
              param.alertPluginInstanceId = this.item.id
              param.pluginDefineId = null
            }
            this.$refs.popover.spinnerLoading = true
            this.store.dispatch(`security/${this.item ? 'updateAlertPluginInstance' : 'createAlertPluginInstance'}`, param).then(res => {
              this.$refs.popover.spinnerLoading = false
              this.$emit('onUpdate')
              this.$message.success(res.msg)
            }).catch(e => {
              this.$message.error(e.msg || '')
              this.$refs.popover.spinnerLoading = false
            })
          } else {
            this.$message.warning(`${i18n.$t('Instance parameter exception')}`)
            this.$refs.popover.spinnerLoading = false
          }
        })
      },
      close () {
        this.$emit('close')
      }
    },
    watch: {},
    created () {
      let pluginInstanceParams = []
      if (this.item) {
        this.instanceName = this.item.instanceName
        this.pluginDefineId = this.item.pluginDefineId
        JSON.parse(this.item.pluginInstanceParams).forEach(item => {
          if (item.title.indexOf('$t') !== -1) {
            item.title = this.$t(item.field)
          }
          pluginInstanceParams.push(item)
        })
        this.rule = pluginInstanceParams
      }
    },
    mounted () {
    },
    components: {
      mPopover,
      mListBoxF
    }
  }
</script>

<style lang="scss" rel="stylesheet/scss">
  .alertForm {
    label {
      span {
        font-weight: 10!important;
      }
    }
    .el-row {
      width: 520px;
    }
    .el-form-item__label {
      width: 144px!important;
      color: #606266!important;
    }
    .el-form-item__content {
      margin-left: 144px!important;
      width: calc(100% - 162px);
    }
  }
</style>
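In the component above, changePlugin guards against a `null` `props` on each parsed form-create rule, but the created() backfill path parses pluginInstanceParams without that guard, which matches the incomplete-display symptom the issue reports. A small sketch of factoring the normalization into one shared helper; the function name is hypothetical and `translate` stands in for the component's `$t`:

// Hypothetical helper: normalize dynamic form-create rules before rendering.
function normalizeRules (rules, translate) {
  return rules.map(item => {
    // resolve i18n placeholder titles such as '$t(...)'
    if (item.title && item.title.indexOf('$t') !== -1) {
      item.title = translate(item.field)
    }
    // form-create breaks when a rule's props is null
    if (!item.props) {
      item.props = {}
    }
    return item
  })
}

// usage in both changePlugin and created():
//   this.rule = normalizeRules(JSON.parse(res.pluginParams), this.$t.bind(this))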
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,092
[Improvement][ui] When formatting the DAG, support tasks with no location data.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.

### Description

There is no location data when the third party calls the interface to associate tasks and workflows. On the workflow page, these tasks are not displayed, and they are not formatted at the same time when formatting the DAG.

### Use case

When formatting the DAG on page, format and display these tasks without location data.

### Related issues

_No response_

### Are you willing to submit a PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7092
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-12-01T02:54:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/canvas/canvas.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="dag-canvas"> <dag-taskbar @on-drag-start="onDragStart" /> <div class="dag-container" ref="container" @dragenter.prevent @dragover.prevent @dragleave.prevent @drop.stop.prevent="onDrop" > <div ref="paper" class="paper"></div> <div ref="minimap" class="minimap"></div> <div class="scale-slider"> <span class="scale-title">{{$t('dagScale')}}</span> <el-slider v-model="scale" vertical :max="2" :min="0.2" :step="0.2" :marks="SCALE_MARKS" @input='scaleChange' /> </div> <context-menu ref="contextMenu" /> </div> <layout-config-modal ref="layoutModal" @submit="format" /> </div> </template> <script> import _ from 'lodash' import { Graph, DataUri } from '@antv/x6' import dagTaskbar from './taskbar.vue' import contextMenu from './contextMenu.vue' import layoutConfigModal, { LAYOUT_TYPE } from './layoutConfigModal.vue' import { NODE, EDGE, X6_NODE_NAME, X6_EDGE_NAME, NODE_STATUS_MARKUP } from './x6-helper' import { DagreLayout, GridLayout } from '@antv/layout' import { tasksType, tasksState } from '../config' import { mapActions, mapMutations, mapState } from 'vuex' import nodeStatus from './nodeStatus' import x6StyleMixin from './x6-style-mixin' const SCALE_MARKS = { 0.2: '0.2', 1: '1', 2: '2' } export default { name: 'dag-canvas', data () { return { graph: null, // Used to calculate the context menu location originalScrollPosition: { left: 0, top: 0 }, editable: true, dragging: { // Distance from the mouse to the top-left corner of the dragging element x: 0, y: 0, type: '' }, // The canvas scale scale: 1, SCALE_MARKS } }, provide () { return { dagCanvas: this } }, mixins: [x6StyleMixin], inject: ['dagChart'], components: { dagTaskbar, contextMenu, layoutConfigModal }, computed: { ...mapState('dag', ['tasks']) }, methods: { ...mapActions('dag', ['genTaskCodeList']), ...mapMutations('dag', ['removeTask']), /** * Recalculate the paper width and height */ paperResize () { const w = this.$el.offsetWidth const h = this.$el.offsetHeight this.graph.resize(w, h) }, /** * Init graph * This will be called in the dag-chart mounted event * @param {boolean} uneditable */ graphInit (editable) { const self = this this.editable = !!editable const paper = this.$refs.paper const minimap = this.$refs.minimap const graph = (this.graph = new Graph({ container: paper, selecting: { enabled: true, multiple: true, rubberband: true, rubberEdge: true, movable: true, showNodeSelectionBox: false }, scaling: { min: 0.2, max: 2 }, mousewheel: { enabled: true, modifiers: ['ctrl', 'meta'] }, scroller: true, grid: { size: 10, visible: true }, snapline: true, minimap: { enabled: true, container: minimap, scalable: false, width: 200, height: 120 }, interacting: { edgeLabelMovable: false, nodeMovable: !!editable, magnetConnectable: !!editable }, 
connecting: { // Whether multiple edges can be created between the same start node and end allowMulti: false, // Whether a point is allowed to connect to a blank position on the canvas allowBlank: false, // The start node and the end node are the same node allowLoop: false, // Whether an edge is allowed to link to another edge allowEdge: false, // Whether edges are allowed to link to nodes allowNode: true, // Whether to allow edge links to ports allowPort: false, // Whether all available ports or nodes are highlighted when you drag the edge highlight: true, createEdge () { return graph.createEdge({ shape: X6_EDGE_NAME }) }, validateConnection (data) { const { sourceCell, targetCell } = data if ( sourceCell && targetCell && sourceCell.isNode() && targetCell.isNode() ) { const edgeData = { sourceId: Number(sourceCell.id), targetId: Number(targetCell.id) } if (!self.edgeIsValid(edgeData)) { return false } } return true } }, highlighting: { nodeAvailable: { name: 'className', args: { className: 'available' } }, magnetAvailable: { name: 'className', args: { className: 'available' } }, magnetAdsorbed: { name: 'className', args: { className: 'adsorbed' } } } })) this.registerX6Shape() this.bindGraphEvent() this.originalScrollPosition = graph.getScrollbarPosition() }, /** * Register custom shapes */ registerX6Shape () { Graph.unregisterNode(X6_NODE_NAME) Graph.unregisterEdge(X6_EDGE_NAME) Graph.registerNode(X6_NODE_NAME, { ...NODE }) Graph.registerEdge(X6_EDGE_NAME, { ...EDGE }) }, /** * Bind grap event */ bindGraphEvent () { this.bindStyleEvent(this.graph) // update scale bar this.graph.on('scale', ({ sx }) => { this.scale = sx }) // right click this.graph.on('node:contextmenu', ({ x, y, cell }) => { const { left, top } = this.graph.getScrollbarPosition() const o = this.originalScrollPosition this.$refs.contextMenu.show(x + (o.left - left), y + (o.top - top)) this.$refs.contextMenu.setCurrentTask({ name: cell.data.taskName, type: cell.data.taskType, code: Number(cell.id) }) }) // node double click this.graph.on('node:dblclick', ({ cell }) => { this.dagChart.openFormModel(Number(cell.id), cell.data.taskType) }) // create edge label this.graph.on('edge:dblclick', ({ cell }) => { const labelName = this.getEdgeLabelName(cell) this.dagChart.$refs.edgeEditModel.show({ id: cell.id, label: labelName }) }) // Make sure the edge starts with node, not port this.graph.on('edge:connected', ({ isNew, edge }) => { if (isNew) { const sourceNode = edge.getSourceNode() edge.setSource(sourceNode) } }) }, /** * @param {Edge|string} edge */ getEdgeLabelName (edge) { if (typeof edge === 'string') edge = this.graph.getCellById(edge) const labels = edge.getLabels() const labelName = _.get(labels, ['0', 'attrs', 'label', 'text'], '') return labelName }, /** * Set edge label by id * @param {string} id * @param {string} label */ setEdgeLabel (id, label) { const edge = this.graph.getCellById(id) edge.setLabels(label) }, /** * @param {number} limit * @param {string} text * Each Chinese character is equal to two chars */ truncateText (text, n) { const exp = /[\u4E00-\u9FA5]/ let res = '' let len = text.length let chinese = text.match(new RegExp(exp, 'g')) if (chinese) { len += chinese.length } if (len > n) { let i = 0 let acc = 0 while (true) { let char = text[i] if (exp.test(char)) { acc += 2 } else { acc++ } if (acc > n) break res += char i++ } res += '...' 
} else { res = text } return res }, /** * Set node name by id * @param {string|number} id * @param {string} name */ setNodeName (id, name) { id += '' const node = this.graph.getCellById(id) if (node) { const truncation = this.truncateText(name, 18) node.attr('title/text', truncation) node.setData({ taskName: name }) } }, /** * Convert the graph to JSON * @return {{cells:Cell[]}} */ toJSON () { return this.graph.toJSON() }, /** * Generate graph with JSON */ fromJSON (json) { this.graph.fromJSON(json) }, /** * getNodes * @return {Node[]} */ // interface Node { // id: number; // position: {x:number;y:number}; // data: {taskType:string;taskName:string;} // } getNodes () { const nodes = this.graph.getNodes() return nodes.map((node) => { const position = node.getPosition() const data = node.getData() return { id: Number(node.id), position: position, data: data } }) }, /** * getEdges * @return {Edge[]} Edge is inherited from the Cell */ // interface Edge { // id: string; // label: string; // sourceId: number; // targetId: number; // } getEdges () { const edges = this.graph.getEdges() return edges.map((edge) => { const labelData = edge.getLabelAt(0) return { id: edge.id, label: _.get(labelData, ['attrs', 'label', 'text'], ''), sourceId: Number(edge.getSourceCellId()), targetId: Number(edge.getTargetCellId()) } }) }, /** * downloadPNG * @param {string} filename */ downloadPNG (fileName = 'chart') { this.graph.toPNG( (dataUri) => { DataUri.downloadDataUri(dataUri, `${fileName}.png`) }, { padding: { top: 50, right: 50, bottom: 50, left: 50 }, backgroundColor: '#f2f3f7' } ) }, showLayoutModal () { const layoutModal = this.$refs.layoutModal if (layoutModal) { layoutModal.show() } }, /** * format * @desc Auto layout use @antv/layout */ format (layoutConfig) { this.graph.cleanSelection() let layoutFunc = null if (layoutConfig.type === LAYOUT_TYPE.DAGRE) { layoutFunc = new DagreLayout({ type: LAYOUT_TYPE.DAGRE, rankdir: 'LR', align: 'UL', // Calculate the node spacing based on the edge label length ranksepFunc: (d) => { const edges = this.graph.getOutgoingEdges(d.id) let max = 0 if (edges && edges.length > 0) { edges.forEach((edge) => { const edgeView = this.graph.findViewByCell(edge) const labelWidth = +edgeView.findAttr( 'width', _.get(edgeView, ['labelSelectors', '0', 'body'], null) ) max = Math.max(max, labelWidth) }) } return layoutConfig.ranksep + max }, nodesep: layoutConfig.nodesep, controlPoints: true }) } else if (layoutConfig.type === LAYOUT_TYPE.GRID) { layoutFunc = new GridLayout({ type: LAYOUT_TYPE.GRID, preventOverlap: true, preventOverlapPadding: layoutConfig.padding, sortBy: '_index', rows: layoutConfig.rows || undefined, cols: layoutConfig.cols || undefined, nodeSize: 220 }) } const json = this.toJSON() const nodes = json.cells .filter((cell) => cell.shape === X6_NODE_NAME) .map((item) => { return { ...item, // sort by code aesc _index: -item.id } }) const edges = json.cells.filter((cell) => cell.shape === X6_EDGE_NAME) const newModel = layoutFunc.layout({ nodes: nodes, edges: edges }) this.fromJSON(newModel) }, /** * add a node to the graph * @param {string|number} id * @param {string} taskType * @param {{x:number;y:number}} coordinate Default is { x: 100, y: 100 } */ addNode (id, taskType, coordinate = { x: 100, y: 100 }) { id += '' if (!tasksType[taskType]) { console.warn(`taskType:${taskType} is invalid!`) return } const node = this.genNodeJSON(id, taskType, '', coordinate) this.graph.addNode(node) }, /** * generate node json * @param {number|string} id * @param {string} taskType * 
@param {{x:number;y:number}} coordinate Default is { x: 100, y: 100 } */ genNodeJSON (id, taskType, taskName, coordinate = { x: 100, y: 100 }) { id += '' const url = require(`../images/task-icos/${taskType.toLocaleLowerCase()}.png`) const truncation = taskName ? this.truncateText(taskName, 18) : id return { id: id, shape: X6_NODE_NAME, x: coordinate.x, y: coordinate.y, data: { taskType: taskType, taskName: taskName }, attrs: { image: { // Use href instead of xlink:href, you may lose the icon when downloadPNG 'xlink:href': url }, title: { text: truncation } } } }, /** * generate edge json * @param {number|string} sourceId * @param {number|string} targetId * @param {string} label */ genEdgeJSON (sourceId, targetId, label = '') { sourceId += '' targetId += '' return { shape: X6_EDGE_NAME, source: { cell: sourceId }, target: { cell: targetId }, labels: label ? [label] : undefined } }, /** * remove a node * @param {string|number} id NodeId */ removeNode (id) { id += '' this.graph.removeNode(id) this.removeTask(+id) }, /** * remove an edge * @param {string} id EdgeId */ removeEdge (id) { this.graph.removeEdge(id) }, /** * remove multiple cells * @param {Cell[]} cells */ removeCells (cells) { this.graph.removeCells(cells) cells.forEach((cell) => { if (cell.isNode()) { this.removeTask(+cell.id) } }) }, /** * Verify whether edge is valid * The number of edges start with CONDITIONS task cannot be greater than 2 */ edgeIsValid (edge) { const { sourceId } = edge const sourceTask = this.tasks.find((task) => task.code === sourceId) if (sourceTask.taskType === 'CONDITIONS') { const edges = this.getEdges() return edges.filter((e) => e.sourceId === sourceTask.code).length <= 2 } return true }, /** * Gets the current selections * @return {Cell[]} */ getSelections () { return this.graph.getSelectedCells() }, /** * Lock scroller */ lockScroller () { this.graph.lockScroller() }, /** * Unlock scroller */ unlockScroller () { this.graph.unlockScroller() }, /** * set node status icon * @param {number} code * @param {string} state */ setNodeStatus ({ code, state, taskInstance }) { code += '' const stateProps = tasksState[state] const node = this.graph.getCellById(code) if (node) { // Destroy the previous dom node.removeMarkup() node.setMarkup(NODE.markup.concat(NODE_STATUS_MARKUP)) const nodeView = this.graph.findViewByCell(node) const el = nodeView.find('div')[0] nodeStatus({ stateProps, taskInstance }).$mount(el) } }, /** * Drag && Drop Event */ onDragStart (e, taskType) { if (!this.editable) { e.preventDefault() return } this.dragging = { x: e.offsetX, y: e.offsetY, type: taskType.name } }, onDrop (e) { const { type } = this.dragging const { x, y } = this.calcGraphCoordinate(e.clientX, e.clientY) this.genTaskCodeList({ genNum: 1 }) .then((res) => { const [code] = res this.addNode(code, type, { x, y }) this.dagChart.openFormModel(code, type) }) .catch((err) => { console.error(err) }) }, calcGraphCoordinate (mClientX, mClientY) { // Distance from the mouse to the top-left corner of the container; const { left: cX, top: cY } = this.$refs.container.getBoundingClientRect() const mouseX = mClientX - cX const mouseY = mClientY - cY // The distance that paper has been scrolled const { left: sLeft, top: sTop } = this.graph.getScrollbarPosition() const { left: oLeft, top: oTop } = this.originalScrollPosition const scrollX = sLeft - oLeft const scrollY = sTop - oTop // Distance from the mouse to the top-left corner of the dragging element; const { x: eX, y: eY } = this.dragging return { x: mouseX + scrollX - eX, y: mouseY 
+ scrollY - eY } }, /** * Get prev nodes by code * @param {number} code * node1 -> node2 -> node3 * getPrevNodes(node2.code) => [node1] */ getPrevNodes (code) { const nodes = this.getNodes() const edges = this.getEdges() const nodesMap = {} nodes.forEach((node) => { nodesMap[node.id] = node }) return edges .filter((edge) => edge.targetId === code) .map((edge) => nodesMap[edge.sourceId]) }, /** * set prev nodes * @param {number} code * @param {number[]} preNodeCodes * @param {boolean} override If set to true, setPreNodes will delete all edges that end with the node and rebuild */ setPreNodes (code, preNodeCodes, override) { const edges = this.getEdges() const currPreCodes = [] edges.forEach((edge) => { if (edge.targetId === code) { if (override) { this.removeEdge(edge.id) } else { currPreCodes.push(edge.sourceId) } } }) preNodeCodes.forEach((preCode) => { if (currPreCodes.includes(preCode) || preCode === code) return const edge = this.genEdgeJSON(preCode, code) this.graph.addEdge(edge) }) }, /** * Get post nodes by code * @param {number} code * node1 -> node2 -> node3 * getPostNodes(node2.code) => [node3] */ getPostNodes (code) { const nodes = this.getNodes() const edges = this.getEdges() const nodesMap = {} nodes.forEach((node) => { nodesMap[node.id] = node }) return edges .filter((edge) => edge.sourceId === code) .map((edge) => nodesMap[edge.targetId]) }, /** * set post nodes * @param {number} code * @param {number[]} postNodeCodes * @param {boolean} override If set to true, setPreNodes will delete all edges that end with the node and rebuild */ setPostNodes (code, postNodeCodes, override) { const edges = this.getEdges() const currPostCodes = [] edges.forEach((edge) => { if (edge.sourceId === code) { if (override) { this.removeEdge(edge.id) } else { currPostCodes.push(edge.targetId) } } }) postNodeCodes.forEach((postCode) => { if (currPostCodes.includes(postCode) || postCode === code) return const edge = this.genEdgeJSON(code, postCode) this.graph.addEdge(edge) }) }, /** * Navigate to cell * @param {string} taskName */ navigateTo (taskName) { const nodes = this.getNodes() nodes.forEach((node) => { if (node.data.taskName === taskName) { const id = node.id const cell = this.graph.getCellById(id) this.graph.scrollToCell(cell, { animation: { duration: 600 } }) this.graph.cleanSelection() this.graph.select(cell) } }) }, /** * Canvas scale */ scaleChange (val) { this.graph.zoomTo(val) } } } </script> <style lang="scss" scoped> @import "./canvas"; </style> <style lang="scss"> @import "./x6-style"; </style>
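In the canvas component above, genNodeJSON already falls back to `{ x: 100, y: 100 }` when no coordinate is passed, so a caller can reuse that default for tasks whose saved location is missing and have them rendered and picked up by format(). A hedged sketch of such a lookup; the helper name is hypothetical, and the actual change shipped in PR #7102 may differ:

// Hypothetical helper: resolve a task's coordinate, falling back to the same
// default genNodeJSON uses, so tasks without location data still appear on the
// canvas and participate in DAG formatting.
function resolveCoordinate (locations, taskCode) {
  const location = locations.find(l => l.taskCode === taskCode)
  return location && typeof location.x === 'number' && typeof location.y === 'number'
    ? { x: location.x, y: location.y }
    : { x: 100, y: 100 } // assumed default; matches genNodeJSON's fallback
}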
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,092
[Improvement][ui] When formatting the DAG, support tasks with no location data.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.

### Description

There is no location data when the third party calls the interface to associate tasks and workflows. On the workflow page, these tasks are not displayed, and they are not formatted at the same time when formatting the DAG.

### Use case

When formatting the DAG on page, format and display these tasks without location data.

### Related issues

_No response_

### Are you willing to submit a PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7092
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-12-01T02:54:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/canvas/layoutConfigModal.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <el-dialog
    :title="$t('Format DAG')"
    :visible.sync="visible"
    width="500px"
    class="dag-layout-modal"
    :append-to-body="true"
  >
    <el-form
      ref="form"
      :model="form"
      label-width="100px"
      class="dag-layout-form"
    >
      <el-form-item :label="$t('layoutType')">
        <el-radio-group v-model="form.type">
          <el-radio label="grid">{{ $t("gridLayout") }}</el-radio>
          <el-radio label="dagre">{{ $t("dagreLayout") }}</el-radio>
        </el-radio-group>
      </el-form-item>
      <el-form-item :label="$t('rows')" v-if="form.type === LAYOUT_TYPE.GRID">
        <el-input-number
          v-model="form.rows"
          :min="0"
          size="small"
        ></el-input-number>
      </el-form-item>
      <el-form-item :label="$t('cols')" v-if="form.type === LAYOUT_TYPE.GRID">
        <el-input-number
          v-model="form.cols"
          :min="0"
          size="small"
        ></el-input-number>
      </el-form-item>
    </el-form>
    <span slot="footer" class="dialog-footer">
      <el-button size="small" @click="close">{{ $t("Cancel") }}</el-button>
      <el-button size="small" type="primary" @click="submit">{{ $t("Confirm") }}</el-button>
    </span>
  </el-dialog>
</template>
<script>
  export const LAYOUT_TYPE = {
    GRID: 'grid',
    DAGRE: 'dagre'
  }

  export default {
    data () {
      return {
        visible: false,
        form: {
          type: LAYOUT_TYPE.DAGRE,
          rows: 0,
          cols: 0,
          padding: 50,
          ranksep: 50,
          nodesep: 50
        },
        LAYOUT_TYPE
      }
    },
    methods: {
      show () {
        this.visible = true
      },
      close () {
        this.visible = false
      },
      submit () {
        this.$emit('submit', this.form)
        this.close()
      }
    }
  }
</script>

<style lang="scss" scoped>
  .dag-layout-modal {
    ::v-deep .el-dialog__header {
      border-bottom: solid 1px #d4d4d4;
    }
    ::v-deep .dag-layout-form {
      margin-top: 20px;
    }
    ::v-deep .el-radio {
      margin-bottom: 0;
    }
    .el-form-item {
      margin-bottom: 10px;
    }
  }
</style>
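For reference, the modal only emits its plain form object on submit, and dag-canvas.format() consumes it directly. The shape of that payload, with the component's own defaults from data() above:

// Layout config emitted by submit() and consumed by dag-canvas.format().
const layoutConfig = {
  type: 'dagre',   // LAYOUT_TYPE.DAGRE or LAYOUT_TYPE.GRID ('grid')
  rows: 0,         // grid layout only; 0 lets the layout decide
  cols: 0,         // grid layout only
  padding: 50,     // grid: used as preventOverlapPadding
  ranksep: 50,     // dagre: base rank separation fed into ranksepFunc
  nodesep: 50      // dagre: node separation
}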
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,092
[Improvement][ui] When formatting the DAG, support tasks with no location data.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.

### Description

There is no location data when the third party calls the interface to associate tasks and workflows. On the workflow page, these tasks are not displayed, and they are not formatted at the same time when formatting the DAG.

### Use case

When formatting the DAG on page, format and display these tasks without location data.

### Related issues

_No response_

### Are you willing to submit a PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7092
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-12-01T02:54:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div :class="['dag-chart', fullScreen ? 'full-screen' : '']"> <dag-toolbar /> <dag-canvas ref="canvas" /> <el-drawer :visible.sync="taskDrawer" size="" :with-header="false" :wrapperClosable="false" class="task-drawer" > <!-- fix the bug that Element-ui(2.13.2) auto focus on the first input --> <div style="width: 0px; height: 0px; overflow: hidden"> <el-input type="text" /> </div> <m-form-model v-if="taskDrawer" :nodeData="nodeData" @seeHistory="seeHistory" @addTaskInfo="addTaskInfo" @close="closeTaskDrawer" @onSubProcess="toSubProcess" :type="type" ></m-form-model> </el-drawer> <el-dialog :title="$t('Set the DAG diagram name')" :visible.sync="saveDialog" width="auto" > <m-udp ref="mUdp" @onUdp="onSave" @close="cancelSave"></m-udp> </el-dialog> <el-dialog :title="$t('Please set the parameters before starting')" :visible.sync="startDialog" width="auto" > <m-start :startData="{ code: definitionCode, name: name }" :startNodeList="startTaskName" :sourceType="'contextmenu'" @onUpdateStart="onUpdateStart" @closeStart="closeStart" ></m-start> </el-dialog> <edge-edit-model ref="edgeEditModel" /> <el-drawer :visible.sync="versionDrawer" size="" :with-header="false"> <!-- fix the bug that Element-ui(2.13.2) auto focus on the first input --> <div style="width: 0px; height: 0px; overflow: hidden"> <el-input type="text" /> </div> <m-versions :versionData="versionData" :isInstance="type === 'instance'" @mVersionSwitchProcessDefinitionVersion="switchProcessVersion" @mVersionGetProcessDefinitionVersionsPage="getProcessVersions" @mVersionDeleteProcessDefinitionVersion="deleteProcessVersion" @closeVersion="closeVersion" ></m-versions> </el-drawer> <m-log v-if="type === 'instance' && logDialog" :item="logTaskInstance" source='dag' :task-instance-id="logTaskInstance.id" @close="closeLogDialog" ></m-log> </div> </template> <script> import { debounce } from 'lodash' import dagToolbar from './canvas/toolbar.vue' import dagCanvas from './canvas/canvas.vue' import mFormModel from '../_source/formModel/formModel.vue' import { mapActions, mapState, mapMutations } from 'vuex' import mUdp from '../_source/udp/udp.vue' import mStart from '../../projects/pages/definition/pages/list/_source/start.vue' import edgeEditModel from './canvas/edgeEditModel.vue' import mVersions from '../../projects/pages/definition/pages/list/_source/versions.vue' import mLog from './formModel/log.vue' const DEFAULT_NODE_DATA = { id: null, taskType: '', self: {}, instanceId: null } export default { name: 'dag-chart', components: { dagCanvas, dagToolbar, mFormModel, mUdp, mStart, edgeEditModel, mVersions, mLog }, provide () { return { dagChart: this } }, inject: ['definitionDetails'], props: { type: String, releaseState: String }, data () { return { definitionCode: 0, 
// full screen mode fullScreen: false, // whether the task config drawer is visible taskDrawer: false, nodeData: { ...DEFAULT_NODE_DATA }, // whether the save dialog is visible saveDialog: false, // whether the start dialog is visible startDialog: false, startTaskName: '', // whether the version drawer is visible versionDrawer: false, versionData: { processDefinition: { id: null, version: '', releaseState: '' }, processDefinitionVersions: [], total: null, pageNo: null, pageSize: null }, // the task status refresh timer statusTimer: null, // the process instance id instanceId: -1, // log dialog logDialog: false, logTaskInstance: null, taskInstances: [] } }, mounted () { this.setIsEditDag(false) if (this.type === 'instance') { this.instanceId = this.$route.params.id this.definitionCode = this.$route.query.code || this.code } else if (this.type === 'definition') { this.definitionCode = this.$route.params.code } // auto resize canvas this.resizeDebounceFunc = debounce(this.canvasResize, 200) window.addEventListener('resize', this.resizeDebounceFunc) // init graph this.$refs.canvas.graphInit(!this.isDetails) // backfill graph with tasks, locations and connects this.backfill() // refresh task status if (this.type === 'instance') { this.refreshTaskStatus() // status polling this.statusTimer = setInterval(() => { this.refreshTaskStatus() }, 90000) } }, beforeDestroy () { this.resetParams() clearInterval(this.statusTimer) window.removeEventListener('resize', this.resizeDebounceFunc) }, computed: { ...mapState('dag', [ 'tasks', 'locations', 'connects', 'name', 'isDetails', 'projectCode', 'version', 'code' ]) }, methods: { ...mapActions('dag', [ 'saveDAGchart', 'updateInstance', 'updateDefinition', 'getTaskState', 'getStartCheck', 'genTaskCodeList', 'switchProcessDefinitionVersion', 'getProcessDefinitionVersionsPage', 'deleteProcessDefinitionVersion' ]), ...mapMutations('dag', [ 'addTask', 'setConnects', 'resetParams', 'setIsEditDag', 'setName', 'setLocations', 'resetLocalParam' ]), /** * Toggle full screen */ canvasResize () { const canvas = this.$refs.canvas canvas && canvas.paperResize() }, toggleFullScreen () { this.fullScreen = !this.fullScreen this.$nextTick(this.canvasResize) }, /** * Task Drawer * @param {boolean} visible */ toggleTaskDrawer (visible) { this.taskDrawer = visible }, /** * Set the current node data */ setNodeData (nodeData) { this.nodeData = Object.assign(DEFAULT_NODE_DATA, nodeData) }, /** * open form model * @desc Edit task config * @param {number} taskCode * @param {string} taskType */ openFormModel (taskCode, taskType) { this.setNodeData({ id: taskCode, taskType: taskType }) this.toggleTaskDrawer(true) }, addTaskInfo ({ item }) { this.addTask(item) this.$refs.canvas.setNodeName(item.code, item.name) this.taskDrawer = false }, closeTaskDrawer ({ flag }) { if (flag) { const canvas = this.$refs.canvas canvas.removeNode(this.nodeData.id) } this.taskDrawer = false }, /** * Save dialog */ toggleSaveDialog (value) { this.saveDialog = value if (value) { this.$nextTick(() => { this.$refs.mUdp.reloadParam() }) } }, onSave (sourceType) { this.toggleSaveDialog(false) return new Promise((resolve, reject) => { let tasks = this.tasks || [] const edges = this.$refs.canvas.getEdges() const nodes = this.$refs.canvas.getNodes() if (!nodes.length) { reject(this.$t('Failed to create node to save')) } const connects = this.buildConnects(edges, tasks) this.setConnects(connects) const locations = nodes.map((node) => { return { taskCode: node.id, x: node.position.x, y: node.position.y } }) 
this.setLocations(locations) resolve({ connects: connects, tasks: tasks, locations: locations }) }) .then((res) => { if (this.verifyConditions(res.tasks)) { this.loading(true) const isEdit = !!this.definitionCode if (isEdit) { const methodName = this.type === 'instance' ? 'updateInstance' : 'updateDefinition' const methodParam = this.type === 'instance' ? this.instanceId : this.definitionCode // Edit return this[methodName](methodParam) .then((res) => { this.$message({ message: res.msg, type: 'success', offset: 80 }) if (this.type === 'instance') { this.$router.push({ path: `/projects/${this.projectCode}/instance/list` }) } else { this.$router.push({ path: `/projects/${this.projectCode}/definition/list` }) } }) .catch((e) => { this.$message.error(e.msg || '') }) .finally((e) => { this.loading(false) }) } else { // Create return this.saveDAGchart() .then((res) => { this.$message.success(res.msg) // source @/conf/home/pages/dag/_source/editAffirmModel/index.js if (sourceType !== 'affirm') { // Jump process definition this.$router.push({ name: 'projects-definition-list' }) } }) .catch((e) => { this.setName('') this.$message.error(e.msg || '') }) .finally((e) => { this.loading(false) }) } } }) .catch((err) => { let msg = typeof err === 'string' ? err : err.msg || '' this.$message.error(msg) }) }, verifyConditions (value) { let tasks = value let bool = true tasks.map((v) => { if ( v.taskType === 'CONDITIONS' && (v.taskParams.conditionResult.successNode[0] === '' || v.taskParams.conditionResult.successNode[0] === null || v.taskParams.conditionResult.failedNode[0] === '' || v.taskParams.conditionResult.failedNode[0] === null) ) { bool = false return false } }) if (!bool) { this.$message.warning( `${this.$t( 'Successful branch flow and failed branch flow are required' )}` ) return false } return true }, cancelSave () { this.toggleSaveDialog(false) }, /** * build graph json */ buildGraphJSON (tasks, locations, connects) { const nodes = [] const edges = [] tasks.forEach((task) => { const location = locations.find((l) => l.taskCode === task.code) || {} const node = this.$refs.canvas.genNodeJSON( task.code, task.taskType, task.name, { x: location.x, y: location.y } ) nodes.push(node) }) connects .filter((r) => !!r.preTaskCode) .forEach((c) => { const edge = this.$refs.canvas.genEdgeJSON( c.preTaskCode, c.postTaskCode, c.name ) edges.push(edge) }) return { nodes, edges } }, /** * Build connects by edges and tasks * @param {Edge[]} edges * @param {Task[]} tasks * @returns */ buildConnects (edges, tasks) { const preTaskMap = {} const tasksMap = {} edges.forEach((edge) => { preTaskMap[edge.targetId] = { sourceId: edge.sourceId, edgeLabel: edge.label || '' } }) tasks.forEach((task) => { tasksMap[task.code] = task }) const headEdges = tasks .filter((task) => !preTaskMap[task.code]) .map((task) => { return { name: '', preTaskCode: 0, preTaskVersion: 0, postTaskCode: task.code, postTaskVersion: task.version || 0, // conditionType and conditionParams are reserved conditionType: 0, conditionParams: {} } }) return edges .map((edge) => { return { name: edge.label, preTaskCode: edge.sourceId, preTaskVersion: tasksMap[edge.sourceId].version || 0, postTaskCode: edge.targetId, postTaskVersion: tasksMap[edge.targetId].version || 0, // conditionType and conditionParams are reserved conditionType: 0, conditionParams: {} } }) .concat(headEdges) }, backfill () { const tasks = this.tasks const locations = this.locations const connects = this.connects const json = this.buildGraphJSON(tasks, locations, connects) 
this.$refs.canvas.fromJSON(json) }, /** * Return to the previous process */ returnToPrevProcess () { let $name = this.$route.name.split('-') let subs = this.$route.query.subs let ids = subs.split(',') const last = ids.pop() this.$router.push({ path: `/${$name[0]}/${this.projectCode}/${$name[1]}/list/${last}`, query: ids.length > 0 ? { subs: ids.join(',') } : null }) }, toSubProcess ({ subProcessCode, subInstanceId }) { const tarIdentifier = this.type === 'instance' ? subInstanceId : subProcessCode const curIdentifier = this.type === 'instance' ? this.instanceId : this.definitionCode let subs = [] let olds = this.$route.query.subs if (olds) { subs = olds.split(',') subs.push(curIdentifier) } else { subs.push(curIdentifier) } let $name = this.$route.name.split('-') this.$router.push({ path: `/${$name[0]}/${this.projectCode}/${$name[1]}/list/${tarIdentifier}`, query: { subs: subs.join(',') } }) }, seeHistory (taskName) { this.$router.push({ name: 'task-instance', query: { processInstanceId: this.$route.params.code, taskName: taskName } }) }, /** * Start dialog */ startRunning (taskName) { this.startTaskName = taskName this.getStartCheck({ processDefinitionCode: this.definitionCode }).then( (res) => { this.startDialog = true } ) }, onUpdateStart () { this.startDialog = false }, closeStart () { this.startDialog = false }, /** * Task status */ refreshTaskStatus () { const instanceId = this.$route.params.id this.loading(true) this.getTaskState(instanceId) .then((res) => { this.$message(this.$t('Refresh status succeeded')) const { taskList } = res.data if (taskList) { this.taskInstances = taskList taskList.forEach((taskInstance) => { this.$refs.canvas.setNodeStatus({ code: taskInstance.taskCode, state: taskInstance.state, taskInstance }) }) } }) .finally(() => { this.loading(false) }) }, /** * Loading * @param {boolean} visible */ loading (visible) { if (visible) { this.spinner = this.$loading({ lock: true, text: this.$t('Loading...'), spinner: 'el-icon-loading', background: 'rgba(0, 0, 0, 0.4)', customClass: 'dag-fullscreen-loading' }) } else { this.spinner && this.spinner.close() } }, /** * change process definition version */ showVersions () { this.getProcessDefinitionVersionsPage({ pageNo: 1, pageSize: 10, code: this.definitionCode }) .then((res) => { let processDefinitionVersions = res.data.totalList let total = res.data.total let pageSize = res.data.pageSize let pageNo = res.data.currentPage // this.versionData.processDefinition.id = this.urlParam.id this.versionData.processDefinition.code = this.definitionCode this.versionData.processDefinition.version = this.version this.versionData.processDefinition.releaseState = this.releaseState this.versionData.processDefinitionVersions = processDefinitionVersions this.versionData.total = total this.versionData.pageNo = pageNo this.versionData.pageSize = pageSize this.versionDrawer = true }) .catch((e) => { this.$message.error(e.msg || '') }) }, closeVersion () { this.versionDrawer = false }, switchProcessVersion ({ version, processDefinitionCode }) { this.switchProcessDefinitionVersion({ version: version, code: processDefinitionCode }) .then((res) => { this.$message.success(this.$t('Switch Version Successfully')) this.closeVersion() this.definitionDetails.init() }) .catch((e) => { this.$message.error(e.msg || '') }) }, getProcessVersions ({ pageNo, pageSize, processDefinitionCode }) { this.getProcessDefinitionVersionsPage({ pageNo: pageNo, pageSize: pageSize, code: processDefinitionCode }) .then((res) => { this.versionData.processDefinitionVersions =
res.data.totalList this.versionData.total = res.data.total this.versionData.pageSize = res.data.pageSize this.versionData.pageNo = res.data.currentPage }) .catch((e) => { this.$message.error(e.msg || '') }) }, deleteProcessVersion ({ version, processDefinitionCode }) { this.deleteProcessDefinitionVersion({ version: version, code: processDefinitionCode }) .then((res) => { this.$message.success(res.msg || '') this.getProcessVersions({ pageNo: 1, pageSize: 10, processDefinitionCode: processDefinitionCode }) }) .catch((e) => { this.$message.error(e.msg || '') }) }, /** * Log dialog */ closeLogDialog () { this.logDialog = false this.logTaskInstance = null }, showLogDialog (taskDefinitionCode) { const taskInstance = this.taskInstances.find(taskInstance => { return taskInstance.taskCode === taskDefinitionCode }) if (taskInstance) { this.logTaskInstance = { id: taskInstance.id, type: taskInstance.taskType } this.logDialog = true } } } } </script> <style lang="scss" scoped> @import "./dag"; </style> <style lang="scss"> @import "./loading"; </style>
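For reference, a worked example of the relation objects that the `buildConnects` method in the file above produces; the task codes and versions below are invented illustration values, not data from this record:

```javascript
// Graph: task 1001 -> task 1002 (one drawn edge, no edge label).
// 1001 has no incoming edge, so it also yields a "head" relation with preTaskCode 0.
const connects = [
  { name: '', preTaskCode: 0, preTaskVersion: 0, postTaskCode: 1001, postTaskVersion: 1, conditionType: 0, conditionParams: {} },
  { name: '', preTaskCode: 1001, preTaskVersion: 1, postTaskCode: 1002, postTaskVersion: 1, conditionType: 0, conditionParams: {} }
]
```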
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,092
[Improvement][ui] When formatting the DAG, support tasks with no location data.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description There is no location data when a third party calls the API to associate tasks with workflows. On the workflow page, these tasks are not displayed, and they are skipped when the DAG is formatted. ### Use case When formatting the DAG on the page, also format and display the tasks that have no location data. ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
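A minimal sketch of one way the UI side could back-fill the missing coordinates before rendering, assuming the `tasks`/`locations` shapes used in this repo's dag.vue; the helper name `fillMissingLocations` and the fallback offsets are illustrative assumptions, not the actual patch:

```javascript
// Give tasks without saved coordinates a fallback position so the
// format/layout step can still place and display them.
// `tasks`: [{ code, name, ... }], `locations`: [{ taskCode, x, y }]
function fillMissingLocations (tasks, locations) {
  const located = new Set(locations.map((l) => l.taskCode))
  let row = 0
  const filled = tasks
    .filter((task) => !located.has(task.code))
    .map((task) => ({
      taskCode: task.code,
      x: 100, // assumed default column
      y: 100 + row++ * 140 // stack unplaced tasks vertically
    }))
  return locations.concat(filled)
}
```

After such a back-fill, formatting the DAG repositions every node anyway, so the exact fallback values would only matter until the first format.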
https://github.com/apache/dolphinscheduler/issues/7092
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-12-01T02:54:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import io from '@/module/io' export default { /** * Task status acquisition */ getTaskState ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}/tasks`, { processInstanceId: payload }, res => { state.taskInstances = res.data.taskList resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process definition status */ editProcessState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/${payload.code}/release`, { name: payload.name, releaseState: payload.releaseState }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get process definition versions pagination info */ getProcessDefinitionVersionsPage ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * switch process definition version */ switchProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete process definition version */ deleteProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process instance status */ editExecutorsState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/execute`, { processInstanceId: payload.processInstanceId, executeType: payload.executeType }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Verify that the DGA map name exists */ verifDAGName ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/verify-name`, { name: payload }, res => { state.name = payload resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get process definition DAG diagram details */ getProcessDetails ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload}`, { }, res => { // process definition code state.code = res.data.processDefinition.code // version state.version = res.data.processDefinition.version // name state.name = res.data.processDefinition.name // releaseState state.releaseState = res.data.processDefinition.releaseState // description state.description = 
res.data.processDefinition.description // taskRelationJson state.connects = res.data.processTaskRelationList // locations state.locations = JSON.parse(res.data.processDefinition.locations) // global params state.globalParams = res.data.processDefinition.globalParamList // timeout state.timeout = res.data.processDefinition.timeout // executionType state.executionType = res.data.processDefinition.executionType // tenantCode state.tenantCode = res.data.processDefinition.tenantCode || 'default' // tasks info state.tasks = res.data.taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get process definition DAG diagram details */ copyProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-copy`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get process definition DAG diagram details */ moveProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-move`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get all the items created by the logged in user */ getAllItems ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/created-and-authed', {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get the process instance DAG diagram details */ getInstancedetail ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}`, { }, res => { const { processDefinition, processTaskRelationList, taskDefinitionList } = res.data.dagData // code state.code = processDefinition.code // version state.version = processDefinition.version // name state.name = res.data.name // desc state.description = processDefinition.description // connects state.connects = processTaskRelationList // locations state.locations = JSON.parse(processDefinition.locations) // global params state.globalParams = processDefinition.globalParamList // timeout state.timeout = processDefinition.timeout // executionType state.executionType = processDefinition.executionType // tenantCode state.tenantCode = res.data.tenantCode || 'default' // tasks info state.tasks = taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) // startup parameters state.startup = _.assign(state.startup, _.pick(res.data, ['commandType', 'failureStrategy', 'processInstancePriority', 'workerGroup', 'warningType', 'warningGroupId', 'receivers', 'receiversCc'])) state.startup.commandParam = JSON.parse(res.data.commandParam) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Create process definition */ saveDAGchart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), 
taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, executionType: state.executionType, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Process definition update */ updateDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-definition/${payload}`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, executionType: state.executionType, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout, releaseState: state.releaseState }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Process instance update */ updateInstance ({ state }, instanceId) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-instances/${instanceId}`, { syncDefine: state.syncDefine, globalParams: JSON.stringify(state.globalParams), locations: JSON.stringify(state.locations), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, timeout: state.timeout }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Get a list of process definitions (sub-workflow usage is not paged) */ getProcessList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.processListS.length) { resolve() return } io.get(`projects/${state.projectCode}/process-definition/simple-list`, payload, res => { state.processListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions (list page usage with pagination) */ getProcessListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition`, payload, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of project */ getProjectList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.projectListS.length) { resolve() return } io.get('projects/created-and-authed', payload, res => { state.projectListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions by project code */ getProcessByProjectCode ({ state }, code) { return new Promise((resolve, reject) => { io.get(`projects/${code}/process-definition/all`, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get datasource */ getDatasourceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('datasources/list', { type: payload }, res => { resolve(res) }).catch(res => { reject(res) }) }) }, /** * get resources */ getResourcesList ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListS.length) { resolve() return } io.get('resources/list', { type: 'FILE' }, res => { state.resourcesListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get jar */ getResourcesListJar ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListJar.length) { resolve() return } io.get('resources/query-by-type', { type: 'FILE' }, res => { state.resourcesListJar = res.data resolve(res.data) 
}).catch(res => { reject(res) }) }) }, /** * Get process instance */ getProcessInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances`, payload, res => { state.instanceListS = res.data.totalList resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get alarm list */ getNotifyGroupList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-groups/list', res => { state.notifyGroupListS = _.map(res.data, v => { return { id: v.id, code: v.groupName, disabled: false } }) resolve(_.cloneDeep(state.notifyGroupListS)) }).catch(res => { reject(res) }) }) }, /** * Process definition startup interface */ processStart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-process-instance`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * View log */ getLog ({ state }, payload) { return new Promise((resolve, reject) => { io.get('log/detail', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get the process instance id according to the process definition id * @param taskId */ getSubProcessId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/query-sub-by-parent`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Called before the process definition starts */ getStartCheck ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-check`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Create timing */ createSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Preview timing */ previewSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/preview`, payload, res => { resolve(res.data) // alert(res.data) }).catch(e => { reject(e) }) }) }, /** * Timing list paging */ getScheduleList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing online */ scheduleOffline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/offline`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timed offline */ scheduleOnline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/online`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Edit timing */ updateSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/schedules/${payload.id}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete process instance */ deleteInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete process instance */ batchDeleteInstance ({ state }, payload) { return new Promise((resolve, reject) => { 
io.post(`projects/${state.projectCode}/process-instances/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete definition */ deleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete definition */ batchDeleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * export definition */ exportDefinition ({ state }, payload) { const downloadBlob = (data, fileNameS = 'json') => { if (!data) { return } const blob = new Blob([data]) const fileName = `${fileNameS}.json` if ('download' in document.createElement('a')) { // 不是IE浏览器 const url = window.URL.createObjectURL(blob) const link = document.createElement('a') link.style.display = 'none' link.href = url link.setAttribute('download', fileName) document.body.appendChild(link) link.click() document.body.removeChild(link) // 下载完成移除元素 window.URL.revokeObjectURL(url) // 释放掉blob对象 } else { // IE 10+ window.navigator.msSaveBlob(blob, fileName) } } io.post(`projects/${state.projectCode}/process-definition/batch-export`, { codes: payload.codes }, res => { downloadBlob(res, payload.fileName) }, e => { }, { responseType: 'blob' }) }, /** * Process instance get variable */ getViewvariables ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-variables`, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get udfs function based on data source */ getUdfList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('resources/udf-func/list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task instance list */ getTaskInstanceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-instances`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Force fail/kill/need_fault_tolerance task success */ forceTaskSuccess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-instances/${payload.taskInstanceId}/force-success`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task record list */ getTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query history task record list */ getHistoryTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/history-list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * tree chart */ getViewTree ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/view-tree`, { limit: payload.limit }, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * gantt chart */ getViewGantt ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-gantt`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query 
task node list */ getProcessTasksList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, getTaskListDefIdAll ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/batch-query-tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * remove timing */ deleteTiming ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/schedules/${payload.scheduleId}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, getResourceId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`resources/${payload.id}`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, genTaskCodeList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition/gen-task-codes`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query Task Definitions List Paging */ getTaskDefinitionsList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Delete Task Definition by code */ deleteTaskDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/task-definition/${payload.code}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Save Task Definition */ saveTaskDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-definition`, { taskDefinitionJson: JSON.stringify(payload.taskDefinitionJson) }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, updateTaskDefinition ({ state }, taskDefinition) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/task-definition/${taskDefinition.code}`, { taskDefinitionJsonObj: JSON.stringify(taskDefinition) }, res => { resolve(res) }).catch(e => { reject(e) }) }) } }
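The `exportDefinition` action above embeds a small blob-download helper whose inline comments are in Chinese; restated below with English comments for readability (same logic, shown as a standalone sketch):

```javascript
const downloadBlob = (data, fileNameS = 'json') => {
  if (!data) {
    return
  }
  const blob = new Blob([data])
  const fileName = `${fileNameS}.json`
  if ('download' in document.createElement('a')) {
    // Not an IE browser: create a temporary <a download> link and click it
    const url = window.URL.createObjectURL(blob)
    const link = document.createElement('a')
    link.style.display = 'none'
    link.href = url
    link.setAttribute('download', fileName)
    document.body.appendChild(link)
    link.click()
    document.body.removeChild(link) // remove the element once the download has started
    window.URL.revokeObjectURL(url) // release the blob object URL
  } else {
    // IE 10+
    window.navigator.msSaveBlob(blob, fileName)
  }
}
```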
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,772
[Improvement][API] API operation optimization
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ### Dag API 1. Create offline workflows and schedules 2. Update workflow basic information 3. Update schedule information by process definition code 4. Bring a workflow and its schedule online or offline at the same time 5. Delete a workflow and its schedule at the same time 6. An empty workflow cannot be brought online ### Task API 1. Specify the online or offline status when creating a task 2. Tasks that are online or have downstream dependents cannot be deleted 3. Interface to bring a task online or offline ### Relation API 1. Add a task to a workflow 2. Remove a task from a workflow 3. Move a task from workflow A to workflow B 4. Remove an upstream dependency from a task 5. Add an upstream dependency to a task ### Execution API 1. Interface to execute a specified task ![image](https://user-images.githubusercontent.com/42576980/141075049-faa6c4f8-9a0a-42f3-9924-0aa4f426f6a4.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
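As a rough illustration of the "interface to bring a task online or offline" item above, a Vuex action in the same io-wrapper style as this repo's actions.js might look as follows; the action name, endpoint path, and `releaseState` payload are assumptions for illustration, not a confirmed API:

```javascript
// Hypothetical sketch: bring a single task definition online or offline,
// written as it would sit inside the actions object of actions.js.
releaseTaskDefinition ({ state }, payload) {
  return new Promise((resolve, reject) => {
    // Assumed endpoint; the real route may differ.
    io.post(`projects/${state.projectCode}/task-definition/${payload.code}/release`, {
      releaseState: payload.releaseState // e.g. 'ONLINE' or 'OFFLINE'
    }, res => {
      resolve(res)
    }).catch(e => {
      reject(e)
    })
  })
},
```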
https://github.com/apache/dolphinscheduler/issues/6772
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-11-10T08:13:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/canvas/canvas.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="dag-canvas"> <dag-taskbar @on-drag-start="onDragStart" /> <div class="dag-container" ref="container" @dragenter.prevent @dragover.prevent @dragleave.prevent @drop.stop.prevent="onDrop" > <div ref="paper" class="paper"></div> <div ref="minimap" class="minimap"></div> <div class="scale-slider"> <span class="scale-title">{{$t('dagScale')}}</span> <el-slider v-model="scale" vertical :max="2" :min="0.2" :step="0.2" :marks="SCALE_MARKS" @input='scaleChange' /> </div> <context-menu ref="contextMenu" /> </div> <layout-config-modal ref="layoutModal" @submit="format" /> </div> </template> <script> import _ from 'lodash' import { Graph, DataUri } from '@antv/x6' import dagTaskbar from './taskbar.vue' import contextMenu from './contextMenu.vue' import layoutConfigModal, { LAYOUT_TYPE } from './layoutConfigModal.vue' import { NODE, EDGE, X6_NODE_NAME, X6_EDGE_NAME, NODE_STATUS_MARKUP } from './x6-helper' import { DagreLayout, GridLayout } from '@antv/layout' import { tasksType, tasksState } from '../config' import { mapActions, mapMutations, mapState } from 'vuex' import nodeStatus from './nodeStatus' import x6StyleMixin from './x6-style-mixin' const SCALE_MARKS = { 0.2: '0.2', 1: '1', 2: '2' } export default { name: 'dag-canvas', data () { return { graph: null, // Used to calculate the context menu location originalScrollPosition: { left: 0, top: 0 }, editable: true, dragging: { // Distance from the mouse to the top-left corner of the dragging element x: 0, y: 0, type: '' }, // The canvas scale scale: 1, SCALE_MARKS } }, provide () { return { dagCanvas: this } }, mixins: [x6StyleMixin], inject: ['dagChart'], components: { dagTaskbar, contextMenu, layoutConfigModal }, computed: { ...mapState('dag', ['tasks']) }, methods: { ...mapActions('dag', ['genTaskCodeList']), ...mapMutations('dag', ['removeTask']), /** * Recalculate the paper width and height */ paperResize () { const w = this.$el.offsetWidth const h = this.$el.offsetHeight this.graph.resize(w, h) }, /** * Init graph * This will be called in the dag-chart mounted event * @param {boolean} uneditable */ graphInit (editable) { const self = this this.editable = !!editable const paper = this.$refs.paper const minimap = this.$refs.minimap const graph = (this.graph = new Graph({ container: paper, selecting: { enabled: true, multiple: true, rubberband: true, rubberEdge: true, movable: true, showNodeSelectionBox: false }, scaling: { min: 0.2, max: 2 }, mousewheel: { enabled: true, modifiers: ['ctrl', 'meta'] }, scroller: true, grid: { size: 10, visible: true }, snapline: true, minimap: { enabled: true, container: minimap, scalable: false, width: 200, height: 120 }, interacting: { edgeLabelMovable: false, nodeMovable: !!editable, magnetConnectable: !!editable }, 
connecting: { // Whether multiple edges can be created between the same start node and end allowMulti: false, // Whether a point is allowed to connect to a blank position on the canvas allowBlank: false, // The start node and the end node are the same node allowLoop: false, // Whether an edge is allowed to link to another edge allowEdge: false, // Whether edges are allowed to link to nodes allowNode: true, // Whether to allow edge links to ports allowPort: false, // Whether all available ports or nodes are highlighted when you drag the edge highlight: true, createEdge () { return graph.createEdge({ shape: X6_EDGE_NAME }) }, validateConnection (data) { const { sourceCell, targetCell } = data if ( sourceCell && targetCell && sourceCell.isNode() && targetCell.isNode() ) { const edgeData = { sourceId: Number(sourceCell.id), targetId: Number(targetCell.id) } if (!self.edgeIsValid(edgeData)) { return false } } return true } }, highlighting: { nodeAvailable: { name: 'className', args: { className: 'available' } }, magnetAvailable: { name: 'className', args: { className: 'available' } }, magnetAdsorbed: { name: 'className', args: { className: 'adsorbed' } } } })) this.registerX6Shape() this.bindGraphEvent() this.originalScrollPosition = graph.getScrollbarPosition() }, /** * Register custom shapes */ registerX6Shape () { Graph.unregisterNode(X6_NODE_NAME) Graph.unregisterEdge(X6_EDGE_NAME) Graph.registerNode(X6_NODE_NAME, { ...NODE }) Graph.registerEdge(X6_EDGE_NAME, { ...EDGE }) }, /** * Bind grap event */ bindGraphEvent () { this.bindStyleEvent(this.graph) // update scale bar this.graph.on('scale', ({ sx }) => { this.scale = sx }) // right click this.graph.on('node:contextmenu', ({ x, y, cell }) => { const { left, top } = this.graph.getScrollbarPosition() const o = this.originalScrollPosition this.$refs.contextMenu.show(x + (o.left - left), y + (o.top - top)) this.$refs.contextMenu.setCurrentTask({ name: cell.data.taskName, type: cell.data.taskType, code: Number(cell.id) }) }) // node double click this.graph.on('node:dblclick', ({ cell }) => { this.dagChart.openFormModel(Number(cell.id), cell.data.taskType) }) // create edge label this.graph.on('edge:dblclick', ({ cell }) => { const labelName = this.getEdgeLabelName(cell) this.dagChart.$refs.edgeEditModel.show({ id: cell.id, label: labelName }) }) // Make sure the edge starts with node, not port this.graph.on('edge:connected', ({ isNew, edge }) => { if (isNew) { const sourceNode = edge.getSourceNode() edge.setSource(sourceNode) } }) }, /** * @param {Edge|string} edge */ getEdgeLabelName (edge) { if (typeof edge === 'string') edge = this.graph.getCellById(edge) const labels = edge.getLabels() const labelName = _.get(labels, ['0', 'attrs', 'label', 'text'], '') return labelName }, /** * Set edge label by id * @param {string} id * @param {string} label */ setEdgeLabel (id, label) { const edge = this.graph.getCellById(id) edge.setLabels(label) }, /** * @param {number} limit * @param {string} text * Each Chinese character is equal to two chars */ truncateText (text, n) { const exp = /[\u4E00-\u9FA5]/ let res = '' let len = text.length let chinese = text.match(new RegExp(exp, 'g')) if (chinese) { len += chinese.length } if (len > n) { let i = 0 let acc = 0 while (true) { let char = text[i] if (exp.test(char)) { acc += 2 } else { acc++ } if (acc > n) break res += char i++ } res += '...' 
} else { res = text } return res }, /** * Set node name by id * @param {string|number} id * @param {string} name */ setNodeName (id, name) { id += '' const node = this.graph.getCellById(id) if (node) { const truncation = this.truncateText(name, 18) node.attr('title/text', truncation) node.setData({ taskName: name }) } }, /** * Convert the graph to JSON * @return {{cells:Cell[]}} */ toJSON () { return this.graph.toJSON() }, /** * Generate graph with JSON */ fromJSON (json) { this.graph.fromJSON(json) }, /** * getNodes * @return {Node[]} */ // interface Node { // id: number; // position: {x:number;y:number}; // data: {taskType:string;taskName:string;} // } getNodes () { const nodes = this.graph.getNodes() return nodes.map((node) => { const position = node.getPosition() const data = node.getData() return { id: Number(node.id), position: position, data: data } }) }, /** * getEdges * @return {Edge[]} Edge is inherited from the Cell */ // interface Edge { // id: string; // label: string; // sourceId: number; // targetId: number; // } getEdges () { const edges = this.graph.getEdges() return edges.map((edge) => { const labelData = edge.getLabelAt(0) return { id: edge.id, label: _.get(labelData, ['attrs', 'label', 'text'], ''), sourceId: Number(edge.getSourceCellId()), targetId: Number(edge.getTargetCellId()) } }) }, /** * downloadPNG * @param {string} filename */ downloadPNG (fileName = 'chart') { this.graph.toPNG( (dataUri) => { DataUri.downloadDataUri(dataUri, `${fileName}.png`) }, { padding: { top: 50, right: 50, bottom: 50, left: 50 }, backgroundColor: '#f2f3f7' } ) }, showLayoutModal () { const layoutModal = this.$refs.layoutModal if (layoutModal) { layoutModal.show() } }, /** * format * @desc Auto layout use @antv/layout */ format (layoutConfig) { this.graph.cleanSelection() let layoutFunc = null if (layoutConfig.type === LAYOUT_TYPE.DAGRE) { layoutFunc = new DagreLayout({ type: LAYOUT_TYPE.DAGRE, rankdir: 'LR', align: 'UL', // Calculate the node spacing based on the edge label length ranksepFunc: (d) => { const edges = this.graph.getOutgoingEdges(d.id) let max = 0 if (edges && edges.length > 0) { edges.forEach((edge) => { const edgeView = this.graph.findViewByCell(edge) const labelWidth = +edgeView.findAttr( 'width', _.get(edgeView, ['labelSelectors', '0', 'body'], null) ) max = Math.max(max, labelWidth) }) } return layoutConfig.ranksep + max }, nodesep: layoutConfig.nodesep, controlPoints: true }) } else if (layoutConfig.type === LAYOUT_TYPE.GRID) { layoutFunc = new GridLayout({ type: LAYOUT_TYPE.GRID, preventOverlap: true, preventOverlapPadding: layoutConfig.padding, sortBy: '_index', rows: layoutConfig.rows || undefined, cols: layoutConfig.cols || undefined, nodeSize: 220 }) } const json = this.toJSON() const nodes = json.cells .filter((cell) => cell.shape === X6_NODE_NAME) .map((item) => { return { ...item, // sort by code aesc _index: -item.id } }) const edges = json.cells.filter((cell) => cell.shape === X6_EDGE_NAME) const newModel = layoutFunc.layout({ nodes: nodes, edges: edges }) this.fromJSON(newModel) }, /** * add a node to the graph * @param {string|number} id * @param {string} taskType * @param {{x:number;y:number}} coordinate Default is { x: 100, y: 100 } */ addNode (id, taskType, coordinate = { x: 100, y: 100 }) { id += '' if (!tasksType[taskType]) { console.warn(`taskType:${taskType} is invalid!`) return } const node = this.genNodeJSON(id, taskType, '', coordinate) this.graph.addNode(node) }, /** * generate node json * @param {number|string} id * @param {string} taskType * 
@param {{x:number;y:number}} coordinate Default is { x: 100, y: 100 } */ genNodeJSON (id, taskType, taskName, coordinate = { x: 100, y: 100 }) { id += '' const url = require(`../images/task-icos/${taskType.toLocaleLowerCase()}.png`) const truncation = taskName ? this.truncateText(taskName, 18) : id return { id: id, shape: X6_NODE_NAME, x: coordinate.x, y: coordinate.y, data: { taskType: taskType, taskName: taskName }, attrs: { image: { // Use href instead of xlink:href, you may lose the icon when downloadPNG 'xlink:href': url }, title: { text: truncation } } } }, /** * generate edge json * @param {number|string} sourceId * @param {number|string} targetId * @param {string} label */ genEdgeJSON (sourceId, targetId, label = '') { sourceId += '' targetId += '' return { shape: X6_EDGE_NAME, source: { cell: sourceId }, target: { cell: targetId }, labels: label ? [label] : undefined } }, /** * remove a node * @param {string|number} id NodeId */ removeNode (id) { id += '' this.graph.removeNode(id) this.removeTask(+id) }, /** * remove an edge * @param {string} id EdgeId */ removeEdge (id) { this.graph.removeEdge(id) }, /** * remove multiple cells * @param {Cell[]} cells */ removeCells (cells) { this.graph.removeCells(cells) cells.forEach((cell) => { if (cell.isNode()) { this.removeTask(+cell.id) } }) }, /** * Verify whether edge is valid * The number of edges start with CONDITIONS task cannot be greater than 2 */ edgeIsValid (edge) { const { sourceId } = edge const sourceTask = this.tasks.find((task) => task.code === sourceId) if (sourceTask.taskType === 'CONDITIONS') { const edges = this.getEdges() return edges.filter((e) => e.sourceId === sourceTask.code).length <= 2 } return true }, /** * Gets the current selections * @return {Cell[]} */ getSelections () { return this.graph.getSelectedCells() }, /** * Lock scroller */ lockScroller () { this.graph.lockScroller() }, /** * Unlock scroller */ unlockScroller () { this.graph.unlockScroller() }, /** * set node status icon * @param {number} code * @param {string} state */ setNodeStatus ({ code, state, taskInstance }) { code += '' const stateProps = tasksState[state] const node = this.graph.getCellById(code) if (node) { // Destroy the previous dom node.removeMarkup() node.setMarkup(NODE.markup.concat(NODE_STATUS_MARKUP)) const nodeView = this.graph.findViewByCell(node) const el = nodeView.find('div')[0] nodeStatus({ stateProps, taskInstance }).$mount(el) } }, /** * Drag && Drop Event */ onDragStart (e, taskType) { if (!this.editable) { e.preventDefault() return } this.dragging = { x: e.offsetX, y: e.offsetY, type: taskType.name } }, onDrop (e) { const { type } = this.dragging const { x, y } = this.calcGraphCoordinate(e.clientX, e.clientY) this.genTaskCodeList({ genNum: 1 }) .then((res) => { const [code] = res this.addNode(code, type, { x, y }) this.dagChart.openFormModel(code, type) }) .catch((err) => { console.error(err) }) }, calcGraphCoordinate (mClientX, mClientY) { // Distance from the mouse to the top-left corner of the container; const { left: cX, top: cY } = this.$refs.container.getBoundingClientRect() const mouseX = mClientX - cX const mouseY = mClientY - cY // The distance that paper has been scrolled const { left: sLeft, top: sTop } = this.graph.getScrollbarPosition() const { left: oLeft, top: oTop } = this.originalScrollPosition const scrollX = sLeft - oLeft const scrollY = sTop - oTop // Distance from the mouse to the top-left corner of the dragging element; const { x: eX, y: eY } = this.dragging return { x: mouseX + scrollX - eX, y: mouseY 
+ scrollY - eY } }, /** * Get prev nodes by code * @param {number} code * node1 -> node2 -> node3 * getPrevNodes(node2.code) => [node1] */ getPrevNodes (code) { const nodes = this.getNodes() const edges = this.getEdges() const nodesMap = {} nodes.forEach((node) => { nodesMap[node.id] = node }) return edges .filter((edge) => edge.targetId === code) .map((edge) => nodesMap[edge.sourceId]) }, /** * set prev nodes * @param {number} code * @param {number[]} preNodeCodes * @param {boolean} override If set to true, setPreNodes will delete all edges that end with the node and rebuild */ setPreNodes (code, preNodeCodes, override) { const edges = this.getEdges() const currPreCodes = [] edges.forEach((edge) => { if (edge.targetId === code) { if (override) { this.removeEdge(edge.id) } else { currPreCodes.push(edge.sourceId) } } }) preNodeCodes.forEach((preCode) => { if (currPreCodes.includes(preCode) || preCode === code) return const edge = this.genEdgeJSON(preCode, code) this.graph.addEdge(edge) }) }, /** * Get post nodes by code * @param {number} code * node1 -> node2 -> node3 * getPostNodes(node2.code) => [node3] */ getPostNodes (code) { const nodes = this.getNodes() const edges = this.getEdges() const nodesMap = {} nodes.forEach((node) => { nodesMap[node.id] = node }) return edges .filter((edge) => edge.sourceId === code) .map((edge) => nodesMap[edge.targetId]) }, /** * set post nodes * @param {number} code * @param {number[]} postNodeCodes * @param {boolean} override If set to true, setPreNodes will delete all edges that end with the node and rebuild */ setPostNodes (code, postNodeCodes, override) { const edges = this.getEdges() const currPostCodes = [] edges.forEach((edge) => { if (edge.sourceId === code) { if (override) { this.removeEdge(edge.id) } else { currPostCodes.push(edge.targetId) } } }) postNodeCodes.forEach((postCode) => { if (currPostCodes.includes(postCode) || postCode === code) return const edge = this.genEdgeJSON(code, postCode) this.graph.addEdge(edge) }) }, /** * Navigate to cell * @param {string} taskName */ navigateTo (taskName) { const nodes = this.getNodes() nodes.forEach((node) => { if (node.data.taskName === taskName) { const id = node.id const cell = this.graph.getCellById(id) this.graph.scrollToCell(cell, { animation: { duration: 600 } }) this.graph.cleanSelection() this.graph.select(cell) } }) }, /** * Canvas scale */ scaleChange (val) { this.graph.zoomTo(val) } } } </script> <style lang="scss" scoped> @import "./canvas"; </style> <style lang="scss"> @import "./x6-style"; </style>
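The `format` method in canvas.vue above delegates the actual positioning to `@antv/layout`. A minimal standalone sketch of that call, with simplified node/edge shapes assumed for illustration:

```javascript
import { DagreLayout } from '@antv/layout'

// Mirrors the options canvas.vue passes: a left-to-right dagre layout.
const layout = new DagreLayout({
  type: 'dagre',
  rankdir: 'LR',
  nodesep: 50,
  ranksep: 50
})

// layout() returns a new model whose nodes carry computed x/y coordinates.
const model = layout.layout({
  nodes: [{ id: '1' }, { id: '2' }],
  edges: [{ source: '1', target: '2' }]
})
console.log(model.nodes.map((n) => [n.id, n.x, n.y]))
```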
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,772
[Improvement][API] API operation optimization
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ### Dag API 1. Create offline workflows and schedules 2. Update workflow basic information 3. Update schedule information by process definition code 4. Bring a workflow and its schedule online or offline at the same time 5. Delete a workflow and its schedule at the same time 6. An empty workflow cannot be brought online ### Task API 1. Specify the online or offline status when creating a task 2. Tasks that are online or have downstream dependents cannot be deleted 3. Interface to bring a task online or offline ### Relation API 1. Add a task to a workflow 2. Remove a task from a workflow 3. Move a task from workflow A to workflow B 4. Remove an upstream dependency from a task 5. Add an upstream dependency to a task ### Execution API 1. Interface to execute a specified task ![image](https://user-images.githubusercontent.com/42576980/141075049-faa6c4f8-9a0a-42f3-9924-0aa4f426f6a4.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6772
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-11-10T08:13:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/canvas/layoutConfigModal.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <el-dialog
    :title="$t('Format DAG')"
    :visible.sync="visible"
    width="500px"
    class="dag-layout-modal"
    :append-to-body="true"
  >
    <el-form
      ref="form"
      :model="form"
      label-width="100px"
      class="dag-layout-form"
    >
      <el-form-item :label="$t('layoutType')">
        <el-radio-group v-model="form.type">
          <el-radio label="grid">{{ $t("gridLayout") }}</el-radio>
          <el-radio label="dagre">{{ $t("dagreLayout") }}</el-radio>
        </el-radio-group>
      </el-form-item>
      <el-form-item :label="$t('rows')" v-if="form.type === LAYOUT_TYPE.GRID">
        <el-input-number
          v-model="form.rows"
          :min="0"
          size="small"
        ></el-input-number>
      </el-form-item>
      <el-form-item :label="$t('cols')" v-if="form.type === LAYOUT_TYPE.GRID">
        <el-input-number
          v-model="form.cols"
          :min="0"
          size="small"
        ></el-input-number>
      </el-form-item>
    </el-form>
    <span slot="footer" class="dialog-footer">
      <el-button size="small" @click="close">{{ $t("Cancel") }}</el-button>
      <el-button size="small" type="primary" @click="submit">{{
        $t("Confirm")
      }}</el-button>
    </span>
  </el-dialog>
</template>

<script>
export const LAYOUT_TYPE = {
  GRID: 'grid',
  DAGRE: 'dagre'
}

export default {
  data () {
    return {
      visible: false,
      form: {
        type: LAYOUT_TYPE.DAGRE,
        rows: 0,
        cols: 0,
        padding: 50,
        ranksep: 50,
        nodesep: 50
      },
      LAYOUT_TYPE
    }
  },
  methods: {
    show () {
      this.visible = true
    },
    close () {
      this.visible = false
    },
    submit () {
      this.$emit('submit', this.form)
      this.close()
    }
  }
}
</script>

<style lang="scss" scoped>
.dag-layout-modal {
  ::v-deep .el-dialog__header {
    border-bottom: solid 1px #d4d4d4;
  }
  ::v-deep .dag-layout-form {
    margin-top: 20px;
  }
  ::v-deep .el-radio {
    margin-bottom: 0;
  }
  .el-form-item {
    margin-bottom: 10px;
  }
}
</style>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,772
[Improvement][API] API operation optimization
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ### Dag API 1. Create offline workflows and schedules 2. Update workflow basic information 3. Update schedule information by process definition code 4. Bring a workflow and its schedule online or offline at the same time 5. Delete a workflow and its schedule at the same time 6. An empty workflow cannot be brought online ### Task API 1. Specify the online or offline status when creating a task 2. Tasks that are online or have downstream dependents cannot be deleted 3. Interface to bring a task online or offline ### Relation API 1. Add a task to a workflow 2. Remove a task from a workflow 3. Move a task from workflow A to workflow B 4. Remove an upstream dependency from a task 5. Add an upstream dependency to a task ### Execution API 1. Interface to execute a specified task ![image](https://user-images.githubusercontent.com/42576980/141075049-faa6c4f8-9a0a-42f3-9924-0aa4f426f6a4.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6772
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-11-10T08:13:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div :class="['dag-chart', fullScreen ? 'full-screen' : '']"> <dag-toolbar /> <dag-canvas ref="canvas" /> <el-drawer :visible.sync="taskDrawer" size="" :with-header="false" :wrapperClosable="false" class="task-drawer" > <!-- fix the bug that Element-ui(2.13.2) auto focus on the first input --> <div style="width: 0px; height: 0px; overflow: hidden"> <el-input type="text" /> </div> <m-form-model v-if="taskDrawer" :nodeData="nodeData" @seeHistory="seeHistory" @addTaskInfo="addTaskInfo" @close="closeTaskDrawer" @onSubProcess="toSubProcess" :type="type" ></m-form-model> </el-drawer> <el-dialog :title="$t('Set the DAG diagram name')" :visible.sync="saveDialog" width="auto" > <m-udp ref="mUdp" @onUdp="onSave" @close="cancelSave"></m-udp> </el-dialog> <el-dialog :title="$t('Please set the parameters before starting')" :visible.sync="startDialog" width="auto" > <m-start :startData="{ code: definitionCode, name: name }" :startNodeList="startTaskName" :sourceType="'contextmenu'" @onUpdateStart="onUpdateStart" @closeStart="closeStart" ></m-start> </el-dialog> <edge-edit-model ref="edgeEditModel" /> <el-drawer :visible.sync="versionDrawer" size="" :with-header="false"> <!-- fix the bug that Element-ui(2.13.2) auto focus on the first input --> <div style="width: 0px; height: 0px; overflow: hidden"> <el-input type="text" /> </div> <m-versions :versionData="versionData" :isInstance="type === 'instance'" @mVersionSwitchProcessDefinitionVersion="switchProcessVersion" @mVersionGetProcessDefinitionVersionsPage="getProcessVersions" @mVersionDeleteProcessDefinitionVersion="deleteProcessVersion" @closeVersion="closeVersion" ></m-versions> </el-drawer> <m-log v-if="type === 'instance' && logDialog" :item="logTaskInstance" source='dag' :task-instance-id="logTaskInstance.id" @close="closeLogDialog" ></m-log> </div> </template> <script> import { debounce } from 'lodash' import dagToolbar from './canvas/toolbar.vue' import dagCanvas from './canvas/canvas.vue' import mFormModel from '../_source/formModel/formModel.vue' import { mapActions, mapState, mapMutations } from 'vuex' import mUdp from '../_source/udp/udp.vue' import mStart from '../../projects/pages/definition/pages/list/_source/start.vue' import edgeEditModel from './canvas/edgeEditModel.vue' import mVersions from '../../projects/pages/definition/pages/list/_source/versions.vue' import mLog from './formModel/log.vue' const DEFAULT_NODE_DATA = { id: null, taskType: '', self: {}, instanceId: null } export default { name: 'dag-chart', components: { dagCanvas, dagToolbar, mFormModel, mUdp, mStart, edgeEditModel, mVersions, mLog }, provide () { return { dagChart: this } }, inject: ['definitionDetails'], props: { type: String, releaseState: String }, data () { return { definitionCode: 0, 
// full screen mode fullScreen: false, // whether the task config drawer is visible taskDrawer: false, nodeData: { ...DEFAULT_NODE_DATA }, // whether the save dialog is visible saveDialog: false, // whether the start dialog is visible startDialog: false, startTaskName: '', // whether the version drawer is visible versionDrawer: false, versionData: { processDefinition: { id: null, version: '', releaseState: '' }, processDefinitionVersions: [], total: null, pageNo: null, pageSize: null }, // the task status refresh timer statusTimer: null, // the process instance id instanceId: -1, // log dialog logDialog: false, logTaskInstance: null, taskInstances: [] } }, mounted () { this.setIsEditDag(false) if (this.type === 'instance') { this.instanceId = this.$route.params.id this.definitionCode = this.$route.query.code || this.code } else if (this.type === 'definition') { this.definitionCode = this.$route.params.code } // auto resize canvas this.resizeDebounceFunc = debounce(this.canvasResize, 200) window.addEventListener('resize', this.resizeDebounceFunc) // init graph this.$refs.canvas.graphInit(!this.isDetails) // backfill graph with tasks, locations and connects this.backfill() // refresh task status if (this.type === 'instance') { this.refreshTaskStatus() // status polling this.statusTimer = setInterval(() => { this.refreshTaskStatus() }, 90000) } }, beforeDestroy () { this.resetParams() clearInterval(this.statusTimer) window.removeEventListener('resize', this.resizeDebounceFunc) }, computed: { ...mapState('dag', [ 'tasks', 'locations', 'connects', 'name', 'isDetails', 'projectCode', 'version', 'code' ]) }, methods: { ...mapActions('dag', [ 'saveDAGchart', 'updateInstance', 'updateDefinition', 'getTaskState', 'getStartCheck', 'genTaskCodeList', 'switchProcessDefinitionVersion', 'getProcessDefinitionVersionsPage', 'deleteProcessDefinitionVersion' ]), ...mapMutations('dag', [ 'addTask', 'setConnects', 'resetParams', 'setIsEditDag', 'setName', 'setLocations', 'resetLocalParam' ]), /** * Resize the canvas */ canvasResize () { const canvas = this.$refs.canvas canvas && canvas.paperResize() }, /** * Toggle full screen */ toggleFullScreen () { this.fullScreen = !this.fullScreen this.$nextTick(this.canvasResize) }, /** * Task Drawer * @param {boolean} visible */ toggleTaskDrawer (visible) { this.taskDrawer = visible }, /** * Set the current node data */ setNodeData (nodeData) { this.nodeData = Object.assign({}, DEFAULT_NODE_DATA, nodeData) }, /** * open form model * @desc Edit task config * @param {number} taskCode * @param {string} taskType */ openFormModel (taskCode, taskType) { this.setNodeData({ id: taskCode, taskType: taskType }) this.toggleTaskDrawer(true) }, addTaskInfo ({ item }) { this.addTask(item) this.$refs.canvas.setNodeName(item.code, item.name) this.taskDrawer = false }, closeTaskDrawer ({ flag }) { if (flag) { const canvas = this.$refs.canvas canvas.removeNode(this.nodeData.id) } this.taskDrawer = false }, /** * Save dialog */ toggleSaveDialog (value) { this.saveDialog = value if (value) { this.$nextTick(() => { this.$refs.mUdp.reloadParam() }) } }, onSave (sourceType) { this.toggleSaveDialog(false) return new Promise((resolve, reject) => { let tasks = this.tasks || [] const edges = this.$refs.canvas.getEdges() const nodes = this.$refs.canvas.getNodes() if (!nodes.length) { reject(this.$t('Failed to create node to save')) } const connects = this.buildConnects(edges, tasks) this.setConnects(connects) const locations = nodes.map((node) => { return { taskCode: node.id, x: node.position.x, y: node.position.y } })
this.setLocations(locations) resolve({ connects: connects, tasks: tasks, locations: locations }) }) .then((res) => { if (this.verifyConditions(res.tasks)) { this.loading(true) const isEdit = !!this.definitionCode if (isEdit) { const methodName = this.type === 'instance' ? 'updateInstance' : 'updateDefinition' const methodParam = this.type === 'instance' ? this.instanceId : this.definitionCode // Edit return this[methodName](methodParam) .then((res) => { this.$message({ message: res.msg, type: 'success', offset: 80 }) if (this.type === 'instance') { this.$router.push({ path: `/projects/${this.projectCode}/instance/list` }) } else { this.$router.push({ path: `/projects/${this.projectCode}/definition/list` }) } }) .catch((e) => { this.$message.error(e.msg || '') }) .finally((e) => { this.loading(false) }) } else { // Create return this.saveDAGchart() .then((res) => { this.$message.success(res.msg) // source @/conf/home/pages/dag/_source/editAffirmModel/index.js if (sourceType !== 'affirm') { // Jump process definition this.$router.push({ name: 'projects-definition-list' }) } }) .catch((e) => { this.setName('') this.$message.error(e.msg || '') }) .finally((e) => { this.loading(false) }) } } }) .catch((err) => { let msg = typeof err === 'string' ? err : err.msg || '' this.$message.error(msg) }) }, verifyConditions (value) { let tasks = value let bool = true tasks.map((v) => { if ( v.taskType === 'CONDITIONS' && (v.taskParams.conditionResult.successNode[0] === '' || v.taskParams.conditionResult.successNode[0] === null || v.taskParams.conditionResult.failedNode[0] === '' || v.taskParams.conditionResult.failedNode[0] === null) ) { bool = false return false } }) if (!bool) { this.$message.warning( `${this.$t( 'Successful branch flow and failed branch flow are required' )}` ) return false } return true }, cancelSave () { this.toggleSaveDialog(false) }, /** * build graph json */ buildGraphJSON (tasks, locations, connects) { const nodes = [] const edges = [] tasks.forEach((task) => { const location = locations.find((l) => l.taskCode === task.code) || {} const node = this.$refs.canvas.genNodeJSON( task.code, task.taskType, task.name, { x: location.x, y: location.y } ) nodes.push(node) }) connects .filter((r) => !!r.preTaskCode) .forEach((c) => { const edge = this.$refs.canvas.genEdgeJSON( c.preTaskCode, c.postTaskCode, c.name ) edges.push(edge) }) return { nodes, edges } }, /** * Build connects by edges and tasks * @param {Edge[]} edges * @param {Task[]} tasks * @returns */ buildConnects (edges, tasks) { const preTaskMap = {} const tasksMap = {} edges.forEach((edge) => { preTaskMap[edge.targetId] = { sourceId: edge.sourceId, edgeLabel: edge.label || '' } }) tasks.forEach((task) => { tasksMap[task.code] = task }) const headEdges = tasks .filter((task) => !preTaskMap[task.code]) .map((task) => { return { name: '', preTaskCode: 0, preTaskVersion: 0, postTaskCode: task.code, postTaskVersion: task.version || 0, // conditionType and conditionParams are reserved conditionType: 0, conditionParams: {} } }) return edges .map((edge) => { return { name: edge.label, preTaskCode: edge.sourceId, preTaskVersion: tasksMap[edge.sourceId].version || 0, postTaskCode: edge.targetId, postTaskVersion: tasksMap[edge.targetId].version || 0, // conditionType and conditionParams are reserved conditionType: 0, conditionParams: {} } }) .concat(headEdges) }, backfill () { const tasks = this.tasks const locations = this.locations const connects = this.connects const json = this.buildGraphJSON(tasks, locations, connects) 
this.$refs.canvas.fromJSON(json) }, /** * Return to the previous process */ returnToPrevProcess () { let $name = this.$route.name.split('-') let subs = this.$route.query.subs let ids = subs.split(',') const last = ids.pop() this.$router.push({ path: `/${$name[0]}/${this.projectCode}/${$name[1]}/list/${last}`, query: ids.length > 0 ? { subs: ids.join(',') } : null }) }, toSubProcess ({ subProcessCode, subInstanceId }) { const tarIdentifier = this.type === 'instance' ? subInstanceId : subProcessCode const curIdentifier = this.type === 'instance' ? this.instanceId : this.definitionCode let subs = [] let olds = this.$route.query.subs if (olds) { subs = olds.split(',') subs.push(curIdentifier) } else { subs.push(curIdentifier) } let $name = this.$route.name.split('-') this.$router.push({ path: `/${$name[0]}/${this.projectCode}/${$name[1]}/list/${tarIdentifier}`, query: { subs: subs.join(',') } }) }, seeHistory (taskName) { this.$router.push({ name: 'task-instance', query: { processInstanceId: this.$route.params.code, taskName: taskName } }) }, /** * Start dialog */ startRunning (taskName) { this.startTaskName = taskName this.getStartCheck({ processDefinitionCode: this.definitionCode }).then( (res) => { this.startDialog = true } ) }, onUpdateStart () { this.startDialog = false }, closeStart () { this.startDialog = false }, /** * Task status */ refreshTaskStatus () { const instanceId = this.$route.params.id this.loading(true) this.getTaskState(instanceId) .then((res) => { this.$message(this.$t('Refresh status succeeded')) const { taskList } = res.data if (taskList) { this.taskInstances = taskList taskList.forEach((taskInstance) => { this.$refs.canvas.setNodeStatus({ code: taskInstance.taskCode, state: taskInstance.state, taskInstance }) }) } }) .finally(() => { this.loading(false) }) }, /** * Loading * @param {boolean} visible */ loading (visible) { if (visible) { this.spinner = this.$loading({ lock: true, text: this.$t('Loading...'), spinner: 'el-icon-loading', background: 'rgba(0, 0, 0, 0.4)', customClass: 'dag-fullscreen-loading' }) } else { this.spinner && this.spinner.close() } }, /** * change process definition version */ showVersions () { this.getProcessDefinitionVersionsPage({ pageNo: 1, pageSize: 10, code: this.definitionCode }) .then((res) => { let processDefinitionVersions = res.data.totalList let total = res.data.total let pageSize = res.data.pageSize let pageNo = res.data.currentPage this.versionData.processDefinition.code = this.definitionCode this.versionData.processDefinition.version = this.version this.versionData.processDefinition.releaseState = this.releaseState this.versionData.processDefinitionVersions = processDefinitionVersions this.versionData.total = total this.versionData.pageNo = pageNo this.versionData.pageSize = pageSize this.versionDrawer = true }) .catch((e) => { this.$message.error(e.msg || '') }) }, closeVersion () { this.versionDrawer = false }, switchProcessVersion ({ version, processDefinitionCode }) { this.switchProcessDefinitionVersion({ version: version, code: processDefinitionCode }) .then((res) => { this.$message.success(this.$t('Switch Version Successfully')) this.closeVersion() this.definitionDetails.init() }) .catch((e) => { this.$message.error(e.msg || '') }) }, getProcessVersions ({ pageNo, pageSize, processDefinitionCode }) { this.getProcessDefinitionVersionsPage({ pageNo: pageNo, pageSize: pageSize, code: processDefinitionCode }) .then((res) => { this.versionData.processDefinitionVersions =
res.data.totalList this.versionData.total = res.data.total this.versionData.pageSize = res.data.pageSize this.versionData.pageNo = res.data.currentPage }) .catch((e) => { this.$message.error(e.msg || '') }) }, deleteProcessVersion ({ version, processDefinitionCode }) { this.deleteProcessDefinitionVersion({ version: version, code: processDefinitionCode }) .then((res) => { this.$message.success(res.msg || '') this.getProcessVersions({ pageNo: 1, pageSize: 10, processDefinitionCode: processDefinitionCode }) }) .catch((e) => { this.$message.error(e.msg || '') }) }, /** * Log dialog */ closeLogDialog () { this.logDialog = false this.logTaskInstance = null }, showLogDialog (taskDefinitionCode) { const taskInstance = this.taskInstances.find(taskInstance => { return taskInstance.taskCode === taskDefinitionCode }) if (taskInstance) { this.logTaskInstance = { id: taskInstance.id, type: taskInstance.taskType } this.logDialog = true } } } } </script> <style lang="scss" scoped> @import "./dag"; </style> <style lang="scss"> @import "./loading"; </style>
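Editor's note on the `buildConnects` method in the dag.vue source above: besides mapping every edge to a relation, it emits one extra relation per entry task (a task with no incoming edge), using `preTaskCode: 0` as a sentinel so the backend still receives one relation row per head task. A minimal standalone sketch of that derivation; the task codes and the single edge below are invented for illustration:

```js
// Hypothetical sample data; field names follow the component above.
const tasks = [
  { code: 1, version: 1 },
  { code: 2, version: 1 },
  { code: 3, version: 1 }
]
const edges = [{ sourceId: 1, targetId: 2, label: '' }]

// Index incoming edges by target, exactly as buildConnects does.
const preTaskMap = {}
edges.forEach((edge) => {
  preTaskMap[edge.targetId] = { sourceId: edge.sourceId, edgeLabel: edge.label || '' }
})

// Tasks with no incoming edge become "head" relations with preTaskCode 0.
const headEdges = tasks
  .filter((task) => !preTaskMap[task.code])
  .map((task) => ({
    name: '',
    preTaskCode: 0,
    preTaskVersion: 0,
    postTaskCode: task.code,
    postTaskVersion: task.version || 0,
    conditionType: 0,
    conditionParams: {}
  }))

console.log(headEdges) // relations for entry tasks 1 and 3
```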
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,772
[Improvement][API] API operation optimization
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ### Dag API 1. Create offline workflows and schedules 2. Update workflow basic information 3. Update schedule information by process definition code 4. Bring the workflow and its schedule online or offline at the same time 5. Delete the workflow and its schedule at the same time 6. Going online is not allowed for an empty workflow ### Task API 1. Specify the online or offline status when creating tasks 2. Tasks that are online or have downstream dependents cannot be deleted 3. Task online and offline interface ### Relation API 1. Add a task to a workflow 2. Remove a task from a workflow 3. Move a task from workflow A to workflow B 4. Remove an upstream dependency from a task 5. Add an upstream dependency to a task ### Execution API 1. Specify the task execution interface ![image](https://user-images.githubusercontent.com/42576980/141075049-faa6c4f8-9a0a-42f3-9924-0aa4f426f6a4.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6772
https://github.com/apache/dolphinscheduler/pull/7102
b3efcdfeba06867e620829638ff27c3069aeac23
173a3856185abc0fa9be16715d2567ebbe054a6f
"2021-11-10T08:13:37Z"
java
"2021-12-01T08:40:11Z"
dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import io from '@/module/io' export default { /** * Task status acquisition */ getTaskState ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}/tasks`, { processInstanceId: payload }, res => { state.taskInstances = res.data.taskList resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process definition status */ editProcessState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/${payload.code}/release`, { name: payload.name, releaseState: payload.releaseState }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get process definition versions pagination info */ getProcessDefinitionVersionsPage ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * switch process definition version */ switchProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete process definition version */ deleteProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process instance status */ editExecutorsState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/execute`, { processInstanceId: payload.processInstanceId, executeType: payload.executeType }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Verify that the DAG name exists */ verifDAGName ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/verify-name`, { name: payload }, res => { state.name = payload resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get process definition DAG diagram details */ getProcessDetails ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload}`, { }, res => { // process definition code state.code = res.data.processDefinition.code // version state.version = res.data.processDefinition.version // name state.name = res.data.processDefinition.name // releaseState state.releaseState = res.data.processDefinition.releaseState // description state.description =
res.data.processDefinition.description // taskRelationJson state.connects = res.data.processTaskRelationList // locations state.locations = JSON.parse(res.data.processDefinition.locations) // global params state.globalParams = res.data.processDefinition.globalParamList // timeout state.timeout = res.data.processDefinition.timeout // executionType state.executionType = res.data.processDefinition.executionType // tenantCode state.tenantCode = res.data.processDefinition.tenantCode || 'default' // tasks info state.tasks = res.data.taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Copy process definitions to the target project */ copyProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-copy`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Move process definitions to the target project */ moveProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-move`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get all the items created by the logged in user */ getAllItems ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/created-and-authed', {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get the process instance DAG diagram details */ getInstancedetail ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}`, { }, res => { const { processDefinition, processTaskRelationList, taskDefinitionList } = res.data.dagData // code state.code = processDefinition.code // version state.version = processDefinition.version // name state.name = res.data.name // desc state.description = processDefinition.description // connects state.connects = processTaskRelationList // locations state.locations = JSON.parse(processDefinition.locations) // global params state.globalParams = processDefinition.globalParamList // timeout state.timeout = processDefinition.timeout // executionType state.executionType = processDefinition.executionType // tenantCode state.tenantCode = res.data.tenantCode || 'default' // tasks info state.tasks = taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) // startup parameters state.startup = _.assign(state.startup, _.pick(res.data, ['commandType', 'failureStrategy', 'processInstancePriority', 'workerGroup', 'warningType', 'warningGroupId', 'receivers', 'receiversCc'])) state.startup.commandParam = JSON.parse(res.data.commandParam) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Create process definition */ saveDAGchart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition`, { locations: JSON.stringify(state.locations), name: _.trim(state.name),
taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, executionType: state.executionType, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Process definition update */ updateDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-definition/${payload}`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, executionType: state.executionType, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout, releaseState: state.releaseState }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Process instance update */ updateInstance ({ state }, instanceId) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-instances/${instanceId}`, { syncDefine: state.syncDefine, globalParams: JSON.stringify(state.globalParams), locations: JSON.stringify(state.locations), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, timeout: state.timeout }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Get a list of process definitions (sub-workflow usage is not paged) */ getProcessList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.processListS.length) { resolve() return } io.get(`projects/${state.projectCode}/process-definition/simple-list`, payload, res => { state.processListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions (list page usage with pagination) */ getProcessListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition`, payload, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of project */ getProjectList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.projectListS.length) { resolve() return } io.get('projects/created-and-authed', payload, res => { state.projectListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions by project code */ getProcessByProjectCode ({ state }, code) { return new Promise((resolve, reject) => { io.get(`projects/${code}/process-definition/all`, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get datasource */ getDatasourceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('datasources/list', { type: payload }, res => { resolve(res) }).catch(res => { reject(res) }) }) }, /** * get resources */ getResourcesList ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListS.length) { resolve() return } io.get('resources/list', { type: 'FILE' }, res => { state.resourcesListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get jar */ getResourcesListJar ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListJar.length) { resolve() return } io.get('resources/query-by-type', { type: 'FILE' }, res => { state.resourcesListJar = res.data resolve(res.data) 
}).catch(res => { reject(res) }) }) }, /** * Get process instance */ getProcessInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances`, payload, res => { state.instanceListS = res.data.totalList resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get alarm list */ getNotifyGroupList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-groups/list', res => { state.notifyGroupListS = _.map(res.data, v => { return { id: v.id, code: v.groupName, disabled: false } }) resolve(_.cloneDeep(state.notifyGroupListS)) }).catch(res => { reject(res) }) }) }, /** * Process definition startup interface */ processStart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-process-instance`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * View log */ getLog ({ state }, payload) { return new Promise((resolve, reject) => { io.get('log/detail', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query the sub process instance id by the parent process instance */ getSubProcessId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/query-sub-by-parent`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Called before the process definition starts */ getStartCheck ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-check`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Create timing */ createSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Preview timing */ previewSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/preview`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Timing list paging */ getScheduleList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing offline */ scheduleOffline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/offline`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing online */ scheduleOnline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/online`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Edit timing */ updateSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/schedules/${payload.id}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete process instance */ deleteInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete process instance */ batchDeleteInstance ({ state }, payload) { return new Promise((resolve, reject) => {
io.post(`projects/${state.projectCode}/process-instances/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete definition */ deleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete definition */ batchDeleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * export definition */ exportDefinition ({ state }, payload) { const downloadBlob = (data, fileNameS = 'json') => { if (!data) { return } const blob = new Blob([data]) const fileName = `${fileNameS}.json` if ('download' in document.createElement('a')) { // not an IE browser const url = window.URL.createObjectURL(blob) const link = document.createElement('a') link.style.display = 'none' link.href = url link.setAttribute('download', fileName) document.body.appendChild(link) link.click() document.body.removeChild(link) // remove the element after the download completes window.URL.revokeObjectURL(url) // release the blob object } else { // IE 10+ window.navigator.msSaveBlob(blob, fileName) } } io.post(`projects/${state.projectCode}/process-definition/batch-export`, { codes: payload.codes }, res => { downloadBlob(res, payload.fileName) }, e => { }, { responseType: 'blob' }) }, /** * Process instance get variable */ getViewvariables ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-variables`, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get udfs function based on data source */ getUdfList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('resources/udf-func/list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task instance list */ getTaskInstanceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-instances`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Force fail/kill/need_fault_tolerance task success */ forceTaskSuccess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-instances/${payload.taskInstanceId}/force-success`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task record list */ getTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query history task record list */ getHistoryTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/history-list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * tree chart */ getViewTree ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/view-tree`, { limit: payload.limit }, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * gantt chart */ getViewGantt ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-gantt`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query
task node list */ getProcessTasksList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, getTaskListDefIdAll ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/batch-query-tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * remove timing */ deleteTiming ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/schedules/${payload.scheduleId}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, getResourceId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`resources/${payload.id}`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, genTaskCodeList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition/gen-task-codes`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query Task Definitions List Paging */ getTaskDefinitionsList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Delete Task Definition by code */ deleteTaskDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/task-definition/${payload.code}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Save Task Definition */ saveTaskDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-definition`, { taskDefinitionJson: JSON.stringify(payload.taskDefinitionJson) }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, updateTaskDefinition ({ state }, taskDefinition) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/task-definition/${taskDefinition.code}`, { taskDefinitionJsonObj: JSON.stringify(taskDefinition) }, res => { resolve(res) }).catch(e => { reject(e) }) }) } }
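Editor's note on the actions.js source above: every action wraps the callback-style `io` client in a Promise, so components chain `.then`/`.catch` instead of passing callbacks around. A minimal usage sketch, assuming the module is registered in Vuex under the `dag` namespace as this UI does elsewhere; the component fields `this.code` and `this.versions` are hypothetical:

```js
import { mapActions } from 'vuex'

export default {
  methods: {
    ...mapActions('dag', ['getProcessDefinitionVersionsPage']),
    loadVersions () {
      // The action resolves with the raw io response and rejects with the error.
      this.getProcessDefinitionVersionsPage({ pageNo: 1, pageSize: 10, code: this.code })
        .then((res) => { this.versions = res.data.totalList })
        .catch((e) => { this.$message.error(e.msg || '') })
    }
  }
}
```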
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,070
[Bug] [dolphinscheduler-api] Create or edit a workflow definition, check the timeoutNotifyStrategy, the timeoutNotifyStrategy is null in the database
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Create or edit a workflow definition and check the timeoutNotifyStrategy; the timeoutNotifyStrategy is null in the database ![image](https://user-images.githubusercontent.com/95271106/144008124-e24a57e3-59d6-40e6-b019-f9beeac8b2ed.png) ![image](https://user-images.githubusercontent.com/95271106/144007568-57b371e1-24cf-4694-8378-9529fb6351d8.png) ![image](https://user-images.githubusercontent.com/95271106/144007567-6fd24e2e-bb34-4e69-a71a-240c789d2294.png) ### What you expected to happen The timeoutNotifyStrategy should be written to the database when saving the workflow definition ### How to reproduce Save a workflow definition with the timeoutNotifyStrategy checked, then inspect the timeoutNotifyStrategy column in the database ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7070
https://github.com/apache/dolphinscheduler/pull/7107
173a3856185abc0fa9be16715d2567ebbe054a6f
d7eb7453e555309bf060809e6f6c61232be469cd
"2021-11-30T07:58:35Z"
java
"2021-12-01T13:03:49Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskDefinition.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.entity; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.commons.lang.StringUtils; import java.util.Date; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; import com.baomidou.mybatisplus.annotation.IdType; import com.baomidou.mybatisplus.annotation.TableField; import com.baomidou.mybatisplus.annotation.TableId; import com.baomidou.mybatisplus.annotation.TableName; import com.fasterxml.jackson.annotation.JsonFormat; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; /** * task definition */ @TableName("t_ds_task_definition") public class TaskDefinition { /** * id */ @TableId(value = "id", type = IdType.AUTO) private int id; /** * code */ private long code; /** * name */ private String name; /** * version */ private int version; /** * description */ private String description; /** * project code */ private long projectCode; /** * task user id */ private int userId; /** * task type */ private String taskType; /** * user defined parameters */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String taskParams; /** * user defined parameter list */ @TableField(exist = false) private List<Property> taskParamList; /** * user define parameter map */ @TableField(exist = false) private Map<String, String> taskParamMap; /** * task is valid: yes/no */ private Flag flag; /** * task priority */ private Priority taskPriority; /** * user name */ @TableField(exist = false) private String userName; /** * project name */ @TableField(exist = false) private String projectName; /** * worker group */ private String workerGroup; /** * environment code */ private long environmentCode; /** * fail retry times */ private int failRetryTimes; /** * fail retry interval */ private int failRetryInterval; /** * timeout flag */ private TimeoutFlag timeoutFlag; /** * timeout notify strategy */ private TaskTimeoutStrategy timeoutNotifyStrategy; /** * task warning time out. unit: minute */ private int timeout; /** * delay execution time. 
*/ private int delayTime; /** * resource ids */ private String resourceIds; /** * create time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date createTime; /** * update time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date updateTime; /** * modify user name */ @TableField(exist = false) private String modifyBy; public TaskDefinition() { } public TaskDefinition(long code, int version) { this.code = code; this.version = version; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getId() { return id; } public void setId(int id) { this.id = id; } public Date getCreateTime() { return createTime; } public void setCreateTime(Date createTime) { this.createTime = createTime; } public Date getUpdateTime() { return updateTime; } public void setUpdateTime(Date updateTime) { this.updateTime = updateTime; } public Flag getFlag() { return flag; } public void setFlag(Flag flag) { this.flag = flag; } public int getUserId() { return userId; } public void setUserId(int userId) { this.userId = userId; } public String getUserName() { return userName; } public void setUserName(String userName) { this.userName = userName; } public String getProjectName() { return projectName; } public void setProjectName(String projectName) { this.projectName = projectName; } public String getTaskParams() { return taskParams; } public void setTaskParams(String taskParams) { this.taskParams = taskParams; } public List<Property> getTaskParamList() { JsonNode localParams = JSONUtils.parseObject(taskParams).findValue("localParams"); if (localParams != null) { taskParamList = JSONUtils.toList(localParams.toString(), Property.class); } return taskParamList; } public void setTaskParamList(List<Property> taskParamList) { this.taskParamList = taskParamList; } public void setTaskParamMap(Map<String, String> taskParamMap) { this.taskParamMap = taskParamMap; } public Map<String, String> getTaskParamMap() { if (taskParamMap == null && StringUtils.isNotEmpty(taskParams)) { JsonNode localParams = JSONUtils.parseObject(taskParams).findValue("localParams"); if (localParams != null) { List<Property> propList = JSONUtils.toList(localParams.toString(), Property.class); taskParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } } return taskParamMap; } public int getTimeout() { return timeout; } public void setTimeout(int timeout) { this.timeout = timeout; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public long getCode() { return code; } public void setCode(long code) { this.code = code; } public int getVersion() { return version; } public void setVersion(int version) { this.version = version; } public long getProjectCode() { return projectCode; } public void setProjectCode(long projectCode) { this.projectCode = projectCode; } public String getTaskType() { return taskType; } public void setTaskType(String taskType) { this.taskType = taskType; } public Priority getTaskPriority() { return taskPriority; } public void setTaskPriority(Priority taskPriority) { this.taskPriority = taskPriority; } public String getWorkerGroup() { return workerGroup; } public void setWorkerGroup(String workerGroup) { this.workerGroup = workerGroup; } public int getFailRetryTimes() { return failRetryTimes; } public void setFailRetryTimes(int failRetryTimes) { this.failRetryTimes = failRetryTimes; } public int 
getFailRetryInterval() { return failRetryInterval; } public void setFailRetryInterval(int failRetryInterval) { this.failRetryInterval = failRetryInterval; } public TaskTimeoutStrategy getTimeoutNotifyStrategy() { return timeoutNotifyStrategy; } public void setTimeoutNotifyStrategy(TaskTimeoutStrategy timeoutNotifyStrategy) { this.timeoutNotifyStrategy = timeoutNotifyStrategy; } public TimeoutFlag getTimeoutFlag() { return timeoutFlag; } public void setTimeoutFlag(TimeoutFlag timeoutFlag) { this.timeoutFlag = timeoutFlag; } public String getResourceIds() { return resourceIds; } public void setResourceIds(String resourceIds) { this.resourceIds = resourceIds; } public int getDelayTime() { return delayTime; } public void setDelayTime(int delayTime) { this.delayTime = delayTime; } public String getDependence() { return JSONUtils.getNodeString(this.taskParams, Constants.DEPENDENCE); } public String getModifyBy() { return modifyBy; } public void setModifyBy(String modifyBy) { this.modifyBy = modifyBy; } public long getEnvironmentCode() { return this.environmentCode; } public void setEnvironmentCode(long environmentCode) { this.environmentCode = environmentCode; } @Override public boolean equals(Object o) { if (o == null) { return false; } TaskDefinition that = (TaskDefinition) o; return failRetryTimes == that.failRetryTimes && failRetryInterval == that.failRetryInterval && timeout == that.timeout && delayTime == that.delayTime && Objects.equals(name, that.name) && Objects.equals(description, that.description) && Objects.equals(taskType, that.taskType) && Objects.equals(taskParams, that.taskParams) && flag == that.flag && taskPriority == that.taskPriority && Objects.equals(workerGroup, that.workerGroup) && timeoutFlag == that.timeoutFlag && timeoutNotifyStrategy == that.timeoutNotifyStrategy && Objects.equals(resourceIds, that.resourceIds) && environmentCode == that.environmentCode; } @Override public String toString() { return "TaskDefinition{" + "id=" + id + ", code=" + code + ", name='" + name + '\'' + ", version=" + version + ", description='" + description + '\'' + ", projectCode=" + projectCode + ", userId=" + userId + ", taskType=" + taskType + ", taskParams='" + taskParams + '\'' + ", taskParamList=" + taskParamList + ", taskParamMap=" + taskParamMap + ", flag=" + flag + ", taskPriority=" + taskPriority + ", userName='" + userName + '\'' + ", projectName='" + projectName + '\'' + ", workerGroup='" + workerGroup + '\'' + ", failRetryTimes=" + failRetryTimes + ", environmentCode='" + environmentCode + '\'' + ", failRetryInterval=" + failRetryInterval + ", timeoutFlag=" + timeoutFlag + ", timeoutNotifyStrategy=" + timeoutNotifyStrategy + ", timeout=" + timeout + ", delayTime=" + delayTime + ", resourceIds='" + resourceIds + '\'' + ", createTime=" + createTime + ", updateTime=" + updateTime + '}'; } }
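Editor's note on the record above: issue 7070 reports `timeoutNotifyStrategy` arriving as null in `t_ds_task_definition`, and this entity is where the value must land after a save. A hedged sketch of the kind of one-task `taskDefinitionJson` fragment the UI serializes; the field names mirror the entity, but the concrete values are invented and the enum literals (`OPEN`, `WARN`) are assumptions based on the `TimeoutFlag` and `TaskTimeoutStrategy` enums imported above:

```js
// Hypothetical one-task fragment of taskDefinitionJson; values are illustrative only.
const taskDefinition = {
  code: 123456789012,
  name: 'demo-task',
  version: 1,
  taskType: 'SHELL',
  taskParams: { localParams: [], rawScript: 'echo 1' },
  timeoutFlag: 'OPEN', // assumed literal of TimeoutFlag
  timeoutNotifyStrategy: 'WARN', // the field issue 7070 says must survive the round trip
  timeout: 30
}
console.log(JSON.stringify([taskDefinition]))
```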
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,070
[Bug] [dolphinscheduler-api] Create or edit a workflow definition, check the timeoutNotifyStrategy, the timeoutNotifyStrategy is null in the database
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Create or edit a workflow definition and check the timeoutNotifyStrategy; the timeoutNotifyStrategy is null in the database ![image](https://user-images.githubusercontent.com/95271106/144008124-e24a57e3-59d6-40e6-b019-f9beeac8b2ed.png) ![image](https://user-images.githubusercontent.com/95271106/144007568-57b371e1-24cf-4694-8378-9529fb6351d8.png) ![image](https://user-images.githubusercontent.com/95271106/144007567-6fd24e2e-bb34-4e69-a71a-240c789d2294.png) ### What you expected to happen The timeoutNotifyStrategy should be written to the database when saving the workflow definition ### How to reproduce Save a workflow definition with the timeoutNotifyStrategy checked, then inspect the timeoutNotifyStrategy column in the database ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7070
https://github.com/apache/dolphinscheduler/pull/7107
173a3856185abc0fa9be16715d2567ebbe054a6f
d7eb7453e555309bf060809e6f6c61232be469cd
"2021-11-30T07:58:35Z"
java
"2021-12-01T13:03:49Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/_source/timeoutAlarm.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="timeout-alarm-model"> <div class="clearfix list"> <div class="text-box"> <span>{{$t('Timeout alarm')}}</span> </div> <div class="cont-box"> <label class="label-box"> <div style="padding-top: 5px;"> <el-switch v-model="enable" size="small" @change="_onSwitch" :disabled="isDetails"></el-switch> </div> </label> </div> </div> <div class="clearfix list" v-if="enable"> <div class="text-box"> <span>{{$t('Timeout strategy')}}</span> </div> <div class="cont-box"> <label class="label-box"> <div style="padding-top: 6px;"> <el-checkbox-group v-model="strategy" size="small"> <el-checkbox label="WARN" :disabled="isDetails">{{$t('Timeout alarm')}}</el-checkbox> <el-checkbox label="FAILED" :disabled="isDetails">{{$t('Timeout failure')}}</el-checkbox> </el-checkbox-group> </div> </label> </div> </div> <div class="clearfix list" v-if="enable"> <div class="text-box"> <span>{{$t('Timeout period')}}</span> </div> <div class="cont-box"> <label class="label-box"> <el-input v-model="interval" size="small" style="width: 200px;" :disabled="isDetails" maxlength="9"> <span slot="append">{{$t('Minute')}}</span> </el-input> </label> </div> </div> </div> </template> <script> import _ from 'lodash' import disabledState from '@/module/mixin/disabledState' export default { name: 'form-timeout-alarm', data () { return { // Timeout display hiding enable: false, // Timeout strategy strategy: [], // Timeout period interval: null } }, mixins: [disabledState], props: { backfillItem: Object }, methods: { _onSwitch (is) { // Timeout strategy this.strategy = is ? ['WARN'] : [] // Timeout period this.interval = is ? 30 : null }, _verification () { // Verification timeout policy if (this.enable && !this.strategy.length) { this.$message.warning(`${this.$t('Timeout strategy must be selected')}`) return false } // Verify timeout duration Non 0 positive integer const reg = /^[1-9]\d*$/ if (this.enable && !reg.test(this.interval)) { this.$message.warning(`${this.$t('Timeout must be a positive integer')}`) return false } this.$emit('on-timeout', { strategy: (() => { // Handling checkout sequence let strategy = this.strategy if (strategy.length === 2 && strategy[0] === 'FAILED') { return [strategy[1], strategy[0]].join(',') } else { return strategy.join(',') } })(), interval: parseInt(this.interval), enable: this.enable }) return true } }, watch: { }, created () { let o = this.backfillItem // Non-null objects represent backfill if (!_.isEmpty(o) && o.timeout) { this.enable = o.timeout.enable || false this.strategy = _.split(o.timeout.strategy, ',') || ['WARN'] this.interval = o.timeout.interval || null } }, mounted () { }, components: {} } </script>
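Editor's note on the `_verification` method in timeoutAlarm.vue above: the checkbox group can report the two strategies in either order, but the emitted string is normalized so `WARN` always precedes `FAILED`. A standalone sketch of that normalization with hypothetical inputs:

```js
// Mirrors the inline IIFE in _verification above.
function normalizeStrategy (strategy) {
  if (strategy.length === 2 && strategy[0] === 'FAILED') {
    return [strategy[1], strategy[0]].join(',')
  }
  return strategy.join(',')
}

console.log(normalizeStrategy(['FAILED', 'WARN'])) // 'WARN,FAILED'
console.log(normalizeStrategy(['WARN']))           // 'WARN'
```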
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,083
[Bug] [UI] The switch task is missing the else branch
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The switch task is missing the else branch: ![image](https://user-images.githubusercontent.com/87303815/144031211-140c994c-881c-4d9c-951e-7d7381e3ed63.png) ### What you expected to happen should be as follows: ![image](https://user-images.githubusercontent.com/87303815/144031229-0b80c0b3-e2fc-49db-8859-bf7abd64e6f1.png) ### How to reproduce Create a switch task on the DAG page. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7083
https://github.com/apache/dolphinscheduler/pull/7084
b265297fc86108501ce8ca306a38fcfed1eb2fe5
e0623973e000ba3c5cc90ac72c0ba222e6947d1e
"2021-11-30T10:36:59Z"
java
"2021-12-02T06:35:36Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/conditions.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="conditions-model"> <m-list-box> <div slot="text">{{$t('Custom Parameters')}}</div> <div slot="content"> <div class="dep-opt"> <a href="javascript:" @click="!isDetails && _addDep()" class="add-dep"> <em v-if="!isLoading" class="el-icon-circle-plus-outline" :class="_isDetails" data-toggle="tooltip" :title="$t('Add')"></em> <em v-if="isLoading" class="el-icon-loading as as-spin" data-toggle="tooltip" :title="$t('Add')"></em> </a> </div> <div class="dep-box"> <span class="dep-relation" @click="!isDetails && _setGlobalRelation()" v-if="dependTaskList.length"> {{relation === 'AND' ? $t('and') : $t('or')}} </span> <div class="dep-list" v-for="(el,$index) in dependTaskList" :key='$index'> <span class="dep-line-pie" v-if="el.dependItemList.length" @click="!isDetails && _setRelation($index)"> {{el.relation === 'AND' ? $t('and') : $t('or')}} </span> <em class="el-icon-delete dep-delete" data-toggle="tooltip" data-container="body" :class="_isDetails" @click="!isDetails && _deleteDep($index)" :title="$t('Delete')" > </em> <m-node-status :dependTaskList='dependTaskList' v-model="el.dependItemList" @on-delete-all="_onDeleteAll" @getDependTaskList="getDependTaskList" :index="$index" :prev-tasks="prevTasks"> </m-node-status> </div> </div> </div> </m-list-box> </div> </template> <script> import _ from 'lodash' import mListBox from './_source/listBox' import mNodeStatus from './_source/nodeStatus' import disabledState from '@/module/mixin/disabledState' export default { name: 'dependence', data () { return { relation: 'AND', dependTaskList: [], isLoading: false } }, mixins: [disabledState], props: { backfillItem: Object, prevTasks: Array }, methods: { _addDep () { if (!this.isLoading) { this.isLoading = true this.dependTaskList.push({ dependItemList: [], relation: 'AND' }) } }, _deleteDep (i) { // remove index dependent this.dependTaskList.splice(i, 1) // remove tootip $('body').find('.tooltip.fade.top.in').remove() }, _onDeleteAll (i) { this.dependTaskList.map((item, i) => { if (item.dependItemList.length === 0) { this.dependTaskList.splice(i, 1) } }) // this._deleteDep(i) }, _setGlobalRelation () { this.relation = this.relation === 'AND' ? 'OR' : 'AND' }, getDependTaskList (i) { // console.log('getDependTaskList',i) }, _setRelation (i) { this.dependTaskList[i].relation = this.dependTaskList[i].relation === 'AND' ? 
'OR' : 'AND' }, _verification () { this.$emit('on-dependent', { relation: this.relation, dependTaskList: _.map(this.dependTaskList, v => { return { relation: v.relation, dependItemList: _.map(v.dependItemList, v1 => _.omit(v1, ['depTasksList', 'state', 'dateValueList'])) } }) }) return true } }, watch: { dependTaskList (e) { setTimeout(() => { this.isLoading = false }, 600) }, cacheDependence (val) { this.$emit('on-cache-dependent', val) } }, beforeCreate () { }, created () { let o = this.backfillItem // Does not represent an empty object backfill if (!_.isEmpty(o)) { this.relation = _.cloneDeep(o.dependence.relation) || 'AND' this.dependTaskList = _.cloneDeep(o.dependence.dependTaskList) || [] // Process instance return status display matches by key _.map(this.dependTaskList, v => _.map(v.dependItemList, v1 => { $(`#${o.id}`).siblings().each(function () { if (v1.depTaskCode === $(this).text()) { v1.state = $(this).attr('data-dependent-depstate') } }) })) } }, mounted () { }, destroyed () { }, computed: { cacheDependence () { return { relation: this.relation, dependTaskList: _.map(this.dependTaskList, v => { return { relation: v.relation, dependItemList: _.map(v.dependItemList, v1 => _.omit(v1, ['depTasksList', 'state', 'dateValueList'])) } }) } } }, components: { mListBox, mNodeStatus } } </script> <style lang="scss" rel="stylesheet/scss"> .conditions-model { margin-top: -10px; .dep-opt { margin-bottom: 10px; padding-top: 3px; line-height: 24px; .add-dep { color: #0097e0; margin-right: 10px; i { font-size: 18px; vertical-align: middle; } } } .dep-list { margin-bottom: 16px; position: relative; border-left: 1px solid #eee; padding-left: 16px; margin-left: -16px; transition: all 0.2s ease-out; padding-bottom: 20px; &:hover{ border-left: 1px solid #0097e0; transition: all 0.2s ease-out; .dep-line-pie { transition: all 0.2s ease-out; border: 1px solid #0097e0; background: #0097e0; color: #fff; } } .dep-line-pie { transition: all 0.2s ease-out; position: absolute; width: 20px; height: 20px; border: 1px solid #e2e2e2; text-align: center; top: 50%; margin-top: -20px; z-index: 1; left: -10px; border-radius: 10px; background: #fff; font-size: 12px; cursor: pointer; &::selection { background:transparent; } &::-moz-selection { background:transparent; } &::-webkit-selection { background:transparent; } } .dep-delete { position: absolute; bottom: -6px; left: 14px; font-size: 18px; color: #ff0000; cursor: pointer; } } .dep-box { border-left: 4px solid #eee; margin-left: -46px; padding-left: 42px; position: relative; .dep-relation { position: absolute; width: 20px; height: 20px; border: 1px solid #e2e2e2; text-align: center; top: 50%; margin-top: -10px; z-index: 1; left: -12px; border-radius: 10px; background: #fff; font-size: 12px; cursor: pointer; &::selection { background:transparent; } &::-moz-selection { background:transparent; } &::-webkit-selection { background:transparent; } } } } </style>
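Editor's note on the `_verification` and `cacheDependence` logic in conditions.vue above: UI-only fields (`depTasksList`, `state`, `dateValueList`) are stripped with `_.omit` before the dependence payload is emitted. A standalone sketch of that pruning; the sample dependence item is hypothetical:

```js
import _ from 'lodash'

const dependTaskList = [{
  relation: 'AND',
  dependItemList: [{
    depTaskCode: 42,
    // transient UI-only fields that must not reach the backend:
    depTasksList: [],
    state: 'RUNNING',
    dateValueList: []
  }]
}]

const payload = dependTaskList.map(v => ({
  relation: v.relation,
  dependItemList: v.dependItemList.map(v1 => _.omit(v1, ['depTasksList', 'state', 'dateValueList']))
}))

console.log(JSON.stringify(payload))
// => [{"relation":"AND","dependItemList":[{"depTaskCode":42}]}]
```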
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,083
[Bug] [UI] The switch task is missing the else branch
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

The switch task is missing the else branch:

![image](https://user-images.githubusercontent.com/87303815/144031211-140c994c-881c-4d9c-951e-7d7381e3ed63.png)

### What you expected to happen

It should be as follows:

![image](https://user-images.githubusercontent.com/87303815/144031229-0b80c0b3-e2fc-49db-8859-bf7abd64e6f1.png)

### How to reproduce

Create a switch task on the DAG page.

### Anything else

_No response_

### Version

dev

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
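For context, the requested behaviour maps naturally onto the data the switch form emits: a list of (condition, branch) pairs plus one trailing branch taken when nothing matches. A minimal sketch of that shape, assuming (hypothetically) that the trailing `nextNode` field serves as the else branch; the task codes below are invented for illustration:

```js
// Hypothetical task codes, for illustration only
const taskCodeA = 1001
const taskCodeB = 1002
const taskCodeElse = 1003

const switchResult = {
  dependTaskList: [
    { condition: '${level} == 1', nextNode: taskCodeA }, // if branch
    { condition: '${level} == 2', nextNode: taskCodeB }  // else-if branch
  ],
  // else branch: taken when none of the conditions above matches
  nextNode: taskCodeElse
}

console.log(JSON.stringify(switchResult, null, 2))
```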
https://github.com/apache/dolphinscheduler/issues/7083
https://github.com/apache/dolphinscheduler/pull/7084
b265297fc86108501ce8ca306a38fcfed1eb2fe5
e0623973e000ba3c5cc90ac72c0ba222e6947d1e
"2021-11-30T10:36:59Z"
java
"2021-12-02T06:35:36Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/switch.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <div class="conditions-model">
    <m-list-box>
      <div slot="text">{{$t('condition')}}</div>
      <div slot="content">
        <div class="dep-opt">
          <a href="javascript:" @click="!isDetails && _addDep()" class="add-dep">
            <em v-if="!isLoading" class="el-icon-circle-plus-outline" :class="_isDetails" data-toggle="tooltip" :title="$t('Add')"></em>
            <em v-if="isLoading" class="el-icon-loading as as-spin" data-toggle="tooltip" :title="$t('Add')"></em>
          </a>
        </div>
        <div class="dep-list-model">
          <div v-for="(el,index) in dependItemList" :key='index' class="switch-list">
            <label style="display:block">
              <textarea :id="`code-switch-mirror${index}`" :name="`code-switch-mirror${index}`" style="opacity: 0;"></textarea>
            </label>
            <span class="text-b" style="padding-left: 0">{{$t('Branch flow')}}</span>
            <el-select style="width: 157px;" size="small" v-model="el.nextNode" clearable :disabled="isDetails">
              <el-option v-for="item in postTasks" :key="item.code" :value="item.code" :label="item.name"></el-option>
            </el-select>
            <span class="operation">
              <a href="javascript:" class="delete" @click="!isDetails && _removeDep(index)" v-if="index === (dependItemList.length - 1)">
                <em class="iconfont el-icon-delete" :class="_isDetails" data-toggle="tooltip" data-container="body" :title="$t('Delete')"></em>
              </a>
              <a href="javascript:" class="add" @click="!isDetails && _addDep()" v-if="index === (dependItemList.length - 1)">
                <em class="iconfont el-icon-circle-plus-outline" :class="_isDetails" data-toggle="tooltip" data-container="body" :title="$t('Add')"></em>
              </a>
            </span>
          </div>
        </div>
      </div>
    </m-list-box>
    <m-list-box>
      <div slot="text">{{$t('Branch flow')}}</div>
      <div slot="content">
        <el-select style="width: 157px;" size="small" v-model="nextNode" clearable :disabled="isDetails">
          <el-option v-for="item in postTasks" :key="item.code" :value="item.code" :label="item.name"></el-option>
        </el-select>
      </div>
    </m-list-box>
  </div>
</template>
<script>
  import _ from 'lodash'
  import mListBox from './_source/listBox'
  import disabledState from '@/module/mixin/disabledState'
  import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'

  let editArray = []

  export default {
    name: 'dependence',
    data () {
      return {
        nextNode: '',
        relation: 'AND',
        dependItemList: [],
        editArray: [],
        isLoading: false,
        oldList: []
      }
    },
    mixins: [disabledState],
    props: {
      nodeData: Object,
      backfillItem: Object,
      postTasks: Array
    },
    methods: {
      editList (index) {
        // editor
        const self = this
        const editor = codemirror(`code-switch-mirror${index}`, {
          mode: 'shell',
          readOnly: this.isDetails
        }, this)
        editor.on('change', function () {
          const outputList = _.cloneDeep(self.dependItemList)
          outputList[index].condition = editor.getValue()
          self.dependItemList = outputList
        })
        this.keypress = () => {
          if (!editor.getOption('readOnly')) {
            editor.showHint({
              completeSingle: false
            })
          }
        }
        editor.on('keypress', this.keypress)
        editor.setValue(this.dependItemList[index].condition || '')
        editor.setSize('580px', '60px')
        editArray.push(editor)
        this.oldList = _.cloneDeep(this.dependItemList)
        return editArray
      },
      _addDep () {
        if (!this.isLoading) {
          this.isLoading = true
          this.dependItemList.push({
            condition: '',
            nextNode: ''
          })
          let dependItemListLen = this.dependItemList.length
          if (dependItemListLen > 0) {
            setTimeout(() => {
              this.editList(dependItemListLen - 1)
              this.isLoading = false
            }, 200)
          }
          this._removeTip()
        }
      },
      _removeDep (i) {
        this.dependItemList.splice(i, 1)
        this._removeTip()
      },
      _removeTip () {
        $('body').find('.tooltip.fade.top.in').remove()
      },
      _verification () {
        let flag = this.dependItemList.some((item) => {
          return !item.condition
        })
        if (flag) {
          this.$message.warning(`${this.$t('The condition content cannot be empty')}`)
          return false
        }
        let params = {
          dependTaskList: this.dependItemList || [],
          nextNode: this.nextNode || ''
        }
        this.$emit('on-switch-result', params)
        return true
      }
    },
    watch: {},
    beforeCreate () {
    },
    created () {
      const o = this.backfillItem
      if (!_.isEmpty(o)) {
        let switchResult = o.switchResult || {}
        this.dependItemList = _.cloneDeep(switchResult.dependTaskList) || []
        this.nextNode = _.cloneDeep(switchResult.nextNode) || ''
      }
    },
    mounted () {
      if (this.dependItemList && this.dependItemList.length > 0) {
        setTimeout(() => {
          this.dependItemList.forEach((item, index) => {
            this.editList(index)
          })
        })
      }
    },
    destroyed () {
    },
    computed: {
    },
    components: { mListBox }
  }
</script>

<style lang="scss" rel="stylesheet/scss">
  .conditions-model {
    margin-top: -10px;
    .dep-opt {
      margin-bottom: 10px;
      padding-top: 3px;
      line-height: 24px;
      .add-dep {
        color: #0097e0;
        margin-right: 10px;
        i {
          font-size: 18px;
          vertical-align: middle;
        }
      }
    }
    .dep-list-model {
      position: relative;
      min-height: 0px;
      .switch-list {
        margin-bottom: 6px;
        .operation {
          padding-left: 4px;
          a {
            i {
              font-size: 18px;
              vertical-align: middle;
            }
          }
          .delete {
            color: #ff0000;
          }
          .add {
            color: #0097e0;
          }
        }
      }
    }
  }
</style>
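A side observation on the component above: each condition row pushes its CodeMirror instance into the module-level `editArray`, but `destroyed()` is empty, so instances accumulate as the form is reopened. A sketch of one possible teardown, assuming the editors were created from textareas (which is what `toTextArea()` requires); this is not the project's code:

```js
// Sketch only: release CodeMirror instances collected in editArray.
function destroyEditors (editArray) {
  editArray.forEach(editor => {
    editor.toTextArea() // detach CodeMirror and restore the original <textarea>
  })
  editArray.length = 0 // drop references so the instances can be collected
}
```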
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,103
[Bug] [Process definition] sqoop component issues
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

1. The sqoop component's custom parameters are not validated, so multiple empty parameters can be added; this causes an exception when the workflow is selected by a dependent component.
2. If you add a sqoop component to a workflow together with multiple custom parameters, saving results in data loss for the entire workflow.

### What you expected to happen

The sqoop component should enforce that the parameter name is required, and the workflow should save normally without losing data.

### How to reproduce

1. Create a workflow and add a component such as a SQL or SHELL component; it can be saved and reopened normally.
2. Add a sqoop component without custom parameters; saving and reopening still work normally.
3. Add multiple empty parameters to the sqoop component, save, reopen, and find that everything is gone.

### Anything else

_No response_

### Version

2.0.0

### Are you willing to submit PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
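The missing check the report asks for is small. A minimal sketch, assuming custom parameters arrive as `{ prop, value }` rows like the `localParams` used elsewhere in the UI; the helper name is invented for the example:

```js
// Sketch only: reject parameter rows whose name is blank before saving.
function validateCustomParams (localParams) {
  return localParams.every(p => typeof p.prop === 'string' && p.prop.trim() !== '')
}

// usage sketch, inside the form's _verification():
// if (!validateCustomParams(this.localParams)) {
//   this.$message.warning(this.$t('prop is empty')) // a key of this name exists in the UI locale files
//   return false
// }
```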
https://github.com/apache/dolphinscheduler/issues/7103
https://github.com/apache/dolphinscheduler/pull/7119
9fffbdb3163381002b7fd6d15e2698cdc5cded59
6beae191e2df497b7624728f0208c3a5ab7f0543
"2021-12-01T08:39:17Z"
java
"2021-12-02T11:33:51Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sqoop.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="sqoop-model"> <m-list-box> <div slot="text">{{$t('Custom Job')}}</div> <div slot="content"> <el-switch size="small" v-model="isCustomTask" @change="_onSwitch" :disabled="isDetails"></el-switch> </div> </m-list-box> <m-list-box v-show="isCustomTask"> <div slot="text">{{$t('Custom Script')}}</div> <div slot="content"> <div class="form-mirror"> <textarea id="code-shell-mirror" name="code-shell-mirror" style="opacity: 0;"></textarea> </div> </div> </m-list-box> <template v-if="!isCustomTask"> <m-list-box> <div slot="text">{{$t('Sqoop Job Name')}}</div> <div slot="content"> <el-input :disabled="isDetails" size="small" type="text" v-model="jobName" :placeholder="$t('Please enter Job Name(required)')"></el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Direct')}}</div> <div slot="content"> <el-select style="width: 130px;" size="small" v-model="modelType" :disabled="isDetails" @change="_handleModelTypeChange"> <el-option v-for="city in modelTypeList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <m-list-box> <div slot="text" style="width: 110px;">{{$t('Hadoop Custom Params')}}</div> <div slot="content"> <m-local-params ref="refMapColumnHadoopParams" @on-local-params="_onHadoopCustomParams" :udp-list="hadoopCustomParams" :hide="false"> </m-local-params> </div> </m-list-box> <m-list-box> <div slot="text" style="width: 100px;">{{$t('Sqoop Advanced Parameters')}}</div> <div slot="content"> <m-local-params ref="refMapColumnAdvancedParams" @on-local-params="_onSqoopAdvancedParams" :udp-list="sqoopAdvancedParams" :hide="false"> </m-local-params> </div> </m-list-box> <m-list-box> <div slot="text" style="font-weight:bold">{{$t('Data Source')}}</div> </m-list-box> <hr style="margin-left: 60px;"> <m-list-box> <div slot="text">{{$t('Type')}}</div> <div slot="content"> <el-select style="width: 130px;" size="small" v-model="sourceType" :disabled="isDetails" @change="_handleSourceTypeChange"> <el-option v-for="city in sourceTypeList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <template v-if="sourceType === 'MYSQL'"> <m-list-box> <div slot="text">{{$t('Datasource')}}</div> <div slot="content"> <m-datasource ref="refSourceDs" @on-dsData="_onSourceDsData" :data="{type:sourceMysqlParams.srcType, typeList: [{id: 0, code: 'MYSQL', disabled: false}], datasource:sourceMysqlParams.srcDatasource }" > </m-datasource> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('ModelType')}}</div> <div slot="content"> <el-radio-group v-model="srcQueryType" size="small" @change="_handleQueryType"> <el-radio label="0">{{$t('Form')}}</el-radio> <el-radio label="1">SQL</el-radio> </el-radio-group> </div> 
</m-list-box> <template v-if="sourceMysqlParams.srcQueryType === '0'"> <m-list-box> <div slot="text">{{$t('Table')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceMysqlParams.srcTable" :placeholder="$t('Please enter Mysql Table(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('ColumnType')}}</div> <div slot="content"> <el-radio-group v-model="sourceMysqlParams.srcColumnType" size="small" style="vertical-align: sub;"> <el-radio label="0">{{$t('All Columns')}}</el-radio> <el-radio label="1">{{$t('Some Columns')}}</el-radio> </el-radio-group> </div> </m-list-box> <m-list-box v-if="sourceMysqlParams.srcColumnType === '1'"> <div slot="text">{{$t('Column')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceMysqlParams.srcColumns" :placeholder="$t('Please enter Columns (Comma separated)')"> </el-input> </div> </m-list-box> </template> </template> <template v-if="sourceType === 'HIVE'"> <m-list-box> <div slot="text">{{$t('Database')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceHiveParams.hiveDatabase" :placeholder="$t('Please enter Hive Database(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Table')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceHiveParams.hiveTable" :placeholder="$t('Please enter Hive Table(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Hive partition Keys')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceHiveParams.hivePartitionKey" :placeholder="$t('Please enter Hive Partition Keys')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Hive partition Values')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceHiveParams.hivePartitionValue" :placeholder="$t('Please enter Hive Partition Values')"> </el-input> </div> </m-list-box> </template> <template v-if="sourceType === 'HDFS'"> <m-list-box> <div slot="text">{{$t('Export Dir')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="sourceHdfsParams.exportDir" :placeholder="$t('Please enter Export Dir(required)')"> </el-input> </div> </m-list-box> </template> <template v-if="sourceType === 'MYSQL'"> <m-list-box v-show="srcQueryType === '1'"> <div slot="text">{{$t('SQL Statement')}}</div> <div slot="content"> <div class="form-mirror"> <textarea id="code-sqoop-mirror" name="code-sqoop-mirror" style="opacity: 0;"> </textarea> <a class="ans-modal-box-max"> <em class="el-icon-full-screen" @click="setEditorVal"></em> </a> </div> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Map Column Hive')}}</div> <div slot="content"> <m-local-params ref="refMapColumnHiveParams" @on-local-params="_onMapColumnHive" :udp-list="sourceMysqlParams.mapColumnHive" :hide="false"> </m-local-params> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Map Column Java')}}</div> <div slot="content"> <m-local-params ref="refMapColumnJavaParams" @on-local-params="_onMapColumnJava" :udp-list="sourceMysqlParams.mapColumnJava" :hide="false"> </m-local-params> </div> </m-list-box> </template> <m-list-box> <div slot="text" style="font-weight:bold">{{$t('Data Target')}}</div> </m-list-box> <hr style="margin-left: 60px;"> <m-list-box> <div slot="text">{{$t('Type')}}</div> <div 
slot="content"> <el-select style="width: 130px;" size="small" v-model="targetType" :disabled="isDetails"> <el-option v-for="city in targetTypeList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <template v-if="targetType === 'HIVE'"> <m-list-box> <div slot="text">{{$t('Database')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHiveParams.hiveDatabase" :placeholder="$t('Please enter Hive Database(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Table')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHiveParams.hiveTable" :placeholder="$t('Please enter Hive Table(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('CreateHiveTable')}}</div> <div slot="content"> <el-switch v-model="targetHiveParams.createHiveTable" size="small"></el-switch> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('DropDelimiter')}}</div> <div slot="content"> <el-switch v-model="targetHiveParams.dropDelimiter" size="small"></el-switch> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('OverWriteSrc')}}</div> <div slot="content"> <el-switch v-model="targetHiveParams.hiveOverWrite" size="small"></el-switch> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('ReplaceDelimiter')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHiveParams.replaceDelimiter" :placeholder="$t('Please enter Replace Delimiter')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Hive partition Keys')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHiveParams.hivePartitionKey" :placeholder="$t('Please enter Hive Partition Keys')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Hive partition Values')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHiveParams.hivePartitionValue" :placeholder="$t('Please enter Hive Partition Values')"> </el-input> </div> </m-list-box> </template> <template v-if="targetType === 'HDFS'"> <m-list-box> <div slot="text">{{$t('Target Dir')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHdfsParams.targetPath" :placeholder="$t('Please enter Target Dir(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('DeleteTargetDir')}}</div> <div slot="content"> <el-switch v-model="targetHdfsParams.deleteTargetDir" size="small"></el-switch> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('CompressionCodec')}}</div> <div slot="content"> <el-radio-group v-model="targetHdfsParams.compressionCodec" size="small"> <el-radio label="snappy">snappy</el-radio> <el-radio label="lzo">lzo</el-radio> <el-radio label="gzip">gzip</el-radio> <el-radio label="">no</el-radio> </el-radio-group> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('FileType')}}</div> <div slot="content"> <el-radio-group v-model="targetHdfsParams.fileType" size="small"> <el-radio label="--as-avrodatafile">avro</el-radio> <el-radio label="--as-sequencefile">sequence</el-radio> <el-radio label="--as-textfile">text</el-radio> <el-radio label="--as-parquetfile">parquet</el-radio> </el-radio-group> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('FieldsTerminated')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" 
size="small" v-model="targetHdfsParams.fieldsTerminated" :placeholder="$t('Please enter Fields Terminated')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('LinesTerminated')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetHdfsParams.linesTerminated" :placeholder="$t('Please enter Lines Terminated')"> </el-input> </div> </m-list-box> </template> <template v-if="targetType === 'MYSQL'"> <m-list-box> <div slot="text">{{$t('Datasource')}}</div> <div slot="content"> <m-datasource ref="refTargetDs" @on-dsData="_onTargetDsData" :data="{ type:targetMysqlParams.targetType, typeList: [{id: 0, code: 'MYSQL', disabled: false}], datasource:targetMysqlParams.targetDatasource }" > </m-datasource> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Table')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetMysqlParams.targetTable" :placeholder="$t('Please enter Mysql Table(required)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Column')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetMysqlParams.targetColumns" :placeholder="$t('Please enter Columns (Comma separated)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('FieldsTerminated')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetMysqlParams.fieldsTerminated" :placeholder="$t('Please enter Fields Terminated')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('LinesTerminated')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetMysqlParams.linesTerminated" :placeholder="$t('Please enter Lines Terminated')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('IsUpdate')}}</div> <div slot="content"> <el-switch v-model="targetMysqlParams.isUpdate" size="small"></el-switch> </div> </m-list-box> <m-list-box v-show="targetMysqlParams.isUpdate"> <div slot="text">{{$t('UpdateKey')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="targetMysqlParams.targetUpdateKey" :placeholder="$t('Please enter Update Key')"> </el-input> </div> </m-list-box> <m-list-box v-show="targetMysqlParams.isUpdate"> <div slot="text">{{$t('UpdateMode')}}</div> <div slot="content"> <el-radio-group v-model="targetMysqlParams.targetUpdateMode" size="small"> <el-radio label="updateonly">{{$t('OnlyUpdate')}}</el-radio> <el-radio label="allowinsert">{{$t('AllowInsert')}}</el-radio> </el-radio-group> </div> </m-list-box> </template> <m-list-box> <div slot="text">{{$t('Concurrency')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="text" size="small" v-model="concurrency" :placeholder="$t('Please enter Concurrency')"> </el-input> </div> </m-list-box> </template> <m-list-box> <div slot="text">{{$t('Custom Parameters')}}</div> <div slot="content"> <m-local-params ref="refLocalParams" @on-local-params="_onLocalParams" :udp-list="localParams" :hide="false"> </m-local-params> </div> </m-list-box> <el-dialog :visible.sync="scriptBoxDialog" append-to-body="true" width="80%"> <m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue" @closeAble="closeAble"></m-script-box> </el-dialog> </div> </template> <script> import _ from 'lodash' import i18n from '@/module/i18n' import mListBox from './_source/listBox' import mScriptBox from './_source/scriptBox' import mDatasource 
from './_source/datasource' import mLocalParams from './_source/localParams' import disabledState from '@/module/mixin/disabledState' import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror' let editor let shellEditor export default { name: 'sql', data () { return { /** * Is Custom Task */ isCustomTask: false, /** * Customer Params */ localParams: [], /** * Hadoop Custom Params */ hadoopCustomParams: [], /** * Sqoop Advanced Params */ sqoopAdvancedParams: [], /** * script */ customShell: '', /** * task name */ jobName: '', /** * mysql query type */ srcQueryType: '1', /** * source data source */ srcDatasource: '', /** * target data source */ targetDatasource: '', /** * concurrency */ concurrency: 1, /** * default job type */ jobType: 'TEMPLATE', /** * direct model type */ modelType: 'import', modelTypeList: [{ code: 'import' }, { code: 'export' }], sourceTypeList: [ { code: 'MYSQL' } ], targetTypeList: [ { code: 'HIVE' }, { code: 'HDFS' } ], sourceType: 'MYSQL', targetType: 'HDFS', sourceMysqlParams: { srcType: 'MYSQL', srcDatasource: '', srcTable: '', srcQueryType: '1', srcQuerySql: '', srcColumnType: '0', srcColumns: '', srcConditionList: [], mapColumnHive: [], mapColumnJava: [] }, sourceHdfsParams: { exportDir: '' }, sourceHiveParams: { hiveDatabase: '', hiveTable: '', hivePartitionKey: '', hivePartitionValue: '' }, targetHdfsParams: { targetPath: '', deleteTargetDir: true, fileType: '--as-avrodatafile', compressionCodec: 'snappy', fieldsTerminated: '', linesTerminated: '' }, targetMysqlParams: { targetType: 'MYSQL', targetDatasource: '', targetTable: '', targetColumns: '', fieldsTerminated: '', linesTerminated: '', preQuery: '', isUpdate: false, targetUpdateKey: '', targetUpdateMode: 'allowinsert' }, targetHiveParams: { hiveDatabase: '', hiveTable: '', createHiveTable: false, dropDelimiter: false, hiveOverWrite: true, replaceDelimiter: '', hivePartitionKey: '', hivePartitionValue: '' }, item: '', scriptBoxDialog: false } }, mixins: [disabledState], props: { backfillItem: Object }, methods: { setEditorVal () { this.item = editor.getValue() this.scriptBoxDialog = true }, getSriptBoxValue (val) { editor.setValue(val) }, _onSwitch (is) { if (is) { this.jobType = 'CUSTOM' this.isCustomTask = true setTimeout(() => { this._handlerShellEditor() }, 200) } else { this.jobType = 'TEMPLATE' this.isCustomTask = false if (this.srcQueryType === '1') { setTimeout(() => { this._handlerEditor() }, 200) } } }, _handleQueryType (o) { this.sourceMysqlParams.srcQueryType = this.srcQueryType this._getTargetTypeList(this.sourceType) this.targetType = this.targetTypeList[0].code if (this.srcQueryType === '1') { setTimeout(() => { this._handlerEditor() }, 200) } }, _handleModelTypeChange (a) { this._getSourceTypeList(a) this.sourceType = this.sourceTypeList[0].code this._handleSourceTypeChange({ label: this.sourceType, value: this.sourceType }) }, _handleSourceTypeChange (a) { this._getTargetTypeList(a.label) this.targetType = this.targetTypeList[0].code }, _getSourceTypeList (data) { switch (data) { case 'import': this.sourceTypeList = [ { code: 'MYSQL' } ] break case 'export': this.sourceTypeList = [ { code: 'HDFS' }, { code: 'HIVE' } ] break default: this.sourceTypeList = [ { code: 'MYSQL' }, { code: 'HIVE' }, { code: 'HDFS' } ] break } }, _getTargetTypeList (data) { switch (data) { case 'MYSQL': if (this.srcQueryType === '1') { this.targetTypeList = [ { code: 'HDFS' }] } else { this.targetTypeList = [ { code: 'HIVE' }, { code: 'HDFS' } ] } break case 'HDFS': this.targetTypeList 
= [ { code: 'MYSQL' } ] break case 'HIVE': this.targetTypeList = [ { code: 'MYSQL' } ] break default: this.targetTypeList = [ { code: 'HIVE' }, { code: 'HDFS' } ] break } }, _onMapColumnHive (a) { this.sourceMysqlParams.mapColumnHive = a }, _onMapColumnJava (a) { this.sourceMysqlParams.mapColumnJava = a }, /** * return data source */ _onSourceDsData (o) { this.sourceMysqlParams.srcType = o.type this.sourceMysqlParams.srcDatasource = o.datasource }, /** * return data source */ _onTargetDsData (o) { this.targetMysqlParams.targetType = o.type this.targetMysqlParams.targetDatasource = o.datasource }, /** * stringify the source params */ _handleSourceParams () { let params = null switch (this.sourceType) { case 'MYSQL': this.sourceMysqlParams.srcQuerySql = this.sourceMysqlParams.srcQueryType === '1' && editor ? editor.getValue() : this.sourceMysqlParams.srcQuerySql params = JSON.stringify(this.sourceMysqlParams) break case 'ORACLE': params = JSON.stringify(this.sourceOracleParams) break case 'HDFS': params = JSON.stringify(this.sourceHdfsParams) break case 'HIVE': params = JSON.stringify(this.sourceHiveParams) break default: params = '' break } return params }, /** * stringify the target params */ _handleTargetParams () { let params = null switch (this.targetType) { case 'HIVE': params = JSON.stringify(this.targetHiveParams) break case 'HDFS': params = JSON.stringify(this.targetHdfsParams) break case 'MYSQL': params = JSON.stringify(this.targetMysqlParams) break default: params = '' break } return params }, /** * get source params by source type */ _getSourceParams (data) { switch (this.sourceType) { case 'MYSQL': this.sourceMysqlParams = JSON.parse(data) this.srcDatasource = this.sourceMysqlParams.srcDatasource break case 'ORACLE': this.sourceOracleParams = JSON.parse(data) break case 'HDFS': this.sourceHdfsParams = JSON.parse(data) break case 'HIVE': this.sourceHiveParams = JSON.parse(data) break default: break } }, /** * get target params by target type */ _getTargetParams (data) { switch (this.targetType) { case 'HIVE': this.targetHiveParams = JSON.parse(data) break case 'HDFS': this.targetHdfsParams = JSON.parse(data) break case 'MYSQL': this.targetMysqlParams = JSON.parse(data) this.targetDatasource = this.targetMysqlParams.targetDatasource break default: break } }, /** * verification */ _verification () { let sqoopParams = { jobType: this.jobType, localParams: this.localParams } if (this.jobType === 'CUSTOM') { if (!shellEditor.getValue()) { this.$message.warning(`${i18n.$t('Please enter Custom Shell(required)')}`) return false } sqoopParams.customShell = shellEditor.getValue() } else { if (!this.jobName) { this.$message.warning(`${i18n.$t('Please enter Job Name(required)')}`) return false } switch (this.sourceType) { case 'MYSQL': if (!this.$refs.refSourceDs._verifDatasource()) { return false } if (this.srcQueryType === '1') { if (!editor.getValue()) { this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`) return false } this.sourceMysqlParams.srcTable = '' this.sourceMysqlParams.srcColumnType = '0' this.sourceMysqlParams.srcColumns = '' } else { if (this.sourceMysqlParams.srcTable === '') { this.$message.warning(`${i18n.$t('Please enter Mysql Table(required)')}`) return false } this.sourceMysqlParams.srcQuerySql = '' if (this.sourceMysqlParams.srcColumnType === '1' && this.sourceMysqlParams.srcColumns === '') { this.$message.warning(`${i18n.$t('Please enter Columns (Comma separated)')}`) return false } if (this.sourceMysqlParams.srcColumnType === '0') { 
this.sourceMysqlParams.srcColumns = '' } } break case 'HDFS': if (this.sourceHdfsParams.exportDir === '') { this.$message.warning(`${i18n.$t('Please enter Export Dir(required)')}`) return false } break case 'HIVE': if (this.sourceHiveParams.hiveDatabase === '') { this.$message.warning(`${i18n.$t('Please enter Hive Database(required)')}`) return false } if (this.sourceHiveParams.hiveTable === '') { this.$message.warning(`${i18n.$t('Please enter Hive Table(required)')}`) return false } break default: break } switch (this.targetType) { case 'HIVE': if (this.targetHiveParams.hiveDatabase === '') { this.$message.warning(`${i18n.$t('Please enter Hive Database(required)')}`) return false } if (this.targetHiveParams.hiveTable === '') { this.$message.warning(`${i18n.$t('Please enter Hive Table(required)')}`) return false } break case 'HDFS': if (this.targetHdfsParams.targetPath === '') { this.$message.warning(`${i18n.$t('Please enter Target Dir(required)')}`) return false } break case 'MYSQL': if (!this.$refs.refTargetDs._verifDatasource()) { return false } if (this.targetMysqlParams.targetTable === '') { this.$message.warning(`${i18n.$t('Please enter Mysql Table(required)')}`) return false } break default: break } sqoopParams.jobName = this.jobName sqoopParams.hadoopCustomParams = this.hadoopCustomParams sqoopParams.sqoopAdvancedParams = this.sqoopAdvancedParams sqoopParams.concurrency = this.concurrency sqoopParams.modelType = this.modelType sqoopParams.sourceType = this.sourceType sqoopParams.targetType = this.targetType sqoopParams.targetParams = this._handleTargetParams() sqoopParams.sourceParams = this._handleSourceParams() } // storage this.$emit('on-params', sqoopParams) return true }, /** * Processing code highlighting */ _handlerEditor () { this._destroyEditor() editor = codemirror('code-sqoop-mirror', { mode: 'sql', readOnly: this.isDetails }) this.keypress = () => { if (!editor.getOption('readOnly')) { editor.showHint({ completeSingle: false }) } } this.changes = () => { this._cacheParams() } // Monitor keyboard editor.on('keypress', this.keypress) editor.on('changes', this.changes) editor.setValue(this.sourceMysqlParams.srcQuerySql) return editor }, /** * Processing code highlighting */ _handlerShellEditor () { this._destroyShellEditor() // shellEditor shellEditor = codemirror('code-shell-mirror', { mode: 'shell', readOnly: this.isDetails }) this.keypress = () => { if (!shellEditor.getOption('readOnly')) { shellEditor.showHint({ completeSingle: false }) } } // Monitor keyboard shellEditor.on('keypress', this.keypress) shellEditor.setValue(this.customShell) return shellEditor }, /** * return localParams */ _onLocalParams (a) { this.localParams = a }, /** * return hadoopParams */ _onHadoopCustomParams (a) { this.hadoopCustomParams = a }, /** * return sqoopAdvancedParams */ _onSqoopAdvancedParams (a) { this.sqoopAdvancedParams = a }, _cacheParams () { this.$emit('on-cache-params', { concurrency: this.concurrency, modelType: this.modelType, sourceType: this.sourceType, targetType: this.targetType, sourceParams: this._handleSourceParams(), targetParams: this._handleTargetParams(), localParams: this.localParams }) }, _destroyEditor () { if (editor) { editor.toTextArea() // Uninstall editor.off($('.code-sqoop-mirror'), 'keypress', this.keypress) editor.off($('.code-sqoop-mirror'), 'changes', this.changes) editor = null } }, _destroyShellEditor () { if (shellEditor) { shellEditor.toTextArea() // Uninstall shellEditor.off($('.code-shell-mirror'), 'keypress', this.keypress) 
shellEditor.off($('.code-shell-mirror'), 'changes', this.changes) } } }, watch: { // Listening to sqlType sqlType (val) { if (val === 0) { this.showType = [] } if (val !== 0) { this.title = '' } }, // Listening data source type (val) { if (val !== 'HIVE') { this.connParams = '' } }, // Watch the cacheParams cacheParams (val) { this._cacheParams() } }, created () { this._destroyEditor() let o = this.backfillItem // Non-null objects represent backfill if (!_.isEmpty(o)) { this.jobType = o.params.jobType this.isCustomTask = false if (this.jobType === 'CUSTOM') { this.customShell = o.params.customShell this.isCustomTask = true } else { this.jobName = o.params.jobName this.hadoopCustomParams = o.params.hadoopCustomParams this.sqoopAdvancedParams = o.params.sqoopAdvancedParams this.concurrency = o.params.concurrency || 1 this.modelType = o.params.modelType this.sourceType = o.params.sourceType this._getTargetTypeList(this.sourceType) this.targetType = o.params.targetType this._getSourceParams(o.params.sourceParams) this._getTargetParams(o.params.targetParams) this.localParams = o.params.localParams } } }, mounted () { setTimeout(() => { this._handlerEditor() }, 200) setTimeout(() => { this._handlerShellEditor() }, 200) setTimeout(() => { this.srcQueryType = this.sourceMysqlParams.srcQueryType }, 500) }, destroyed () { /** * Destroy the editor instance */ if (editor) { editor.toTextArea() // Uninstall editor.off($('.code-sqoop-mirror'), 'keypress', this.keypress) editor.off($('.code-sqoop-mirror'), 'changes', this.changes) editor = null } }, computed: { cacheParams () { return { concurrency: this.concurrency, modelType: this.modelType, sourceType: this.sourceType, targetType: this.targetType, localParams: this.localParams, sourceMysqlParams: this.sourceMysqlParams, sourceHdfsParams: this.sourceHdfsParams, sourceHiveParams: this.sourceHiveParams, targetHdfsParams: this.targetHdfsParams, targetMysqlParams: this.targetMysqlParams, targetHiveParams: this.targetHiveParams } } }, components: { mListBox, mDatasource, mLocalParams, mScriptBox } } </script>
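One defensive note on the file above: `_getSourceParams` and `_getTargetParams` feed stored strings straight into `JSON.parse`, so a definition saved with malformed params can throw while the form is opening. A guarded parse is one possible mitigation; this is a sketch with invented names, not the project's fix:

```js
// Sketch only: parse stored task params without letting a bad string
// take down the whole form.
function safeParseParams (raw, fallback) {
  try {
    return raw ? JSON.parse(raw) : fallback
  } catch (e) {
    console.warn('unparsable task params, falling back to defaults', e)
    return fallback
  }
}

// usage sketch: this.sourceMysqlParams = safeParseParams(data, this.sourceMysqlParams)
```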
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,103
[Bug] [Process definition] sqoop component issues
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

1. The sqoop component's custom parameters are not validated, so multiple empty parameters can be added; this causes an exception when the workflow is selected by a dependent component.
2. If you add a sqoop component to a workflow together with multiple custom parameters, saving results in data loss for the entire workflow.

### What you expected to happen

The sqoop component should enforce that the parameter name is required, and the workflow should save normally without losing data.

### How to reproduce

1. Create a workflow and add a component such as a SQL or SHELL component; it can be saved and reopened normally.
2. Add a sqoop component without custom parameters; saving and reopening still work normally.
3. Add multiple empty parameters to the sqoop component, save, reopen, and find that everything is gone.

### Anything else

_No response_

### Version

2.0.0

### Are you willing to submit PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7103
https://github.com/apache/dolphinscheduler/pull/7119
9fffbdb3163381002b7fd6d15e2698cdc5cded59
6beae191e2df497b7624728f0208c3a5ab7f0543
"2021-12-01T08:39:17Z"
java
"2021-12-02T11:33:51Z"
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': '用户名', 'Please enter user name': '请输入用户名', Password: '密码', 'Please enter your password': '请输入密码', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间', Login: '登录', Home: '首页', 'Failed to create node to save': '未创建节点保存失败', 'Global parameters': '全局参数', 'Local parameters': '局部参数', 'Copy success': '复制成功', 'The browser does not support automatic copying': '该浏览器不支持自动复制', 'Whether to save the DAG graph': '是否保存DAG图', 'Current node settings': '当前节点设置', 'View history': '查看历史', 'View log': '查看日志', 'Force success': '强制成功', 'Enter this child node': '进入该子节点', 'Node name': '节点名称', 'Please enter name (required)': '请输入名称(必填)', 'Run flag': '运行标志', Normal: '正常', 'Prohibition execution': '禁止执行', 'Please enter description': '请输入描述', 'Number of failed retries': '失败重试次数', Times: '次', 'Failed retry interval': '失败重试间隔', Minute: '分', 'Delay execution time': '延时执行时间', 'Delay execution': '延时执行', 'Forced success': '强制成功', Cancel: '取消', 'Confirm add': '确认添加', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流', 'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流', 'Name already exists': '名称已存在请重新输入', 'Download Log': '下载日志', 'Refresh Log': '刷新日志', 'Enter full screen': '进入全屏', 'Cancel full screen': '取消全屏', Close: '关闭', 'Update log success': '更新日志成功', 'No more logs': '暂无更多日志', 'No log': '暂无日志', 'Loading Log...': '正在努力请求日志中...', 'Set the DAG diagram name': '设置DAG图名称', 'Please enter description(optional)': '请输入描述(选填)', 'Set global': '设置全局', 'Whether to go online the process definition': '是否上线流程定义', 'Whether to update the process definition': '是否更新流程定义', Add: '添加', 'DAG graph name cannot be empty': 'DAG图名称不能为空', 'Create Datasource': '创建数据源', 'Project Home': '工作流监控', 'Project Manage': '项目管理', 'Create Project': '创建项目', 'Cron Manage': '定时管理', 'Copy Workflow': '复制工作流', 'Tenant Manage': '租户管理', 'Create Tenant': '创建租户', 'User Manage': '用户管理', 'Create User': '创建用户', 'User Information': '用户信息', 'Edit Password': '密码修改', Success: '成功', Failed: '失败', Delete: '删除', 'Please choose': '请选择', 'Please enter a positive integer': '请输入正整数', 'Program Type': '程序类型', 'Main Class': '主函数的Class', 'Main Jar Package': '主Jar包', 'Please enter main jar package': '请选择主Jar包', 'Please enter main class': '请填写主函数的Class', 'Main Arguments': '主程序参数', 'Please enter main arguments': '请输入主程序参数', 'Option Parameters': '选项参数', 'Please enter option parameters': '请输入选项参数', Resources: '资源', 'Custom Parameters': '自定义参数', 'Custom template': '自定义模版', Datasource: '数据源', methods: '方法', 'Please enter the procedure method': '请输入存储脚本 \n\n调用存储过程:{call <procedure-name>[(<arg1>,<arg2>, 
...)]}\n\n调用存储函数:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': '示例:{call <procedure-name>[(?,?, ...)]} 或 {?= call <procedure-name>[(?,?, ...)]}', Script: '脚本', 'Please enter script(required)': '请输入脚本(必填)', 'Deploy Mode': '部署方式', 'Driver Cores': 'Driver核心数', 'Please enter Driver cores': '请输入Driver核心数', 'Driver Memory': 'Driver内存数', 'Please enter Driver memory': '请输入Driver内存数', 'Executor Number': 'Executor数量', 'Please enter Executor number': '请输入Executor数量', 'The Executor number should be a positive integer': 'Executor数量为正整数', 'Executor Memory': 'Executor内存数', 'Please enter Executor memory': '请输入Executor内存数', 'Executor Cores': 'Executor核心数', 'Please enter Executor cores': '请输入Executor核心数', 'Memory should be a positive integer': '内存数为数字', 'Core number should be positive integer': '核心数为正整数', 'Flink Version': 'Flink版本', 'JobManager Memory': 'JobManager内存数', 'Please enter JobManager memory': '请输入JobManager内存数', 'TaskManager Memory': 'TaskManager内存数', 'Please enter TaskManager memory': '请输入TaskManager内存数', 'Slot Number': 'Slot数量', 'Please enter Slot number': '请输入Slot数量', Parallelism: '并行度', 'Custom Parallelism': '自定义并行度', 'Please enter Parallelism': '请输入并行度', 'Parallelism number should be positive integer': '并行度必须为正整数', 'Parallelism tip': '如果存在大量任务需要补数时,可以利用自定义并行度将补数的任务线程设置成合理的数值,避免对服务器造成过大的影响', 'TaskManager Number': 'TaskManager数量', 'Please enter TaskManager number': '请输入TaskManager数量', 'App Name': '任务名称', 'Please enter app name(optional)': '请输入任务名称(选填)', 'SQL Type': 'sql类型', 'Send Email': '发送邮件', 'Log display': '日志显示', 'rows of result': '行查询结果', Title: '主题', 'Please enter the title of email': '请输入邮件主题', Table: '表名', TableMode: '表格', Attachment: '附件', 'SQL Parameter': 'sql参数', 'SQL Statement': 'sql语句', 'UDF Function': 'UDF函数', 'Please enter a SQL Statement(required)': '请输入sql语句(必填)', 'Please enter a JSON Statement(required)': '请输入json语句(必填)', 'One form or attachment must be selected': '表格、附件必须勾选一个', 'Mail subject required': '邮件主题必填', 'Child Node': '子节点', 'Please select a sub-Process': '请选择子工作流', Edit: '编辑', 'Switch To This Version': '切换到该版本', 'Datasource Name': '数据源名称', 'Please enter datasource name': '请输入数据源名称', IP: 'IP主机名', 'Please enter IP': '请输入IP主机名', Port: '端口', 'Please enter port': '请输入端口', 'Database Name': '数据库名', 'Please enter database name': '请输入数据库名', 'Oracle Connect Type': '服务名或SID', 'Oracle Service Name': '服务名', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc连接参数', 'Test Connect': '测试连接', 'Please enter resource name': '请输入数据源名称', 'Please enter resource folder name': '请输入资源文件夹名称', 'Please enter a non-query SQL statement': '请输入非查询sql语句', 'Please enter IP/hostname': '请输入IP/主机名', 'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式', '#': '编号', 'Datasource Type': '数据源类型', 'Datasource Parameter': '数据源参数', 'Create Time': '创建时间', 'Update Time': '更新时间', Operation: '操作', 'Current Version': '当前版本', 'Click to view': '点击查看', 'Delete?': '确定删除吗?', 'Switch Version Successfully': '切换版本成功', 'Confirm Switch To This Version?': '确定切换到该版本吗?', Confirm: '确定', 'Task status statistics': '任务状态统计', Number: '数量', State: '状态', 'Dry-run flag': '空跑标识', 'Process Status Statistics': '流程状态统计', 'Process Definition Statistics': '流程定义统计', 'Project Name': '项目名称', 'Please enter name': '请输入名称', 'Owned Users': '所属用户', 'Process Pid': '进程Pid', 'Zk registration directory': 'zk注册目录', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': '最后心跳时间', 'Edit Tenant': '编辑租户', 'OS Tenant Code': '操作系统租户', 'Tenant Name': '租户名称', Queue: 
'队列', 'Please select a queue': '默认为租户关联队列', 'Please enter the os tenant code in English': '请输入操作系统租户只允许英文', 'Please enter os tenant code in English': '请输入英文操作系统租户', 'Please enter os tenant code': '请输入操作系统租户', 'Please enter tenant Name': '请输入租户名称', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合', 'Edit User': '编辑用户', Tenant: '租户', Email: '邮件', Phone: '手机', 'User Type': '用户类型', 'Please enter phone number': '请输入手机', 'Please enter email': '请输入邮箱', 'Please enter the correct email format': '请输入正确的邮箱格式', 'Please enter the correct mobile phone format': '请输入正确的手机格式', Project: '项目', Authorize: '授权', 'File resources': '文件资源', 'UDF resources': 'UDF资源', 'UDF resources directory': 'UDF资源目录', 'Please select UDF resources directory': '请选择UDF资源目录', 'Alarm group': '告警组', 'Alarm group required': '告警组必填', 'Edit alarm group': '编辑告警组', 'Create alarm group': '创建告警组', 'Create Alarm Instance': '创建告警实例', 'Edit Alarm Instance': '编辑告警实例', 'Group Name': '组名称', 'Alarm instance name': '告警实例名称', 'Alarm plugin name': '告警插件名称', 'Select plugin': '选择插件', 'Select Alarm plugin': '请选择告警插件', 'Please enter group name': '请输入组名称', 'Instance parameter exception': '实例参数异常', 'Group Type': '组类型', 'Alarm plugin instance': '告警插件实例', 'Select Alarm plugin instance': '请选择告警插件实例', Remarks: '备注', SMS: '短信', 'Managing Users': '管理用户', Permission: '权限', Administrator: '管理员', 'Confirm Password': '确认密码', 'Please enter confirm password': '请输入确认密码', 'Password cannot be in Chinese': '密码不能为中文', 'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码', 'Confirmation password cannot be in Chinese': '确认密码不能为中文', 'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码', 'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认', 'Please select the datasource': '请选择数据源', 'Please select resources': '请选择资源', Query: '查询', 'Non Query': '非查询', 'prop(required)': 'prop(必填)', 'value(optional)': 'value(选填)', 'value(required)': 'value(必填)', 'prop is empty': 'prop不能为空', 'value is empty': 'value不能为空', 'prop is repeat': 'prop中有重复', 'Start Time': '开始时间', 'End Time': '结束时间', crontab: 'crontab', 'Failure Strategy': '失败策略', online: '上线', offline: '下线', 'Task Status': '任务状态', 'Process Instance': '工作流实例', 'Task Instance': '任务实例', 'Select date range': '选择日期区间', startDate: '开始日期', endDate: '结束日期', Date: '日期', Waiting: '等待', Execution: '执行中', Finish: '完成', 'Create File': '创建文件', 'Create folder': '创建文件夹', 'File Name': '文件名称', 'Folder Name': '文件夹名称', 'File Format': '文件格式', 'Folder Format': '文件夹格式', 'File Content': '文件内容', 'Upload File Size': '文件大小不能超过1G', Create: '创建', 'Please enter the resource content': '请输入资源内容', 'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行', 'File Details': '文件详情', 'Download Details': '下载详情', Return: '返回', Save: '保存', 'File Manage': '文件管理', 'Upload Files': '上传文件', 'Create UDF Function': '创建UDF函数', 'Upload UDF Resources': '上传UDF资源', 'Service-Master': '服务管理-Master', 'Service-Worker': '服务管理-Worker', 'Process Name': '工作流名称', Executor: '执行用户', 'Run Type': '运行类型', 'Scheduling Time': '调度时间', 'Run Times': '运行次数', host: 'host', 'fault-tolerant sign': '容错标识', Rerun: '重跑', 'Recovery Failed': '恢复失败', Stop: '停止', Pause: '暂停', 'Recovery Suspend': '恢复运行', Gantt: '甘特图', 'Node Type': '节点类型', 'Submit Time': '提交时间', Duration: '运行时长', 'Retry Count': '重试次数', 'Task Name': '任务名称', 'Task Date': '任务日期', 'Source Table': '源表', 'Record Number': '记录数', 'Target Table': '目标表', 'Online viewing type is not supported': '不支持在线查看类型', Size: '大小', Rename: 
'重命名', Download: '下载', Export: '导出', 'Version Info': '版本信息', Submit: '提交', 'Edit UDF Function': '编辑UDF函数', type: '类型', 'UDF Function Name': 'UDF函数名称', FILE: '文件', UDF: 'UDF', 'File Subdirectory': '文件子目录', 'Please enter a function name': '请输入函数名', 'Package Name': '包名类名', 'Please enter a Package name': '请输入包名类名', Parameter: '参数', 'Please enter a parameter': '请输入参数', 'UDF Resources': 'UDF资源', 'Upload Resources': '上传资源', Instructions: '使用说明', 'Please enter a instructions': '请输入使用说明', 'Please enter a UDF function name': '请输入UDF函数名称', 'Select UDF Resources': '请选择UDF资源', 'Class Name': '类名', 'Jar Package': 'jar包', 'Library Name': '库名', 'UDF Resource Name': 'UDF资源名称', 'File Size': '文件大小', Description: '描述', 'Drag Nodes and Selected Items': '拖动节点和选中项', 'Select Line Connection': '选择线条连接', 'Delete selected lines or nodes': '删除选中的线或节点', 'Full Screen': '全屏', Unpublished: '未发布', 'Start Process': '启动工作流', 'Execute from the current node': '从当前节点开始执行', 'Recover tolerance fault process': '恢复被容错的工作流', 'Resume the suspension process': '恢复运行流程', 'Execute from the failed nodes': '从失败节点开始执行', 'Complement Data': '补数', 'Scheduling execution': '调度执行', 'Recovery waiting thread': '恢复等待线程', 'Submitted successfully': '提交成功', Executing: '正在执行', 'Ready to pause': '准备暂停', 'Ready to stop': '准备停止', 'Need fault tolerance': '需要容错', Kill: 'Kill', 'Waiting for thread': '等待线程', 'Waiting for dependence': '等待依赖', Start: '运行', Copy: '复制节点', 'Copy name': '复制名称', 'Copy path': '复制路径', 'Please enter keyword': '请输入关键词', 'File Upload': '文件上传', 'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!', 'Drag area upload': '拖动区域上传', Upload: '上传', 'ReUpload File': '重新上传文件', 'Please enter file name': '请输入文件名', 'Please select the file to upload': '请选择要上传的文件', 'Resources manage': '资源中心', Security: '安全中心', Logout: '退出', 'No data': '查询无数据', 'Uploading...': '文件上传中', 'Loading...': '正在努力加载中...', List: '列表', 'Unable to download without proper url': '无下载url无法下载', Process: '工作流', 'Process definition': '工作流定义', 'Task record': '任务记录', 'Warning group manage': '告警组管理', 'Warning instance manage': '告警实例管理', 'Servers manage': '服务管理', 'UDF manage': 'UDF管理', 'Resource manage': '资源管理', 'Function manage': '函数管理', 'Edit password': '修改密码', 'Ordinary users': '普通用户', 'Create process': '创建工作流', 'Import process': '导入工作流', 'Timing state': '定时状态', Timing: '定时', Timezone: '时区', TreeView: '树形图', 'Mailbox already exists! 
Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复', 'Mailbox input is illegal': '邮箱输入不合法', 'Please set the parameters before starting': '启动前请先设置参数', Continue: '继续', End: '结束', 'Node execution': '节点执行', 'Backward execution': '向后执行', 'Forward execution': '向前执行', 'Execute only the current node': '仅执行当前节点', 'Notification strategy': '通知策略', 'Notification group': '通知组', 'Please select a notification group': '请选择通知组', 'Whether it is a complement process?': '是否补数', 'Schedule date': '调度日期', 'Mode of execution': '执行方式', 'Serial execution': '串行执行', 'Parallel execution': '并行执行', 'Set parameters before timing': '定时前请先设置参数', 'Start and stop time': '起止时间', 'Please select time': '请选择时间', 'Please enter crontab': '请输入crontab', none_1: '都不发', success_1: '成功发', failure_1: '失败发', All_1: '成功或失败都发', Toolbar: '工具栏', 'View variables': '查看变量', 'Format DAG': '格式化DAG', 'Refresh DAG status': '刷新DAG状态', Return_1: '返回上一节点', 'Please enter format': '请输入格式为', 'connection parameter': '连接参数', 'Process definition details': '流程定义详情', 'Create process definition': '创建流程定义', 'Scheduled task list': '定时任务列表', 'Process instance details': '流程实例详情', 'Create Resource': '创建资源', 'User Center': '用户中心', AllStatus: '全部状态', None: '无', Name: '名称', 'Process priority': '流程优先级', 'Task priority': '任务优先级', 'Task timeout alarm': '任务超时告警', 'Timeout strategy': '超时策略', 'Timeout alarm': '超时告警', 'Timeout failure': '超时失败', 'Timeout period': '超时时长', 'Waiting Dependent complete': '等待依赖完成', 'Waiting Dependent start': '等待依赖启动', 'Check interval': '检查间隔', 'Timeout must be longer than check interval': '超时时间必须比检查间隔长', 'Timeout strategy must be selected': '超时策略必须选一个', 'Timeout must be a positive integer': '超时时长必须为正整数', 'Add dependency': '添加依赖', 'Whether dry-run': '是否空跑', and: '且', or: '或', month: '月', week: '周', day: '日', hour: '时', Running: '正在运行', 'Waiting for dependency to complete': '等待依赖完成', Selected: '已选', CurrentHour: '当前小时', Last1Hour: '前1小时', Last2Hours: '前2小时', Last3Hours: '前3小时', Last24Hours: '前24小时', today: '今天', Last1Days: '昨天', Last2Days: '前两天', Last3Days: '前三天', Last7Days: '前七天', ThisWeek: '本周', LastWeek: '上周', LastMonday: '上周一', LastTuesday: '上周二', LastWednesday: '上周三', LastThursday: '上周四', LastFriday: '上周五', LastSaturday: '上周六', LastSunday: '上周日', ThisMonth: '本月', LastMonth: '上月', LastMonthBegin: '上月初', LastMonthEnd: '上月末', 'Refresh status succeeded': '刷新状态成功', 'Queue manage': 'Yarn 队列管理', 'Create queue': '创建队列', 'Edit queue': '编辑队列', 'Datasource manage': '数据源中心', 'History task record': '历史任务记录', 'Please go online': '不要忘记上线', 'Queue value': '队列值', 'Please enter queue value': '请输入队列值', 'Worker group manage': 'Worker分组管理', 'Create worker group': '创建Worker分组', 'Edit worker group': '编辑Worker分组', 'Token manage': '令牌管理', 'Create token': '创建令牌', 'Edit token': '编辑令牌', Addresses: '地址', 'Worker Addresses': 'Worker地址', 'Please select the worker addresses': '请选择Worker地址', 'Failure time': '失效时间', 'Expiration time': '失效时间', User: '用户', 'Please enter token': '请输入令牌', 'Generate token': '生成令牌', Monitor: '监控中心', Group: '分组', 'Queue statistics': '队列统计', 'Command status statistics': '命令状态统计', 'Task kill': '等待kill任务', 'Task queue': '等待执行任务', 'Error command count': '错误指令数', 'Normal command count': '正确指令数', Manage: '管理', 'Number of connections': '连接数', Sent: '发送量', Received: '接收量', 'Min latency': '最低延时', 'Avg latency': '平均延时', 'Max latency': '最大延时', 'Node count': '节点数', 'Query time': '当前查询时间', 'Node self-test status': '节点自检状态', 'Health status': '健康状态', 'Max connections': '最大连接数', 'Threads connections': '当前连接数', 'Max used connections': '同时使用连接最大数', 'Threads 
running connections': '数据库当前活跃连接数', 'Worker group': 'Worker分组', 'Please enter a positive integer greater than 0': '请输入大于 0 的正整数', 'Pre Statement': '前置sql', 'Post Statement': '后置sql', 'Statement cannot be empty': '语句不能为空', 'Process Define Count': '工作流定义数', 'Process Instance Running Count': '正在运行的流程数', 'command number of waiting for running': '待执行的命令数', 'failure command number': '执行失败的命令数', 'tasks number of waiting running': '待运行任务数', 'task number of ready to kill': '待杀死任务数', 'Statistics manage': '统计管理', statistics: '统计', 'select tenant': '选择租户', 'Please enter Principal': '请输入Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path', 'The start time must not be the same as the end': '开始时间和结束时间不能相同', 'Startup parameter': '启动参数', 'Startup type': '启动类型', 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', 'Complement range': '补数范围', 'Http Url': '请求地址', 'Http Method': '请求类型', 'Http Parameters': '请求参数', 'Http Parameters Key': '参数名', 'Http Parameters Position': '参数位置', 'Http Parameters Value': '参数值', 'Http Check Condition': '校验条件', 'Http Condition': '校验内容', 'Please Enter Http Url': '请填写请求地址(必填)', 'Please Enter Http Condition': '请填写校验内容', 'There is no data for this period of time': '该时间段无数据', 'Worker addresses cannot be empty': 'Worker地址不能为空', 'Please generate token': '请生成Token', 'Please Select token': '请选择Token失效时间', 'Spark Version': 'Spark版本', TargetDataBase: '目标库', TargetTable: '目标表', TargetJobName: '目标任务名', 'Please enter Pigeon job name': '请输入Pigeon任务名', 'Please enter the table of target': '请输入目标表名', 'Please enter a Target Table(required)': '请输入目标表(必填)', SpeedByte: '限流(字节数)', SpeedRecord: '限流(记录数)', '0 means unlimited by byte': 'KB,0代表不限制', '0 means unlimited by count': '0代表不限制', 'Modify User': '修改用户', 'Whether directory': '是否文件夹', Yes: '是', No: '否', 'Hadoop Custom Params': 'Hadoop参数', 'Sqoop Advanced Parameters': 'Sqoop参数', 'Sqoop Job Name': '任务名称', 'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)', 'Please enter Mysql Table(required)': '请输入Mysql表名(必填)', 'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开', 'Please enter Target Dir(required)': '请输入目标路径(必填)', 'Please enter Export Dir(required)': '请输入数据源路径(必填)', 'Please enter Hive Database(required)': '请输入Hive数据库(必填)', 'Please enter Hive Table(required)': '请输入Hive表名(必填)', 'Please enter Hive Partition Keys': '请输入分区键', 'Please enter Hive Partition Values': '请输入分区值', 'Please enter Replace Delimiter': '请输入替换分隔符', 'Please enter Fields Terminated': '请输入列分隔符', 'Please enter Lines Terminated': '请输入行分隔符', 'Please enter Concurrency': '请输入并发度', 'Please enter Update Key': '请输入更新列', 'Please enter Job Name(required)': '请输入任务名称(必填)', 'Please enter Custom Shell(required)': '请输入自定义脚本', Direct: '流向', Type: '类型', ModelType: '模式', ColumnType: '列类型', Database: '数据库', Column: '列', 'Map Column Hive': 'Hive类型映射', 'Map Column Java': 'Java类型映射', 'Export Dir': '数据源路径', 'Hive partition Keys': 'Hive 分区键', 'Hive partition Values': 'Hive 分区值', FieldsTerminated: '列分隔符', LinesTerminated: '行分隔符', IsUpdate: '是否更新', UpdateKey: '更新列', UpdateMode: '更新类型', 'Target Dir': '目标路径', DeleteTargetDir: '是否删除目录', FileType: '保存格式', CompressionCodec: '压缩类型', CreateHiveTable: '是否创建新表', DropDelimiter: '是否删除分隔符', OverWriteSrc: 
'是否覆盖数据源', ReplaceDelimiter: '替换分隔符', Concurrency: '并发度', Form: '表单', OnlyUpdate: '只更新', AllowInsert: '无更新便插入', 'Data Source': '数据来源', 'Data Target': '数据目的', 'All Columns': '全表导入', 'Some Columns': '选择列', 'Branch flow': '分支流转', 'Custom Job': '自定义任务', 'Custom Script': '自定义脚本', 'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点', 'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填', 'No resources exist': '不存在资源', 'Please delete all non-existing resources': '请删除所有不存在资源', 'Unauthorized or deleted resources': '未授权或已删除资源', 'Please delete all non-existent resources': '请删除所有未授权或已删除资源', Kinship: '工作流关系', Reset: '重置', KinshipStateActive: '当前选择', KinshipState1: '已上线', KinshipState0: '工作流未上线', KinshipState10: '调度未上线', 'Dag label display control': 'Dag节点名称显隐', Enable: '启用', Disable: '停用', 'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!', 'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存', 'User name length is between 3 and 39': '用户名长度在3~39之间', 'Timeout Settings': '超时设置', 'Connect Timeout': '连接超时', 'Socket Timeout': 'Socket超时', 'Connect timeout be a positive integer': '连接超时必须为数字', 'Socket Timeout be a positive integer': 'Socket超时必须为数字', ms: '毫秒', 'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': '请选择waterdrop配置文件', zkDirectory: 'zk注册目录', 'Directory detail': '查看目录详情', 'Connection name': '连线名', 'Current connection settings': '当前连线设置', 'Please save the DAG before formatting': '格式化前请先保存DAG', 'Batch copy': '批量复制', 'Related items': '关联项目', 'Project name is required': '项目名称必填', 'Batch move': '批量移动', Version: '版本', 'Pre tasks': '前置任务', 'Running Memory': '运行内存', 'Max Memory': '最大内存', 'Min Memory': '最小内存', 'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建', Info: '提示', 'Datasource userName': '所属用户', 'Resource userName': '所属用户', 'Environment manage': '环境管理', 'Create environment': '创建环境', 'Edit environment': '编辑', 'Environment value': 'Environment value', 'Environment Name': '环境名称', 'Environment Code': '环境编码', 'Environment Config': '环境配置', 'Environment Desc': '详细描述', 'Environment Worker Group': 'Worker组', 'Please enter environment config': '请输入环境配置信息', 'Please enter environment desc': '请输入详细描述', 'Please select worker groups': '请选择Worker分组', condition: '条件', 'The condition content cannot be empty': '条件内容不能为空', 'Reference from': '使用已有任务', 'No more...': '没有更多了...', 'Task Definition': '任务定义', 'Create task': '创建任务', 'Task Type': '任务类型', 'Process execute type': '执行策略', parallel: '并行', 'Serial wait': '串行等待', 'Serial discard': '串行抛弃', 'Serial priority': '串行优先', 'Recover serial wait': '串行恢复', IsEnableProxy: '启用代理', WebHook: 'Web钩子', Keyword: '密钥', Proxy: '代理', receivers: '收件人', receiverCcs: '抄送人', transportProtocol: '邮件协议', serverHost: 'SMTP服务器', serverPort: 'SMTP端口', sender: '发件人', enableSmtpAuth: '请求认证', starttlsEnable: 'STARTTLS连接', sslEnable: 'SSL连接', smtpSslTrust: 'SSL证书信任', url: 'URL', requestType: '请求方式', headerParams: '请求头', bodyParams: '请求体', contentField: '内容字段', path: '脚本路径', userParams: '自定义参数', corpId: '企业ID', secret: '密钥', teamSendMsg: '群发信息', userSendMsg: '群员信息', agentId: '应用ID', users: '群员', Username: '用户名', showType: '内容展示类型', 'Please select a task type (required)': '请选择任务类型(必选)', layoutType: '布局类型', gridLayout: '网格布局', dagreLayout: '层次布局', rows: '行数', cols: '列数', processOnline: '已上线', searchNode: '搜索节点', dagScale: '缩放', 
workflowName: '工作流名称', scheduleStartTime: '定时开始时间', scheduleEndTime: '定时结束时间', crontabExpression: 'Crontab', workflowPublishStatus: '工作流上线状态', schedulePublishStatus: '定时状态' }
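The locale files embedded in these records are flat key-to-string maps consumed by the UI's i18n layer. A minimal sketch of how such a map can back a translation lookup, assuming fallback-to-key behavior (the trimmed dictionaries and the `t()` helper are illustrative only, not DolphinScheduler's actual i18n plumbing): a missing translation then surfaces verbatim in the rendered page, which is exactly the kind of gap the English-support record below addresses.

```js
// Minimal sketch (not the project's actual i18n plumbing): a flat
// locale map like zh_CN.js above can back a simple translation helper.
// The trimmed dictionaries and t() are assumptions for illustration.
const zh_CN = {
  workflowName: '工作流名称',
  workflowPublishStatus: '工作流上线状态'
}
const en_US = {
  workflowName: 'Workflow Name',
  workflowPublishStatus: 'Workflow Publish Status'
}
const locales = { zh_CN, en_US }

// Fall back to the raw key when a locale has no entry, which is why an
// untranslated key shows up verbatim in the rendered page.
function t (lang, key) {
  const dict = locales[lang] || {}
  return Object.prototype.hasOwnProperty.call(dict, key) ? dict[key] : key
}

console.log(t('zh_CN', 'workflowName')) // 工作流名称
console.log(t('en_US', 'workflowName')) // Workflow Name
console.log(t('en_US', 'missingKey'))   // missingKey
```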
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,128
[Feature][Workflow Relationship] Improve English support
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ![1638441032](https://user-images.githubusercontent.com/19239641/144405129-c58241e0-6424-4288-affd-089e2a9b995e.png) ![image](https://user-images.githubusercontent.com/19239641/144405171-82c0bd73-1969-466b-8fcc-cac48b6f8434.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7128
https://github.com/apache/dolphinscheduler/pull/7129
6beae191e2df497b7624728f0208c3a5ab7f0543
791ed6de77893d7dec7595e7cfeb775a2122e68c
"2021-12-02T10:32:05Z"
java
"2021-12-02T17:05:45Z"
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': 'User Name', 'Please enter user name': 'Please enter user name', Password: 'Password', 'Please enter your password': 'Please enter your password', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22', Login: 'Login', Home: 'Home', 'Failed to create node to save': 'Failed to create node to save', 'Global parameters': 'Global parameters', 'Local parameters': 'Local parameters', 'Copy success': 'Copy success', 'The browser does not support automatic copying': 'The browser does not support automatic copying', 'Whether to save the DAG graph': 'Whether to save the DAG graph', 'Current node settings': 'Current node settings', 'View history': 'View history', 'View log': 'View log', 'Force success': 'Force success', 'Enter this child node': 'Enter this child node', 'Node name': 'Node name', 'Please enter name (required)': 'Please enter name (required)', 'Run flag': 'Run flag', Normal: 'Normal', 'Prohibition execution': 'Prohibition execution', 'Please enter description': 'Please enter description', 'Number of failed retries': 'Number of failed retries', Times: 'Times', 'Failed retry interval': 'Failed retry interval', Minute: 'Minute', 'Delay execution time': 'Delay execution time', 'Delay execution': 'Delay execution', 'Forced success': 'Forced success', Cancel: 'Cancel', 'Confirm add': 'Confirm add', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', 'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process', 'Name already exists': 'Name already exists', 'Download Log': 'Download Log', 'Refresh Log': 'Refresh Log', 'Enter full screen': 'Enter full screen', 'Cancel full screen': 'Cancel full screen', Close: 'Close', 'Update log success': 'Update log success', 'No more logs': 'No more logs', 'No log': 'No log', 'Loading Log...': 'Loading Log...', 'Set the DAG diagram name': 'Set the DAG diagram name', 'Please enter description(optional)': 'Please enter description(optional)', 'Set global': 'Set global', 'Whether to go online the process definition': 'Whether to go online the process definition', 'Whether to update the process definition': 'Whether to update the process definition', Add: 'Add', 'DAG graph name cannot be empty': 'DAG graph name cannot be empty', 'Create Datasource': 'Create Datasource', 'Project Home': 'Workflow Monitor', 'Project Manage': 'Project', 'Create Project': 'Create Project', 'Cron Manage': 'Cron 
Manage', 'Copy Workflow': 'Copy Workflow', 'Tenant Manage': 'Tenant Manage', 'Create Tenant': 'Create Tenant', 'User Manage': 'User Manage', 'Create User': 'Create User', 'User Information': 'User Information', 'Edit Password': 'Edit Password', Success: 'Success', Failed: 'Failed', Delete: 'Delete', 'Please choose': 'Please choose', 'Please enter a positive integer': 'Please enter a positive integer', 'Program Type': 'Program Type', 'Main Class': 'Main Class', 'Main Jar Package': 'Main Jar Package', 'Please enter main jar package': 'Please enter main jar package', 'Please enter main class': 'Please enter main class', 'Main Arguments': 'Main Arguments', 'Please enter main arguments': 'Please enter main arguments', 'Option Parameters': 'Option Parameters', 'Please enter option parameters': 'Please enter option parameters', Resources: 'Resources', 'Custom Parameters': 'Custom Parameters', 'Custom template': 'Custom template', Datasource: 'Datasource', methods: 'methods', 'Please enter the procedure method': 'Please enter the procedure script \n\ncall procedure:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\ncall function:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': 'example:{call <procedure-name>[(?,?, ...)]} or {?= call <procedure-name>[(?,?, ...)]}', Script: 'Script', 'Please enter script(required)': 'Please enter script(required)', 'Deploy Mode': 'Deploy Mode', 'Driver Cores': 'Driver Cores', 'Please enter Driver cores': 'Please enter Driver cores', 'Driver Memory': 'Driver Memory', 'Please enter Driver memory': 'Please enter Driver memory', 'Executor Number': 'Executor Number', 'Please enter Executor number': 'Please enter Executor number', 'The Executor number should be a positive integer': 'The Executor number should be a positive integer', 'Executor Memory': 'Executor Memory', 'Please enter Executor memory': 'Please enter Executor memory', 'Executor Cores': 'Executor Cores', 'Please enter Executor cores': 'Please enter Executor cores', 'Memory should be a positive integer': 'Memory should be a positive integer', 'Core number should be positive integer': 'Core number should be positive integer', 'Flink Version': 'Flink Version', 'JobManager Memory': 'JobManager Memory', 'Please enter JobManager memory': 'Please enter JobManager memory', 'TaskManager Memory': 'TaskManager Memory', 'Please enter TaskManager memory': 'Please enter TaskManager memory', 'Slot Number': 'Slot Number', 'Please enter Slot number': 'Please enter Slot number', Parallelism: 'Parallelism', 'Custom Parallelism': 'Configure parallelism', 'Please enter Parallelism': 'Please enter Parallelism', 'Parallelism tip': 'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' + 'set the complement task thread to a reasonable value to avoid too large impact on the server.', 'Parallelism number should be positive integer': 'Parallelism number should be positive integer', 'TaskManager Number': 'TaskManager Number', 'Please enter TaskManager number': 'Please enter TaskManager number', 'App Name': 'App Name', 'Please enter app name(optional)': 'Please enter app name(optional)', 'SQL Type': 'SQL Type', 'Send Email': 'Send Email', 'Log display': 'Log display', 'rows of result': 'rows of result', Title: 'Title', 'Please enter the title of email': 'Please enter the title of email', Table: 'Table', TableMode: 'Table', Attachment: 'Attachment', 'SQL Parameter': 'SQL Parameter', 'SQL Statement': 'SQL Statement', 'UDF Function': 'UDF Function', 
'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)', 'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)', 'One form or attachment must be selected': 'One form or attachment must be selected', 'Mail subject required': 'Mail subject required', 'Child Node': 'Child Node', 'Please select a sub-Process': 'Please select a sub-Process', Edit: 'Edit', 'Switch To This Version': 'Switch To This Version', 'Datasource Name': 'Datasource Name', 'Please enter datasource name': 'Please enter datasource name', IP: 'IP', 'Please enter IP': 'Please enter IP', Port: 'Port', 'Please enter port': 'Please enter port', 'Database Name': 'Database Name', 'Please enter database name': 'Please enter database name', 'Oracle Connect Type': 'ServiceName or SID', 'Oracle Service Name': 'ServiceName', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc connect parameters', 'Test Connect': 'Test Connect', 'Please enter resource name': 'Please enter resource name', 'Please enter resource folder name': 'Please enter resource folder name', 'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement', 'Please enter IP/hostname': 'Please enter IP/hostname', 'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format', '#': '#', 'Datasource Type': 'Datasource Type', 'Datasource Parameter': 'Datasource Parameter', 'Create Time': 'Create Time', 'Update Time': 'Update Time', Operation: 'Operation', 'Current Version': 'Current Version', 'Click to view': 'Click to view', 'Delete?': 'Delete?', 'Switch Version Successfully': 'Switch Version Successfully', 'Confirm Switch To This Version?': 'Confirm Switch To This Version?', Confirm: 'Confirm', 'Task status statistics': 'Task Status Statistics', Number: 'Number', State: 'State', 'Dry-run flag': 'Dry-run flag', 'Process Status Statistics': 'Process Status Statistics', 'Process Definition Statistics': 'Process Definition Statistics', 'Project Name': 'Project Name', 'Please enter name': 'Please enter name', 'Owned Users': 'Owned Users', 'Process Pid': 'Process Pid', 'Zk registration directory': 'Zk registration directory', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': 'Last heartbeat time', 'Edit Tenant': 'Edit Tenant', 'OS Tenant Code': 'OS Tenant Code', 'Tenant Name': 'Tenant Name', Queue: 'Yarn Queue', 'Please select a queue': 'default is tenant association queue', 'Please enter the os tenant code in English': 'Please enter the os tenant code in English', 'Please enter os tenant code in English': 'Please enter os tenant code in English', 'Please enter os tenant code': 'Please enter os tenant code', 'Please enter tenant Name': 'Please enter tenant Name', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. 
Only letters or a combination of letters and numbers are allowed', 'Edit User': 'Edit User', Tenant: 'Tenant', Email: 'Email', Phone: 'Phone', 'User Type': 'User Type', 'Please enter phone number': 'Please enter phone number', 'Please enter email': 'Please enter email', 'Please enter the correct email format': 'Please enter the correct email format', 'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format', Project: 'Project', Authorize: 'Authorize', 'File resources': 'File resources', 'UDF resources': 'UDF resources', 'UDF resources directory': 'UDF resources directory', 'Please select UDF resources directory': 'Please select UDF resources directory', 'Alarm group': 'Alarm group', 'Alarm group required': 'Alarm group required', 'Edit alarm group': 'Edit alarm group', 'Create alarm group': 'Create alarm group', 'Create Alarm Instance': 'Create Alarm Instance', 'Edit Alarm Instance': 'Edit Alarm Instance', 'Group Name': 'Group Name', 'Alarm instance name': 'Alarm instance name', 'Alarm plugin name': 'Alarm plugin name', 'Select plugin': 'Select plugin', 'Select Alarm plugin': 'Please select an Alarm plugin', 'Please enter group name': 'Please enter group name', 'Instance parameter exception': 'Instance parameter exception', 'Group Type': 'Group Type', 'Alarm plugin instance': 'Alarm plugin instance', 'Select Alarm plugin instance': 'Please select an Alarm plugin instance', Remarks: 'Remarks', SMS: 'SMS', 'Managing Users': 'Managing Users', Permission: 'Permission', Administrator: 'Administrator', 'Confirm Password': 'Confirm Password', 'Please enter confirm password': 'Please enter confirm password', 'Password cannot be in Chinese': 'Password cannot be in Chinese', 'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password', 'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese', 'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password', 'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password', 'Please select the datasource': 'Please select the datasource', 'Please select resources': 'Please select resources', Query: 'Query', 'Non Query': 'Non Query', 'prop(required)': 'prop(required)', 'value(optional)': 'value(optional)', 'value(required)': 'value(required)', 'prop is empty': 'prop is empty', 'value is empty': 'value is empty', 'prop is repeat': 'prop is repeat', 'Start Time': 'Start Time', 'End Time': 'End Time', crontab: 'crontab', 'Failure Strategy': 'Failure Strategy', online: 'online', offline: 'offline', 'Task Status': 'Task Status', 'Process Instance': 'Process Instance', 'Task Instance': 'Task Instance', 'Select date range': 'Select date range', startDate: 'startDate', endDate: 'endDate', Date: 'Date', Waiting: 'Waiting', Execution: 'Execution', Finish: 'Finish', 'Create File': 'Create File', 'Create folder': 'Create folder', 'File Name': 'File Name', 'Folder Name': 'Folder Name', 'File Format': 'File Format', 'Folder Format': 'Folder Format', 'File Content': 'File Content', 'Upload File Size': 'Upload File size cannot exceed 1g', Create: 'Create', 'Please enter the resource content': 'Please enter the resource content', 'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines', 'File Details': 'File Details', 'Download Details': 'Download Details', Return: 'Return', Save: 'Save', 'File Manage': 
'File Manage', 'Upload Files': 'Upload Files', 'Create UDF Function': 'Create UDF Function', 'Upload UDF Resources': 'Upload UDF Resources', 'Service-Master': 'Service-Master', 'Service-Worker': 'Service-Worker', 'Process Name': 'Process Name', Executor: 'Executor', 'Run Type': 'Run Type', 'Scheduling Time': 'Scheduling Time', 'Run Times': 'Run Times', host: 'host', 'fault-tolerant sign': 'fault-tolerant sign', Rerun: 'Rerun', 'Recovery Failed': 'Recovery Failed', Stop: 'Stop', Pause: 'Pause', 'Recovery Suspend': 'Recovery Suspend', Gantt: 'Gantt', 'Node Type': 'Node Type', 'Submit Time': 'Submit Time', Duration: 'Duration', 'Retry Count': 'Retry Count', 'Task Name': 'Task Name', 'Task Date': 'Task Date', 'Source Table': 'Source Table', 'Record Number': 'Record Number', 'Target Table': 'Target Table', 'Online viewing type is not supported': 'Online viewing type is not supported', Size: 'Size', Rename: 'Rename', Download: 'Download', Export: 'Export', 'Version Info': 'Version Info', Submit: 'Submit', 'Edit UDF Function': 'Edit UDF Function', type: 'type', 'UDF Function Name': 'UDF Function Name', FILE: 'FILE', UDF: 'UDF', 'File Subdirectory': 'File Subdirectory', 'Please enter a function name': 'Please enter a function name', 'Package Name': 'Package Name', 'Please enter a Package name': 'Please enter a Package name', Parameter: 'Parameter', 'Please enter a parameter': 'Please enter a parameter', 'UDF Resources': 'UDF Resources', 'Upload Resources': 'Upload Resources', Instructions: 'Instructions', 'Please enter a instructions': 'Please enter a instructions', 'Please enter a UDF function name': 'Please enter a UDF function name', 'Select UDF Resources': 'Select UDF Resources', 'Class Name': 'Class Name', 'Jar Package': 'Jar Package', 'Library Name': 'Library Name', 'UDF Resource Name': 'UDF Resource Name', 'File Size': 'File Size', Description: 'Description', 'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items', 'Select Line Connection': 'Select Line Connection', 'Delete selected lines or nodes': 'Delete selected lines or nodes', 'Full Screen': 'Full Screen', Unpublished: 'Unpublished', 'Start Process': 'Start Process', 'Execute from the current node': 'Execute from the current node', 'Recover tolerance fault process': 'Recover tolerance fault process', 'Resume the suspension process': 'Resume the suspension process', 'Execute from the failed nodes': 'Execute from the failed nodes', 'Complement Data': 'Complement Data', 'Scheduling execution': 'Scheduling execution', 'Recovery waiting thread': 'Recovery waiting thread', 'Submitted successfully': 'Submitted successfully', Executing: 'Executing', 'Ready to pause': 'Ready to pause', 'Ready to stop': 'Ready to stop', 'Need fault tolerance': 'Need fault tolerance', Kill: 'Kill', 'Waiting for thread': 'Waiting for thread', 'Waiting for dependence': 'Waiting for dependence', Start: 'Start', Copy: 'Copy', 'Copy name': 'Copy name', 'Copy path': 'Copy path', 'Please enter keyword': 'Please enter keyword', 'File Upload': 'File Upload', 'Drag the file into the current upload window': 'Drag the file into the current upload window', 'Drag area upload': 'Drag area upload', Upload: 'Upload', 'ReUpload File': 'ReUpload File', 'Please enter file name': 'Please enter file name', 'Please select the file to upload': 'Please select the file to upload', 'Resources manage': 'Resources', Security: 'Security', Logout: 'Logout', 'No data': 'No data', 'Uploading...': 'Uploading...', 'Loading...': 'Loading...', List: 'List', 'Unable to download without 
proper url': 'Unable to download without proper url', Process: 'Process', 'Process definition': 'Process definition', 'Task record': 'Task record', 'Warning group manage': 'Warning group manage', 'Warning instance manage': 'Warning instance manage', 'Servers manage': 'Servers manage', 'UDF manage': 'UDF manage', 'Resource manage': 'Resource manage', 'Function manage': 'Function manage', 'Edit password': 'Edit password', 'Ordinary users': 'Ordinary users', 'Create process': 'Create process', 'Import process': 'Import process', 'Timing state': 'Timing state', Timing: 'Timing', Timezone: 'Timezone', TreeView: 'TreeView', 'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat', 'Mailbox input is illegal': 'Mailbox input is illegal', 'Please set the parameters before starting': 'Please set the parameters before starting', Continue: 'Continue', End: 'End', 'Node execution': 'Node execution', 'Backward execution': 'Backward execution', 'Forward execution': 'Forward execution', 'Execute only the current node': 'Execute only the current node', 'Notification strategy': 'Notification strategy', 'Notification group': 'Notification group', 'Please select a notification group': 'Please select a notification group', 'Whether it is a complement process?': 'Whether it is a complement process?', 'Schedule date': 'Schedule date', 'Mode of execution': 'Mode of execution', 'Serial execution': 'Serial execution', 'Parallel execution': 'Parallel execution', 'Set parameters before timing': 'Set parameters before timing', 'Start and stop time': 'Start and stop time', 'Please select time': 'Please select time', 'Please enter crontab': 'Please enter crontab', none_1: 'none', success_1: 'success', failure_1: 'failure', All_1: 'All', Toolbar: 'Toolbar', 'View variables': 'View variables', 'Format DAG': 'Format DAG', 'Refresh DAG status': 'Refresh DAG status', Return_1: 'Return', 'Please enter format': 'Please enter format', 'connection parameter': 'connection parameter', 'Process definition details': 'Process definition details', 'Create process definition': 'Create process definition', 'Scheduled task list': 'Scheduled task list', 'Process instance details': 'Process instance details', 'Create Resource': 'Create Resource', 'User Center': 'User Center', AllStatus: 'All', None: 'None', Name: 'Name', 'Process priority': 'Process priority', 'Task priority': 'Task priority', 'Task timeout alarm': 'Task timeout alarm', 'Timeout strategy': 'Timeout strategy', 'Timeout alarm': 'Timeout alarm', 'Timeout failure': 'Timeout failure', 'Timeout period': 'Timeout period', 'Waiting Dependent complete': 'Waiting Dependent complete', 'Waiting Dependent start': 'Waiting Dependent start', 'Check interval': 'Check interval', 'Timeout must be longer than check interval': 'Timeout must be longer than check interval', 'Timeout strategy must be selected': 'Timeout strategy must be selected', 'Timeout must be a positive integer': 'Timeout must be a positive integer', 'Add dependency': 'Add dependency', 'Whether dry-run': 'Whether dry-run', and: 'and', or: 'or', month: 'month', week: 'week', day: 'day', hour: 'hour', Running: 'Running', 'Waiting for dependency to complete': 'Waiting for dependency to complete', Selected: 'Selected', CurrentHour: 'CurrentHour', Last1Hour: 'Last1Hour', Last2Hours: 'Last2Hours', Last3Hours: 'Last3Hours', Last24Hours: 'Last24Hours', today: 'today', Last1Days: 'Last1Days', Last2Days: 'Last2Days', Last3Days: 'Last3Days', Last7Days: 'Last7Days', 
ThisWeek: 'ThisWeek', LastWeek: 'LastWeek', LastMonday: 'LastMonday', LastTuesday: 'LastTuesday', LastWednesday: 'LastWednesday', LastThursday: 'LastThursday', LastFriday: 'LastFriday', LastSaturday: 'LastSaturday', LastSunday: 'LastSunday', ThisMonth: 'ThisMonth', LastMonth: 'LastMonth', LastMonthBegin: 'LastMonthBegin', LastMonthEnd: 'LastMonthEnd', 'Refresh status succeeded': 'Refresh status succeeded', 'Queue manage': 'Yarn Queue manage', 'Create queue': 'Create queue', 'Edit queue': 'Edit queue', 'Datasource manage': 'Datasource', 'History task record': 'History task record', 'Please go online': 'Please go online', 'Queue value': 'Queue value', 'Please enter queue value': 'Please enter queue value', 'Worker group manage': 'Worker group manage', 'Create worker group': 'Create worker group', 'Edit worker group': 'Edit worker group', 'Token manage': 'Token manage', 'Create token': 'Create token', 'Edit token': 'Edit token', Addresses: 'Addresses', 'Worker Addresses': 'Worker Addresses', 'Please select the worker addresses': 'Please select the worker addresses', 'Failure time': 'Failure time', 'Expiration time': 'Expiration time', User: 'User', 'Please enter token': 'Please enter token', 'Generate token': 'Generate token', Monitor: 'Monitor', Group: 'Group', 'Queue statistics': 'Queue statistics', 'Command status statistics': 'Command status statistics', 'Task kill': 'Task Kill', 'Task queue': 'Task queue', 'Error command count': 'Error command count', 'Normal command count': 'Normal command count', Manage: ' Manage', 'Number of connections': 'Number of connections', Sent: 'Sent', Received: 'Received', 'Min latency': 'Min latency', 'Avg latency': 'Avg latency', 'Max latency': 'Max latency', 'Node count': 'Node count', 'Query time': 'Query time', 'Node self-test status': 'Node self-test status', 'Health status': 'Health status', 'Max connections': 'Max connections', 'Threads connections': 'Threads connections', 'Max used connections': 'Max used connections', 'Threads running connections': 'Threads running connections', 'Worker group': 'Worker group', 'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0', 'Pre Statement': 'Pre Statement', 'Post Statement': 'Post Statement', 'Statement cannot be empty': 'Statement cannot be empty', 'Process Define Count': 'Work flow Define Count', 'Process Instance Running Count': 'Process Instance Running Count', 'command number of waiting for running': 'command number of waiting for running', 'failure command number': 'failure command number', 'tasks number of waiting running': 'tasks number of waiting running', 'task number of ready to kill': 'task number of ready to kill', 'Statistics manage': 'Statistics Manage', statistics: 'Statistics', 'select tenant': 'select tenant', 'Please enter Principal': 'Please enter Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path', 'The start time must not be the same as the end': 'The start time must not be the same as the end', 'Startup parameter': 'Startup parameter', 'Startup type': 'Startup type', 'warning of timeout': 'warning of timeout', 'Next 
five execution times': 'Next five execution times', 'Execute time': 'Execute time', 'Complement range': 'Complement range', 'Http Url': 'Http Url', 'Http Method': 'Http Method', 'Http Parameters': 'Http Parameters', 'Http Parameters Key': 'Http Parameters Key', 'Http Parameters Position': 'Http Parameters Position', 'Http Parameters Value': 'Http Parameters Value', 'Http Check Condition': 'Http Check Condition', 'Http Condition': 'Http Condition', 'Please Enter Http Url': 'Please Enter Http Url(required)', 'Please Enter Http Condition': 'Please Enter Http Condition', 'There is no data for this period of time': 'There is no data for this period of time', 'Worker addresses cannot be empty': 'Worker addresses cannot be empty', 'Please generate token': 'Please generate token', 'Please Select token': 'Please select the expiration time of token', 'Spark Version': 'Spark Version', TargetDataBase: 'target database', TargetTable: 'target table', TargetJobName: 'target job name', 'Please enter Pigeon job name': 'Please enter Pigeon job name', 'Please enter the table of target': 'Please enter the table of target', 'Please enter a Target Table(required)': 'Please enter a Target Table(required)', SpeedByte: 'speed(byte count)', SpeedRecord: 'speed(record count)', '0 means unlimited by byte': '0 means unlimited', '0 means unlimited by count': '0 means unlimited', 'Modify User': 'Modify User', 'Whether directory': 'Whether directory', Yes: 'Yes', No: 'No', 'Hadoop Custom Params': 'Hadoop Params', 'Sqoop Advanced Parameters': 'Sqoop Params', 'Sqoop Job Name': 'Job Name', 'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)', 'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)', 'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)', 'Please enter Target Dir(required)': 'Please enter Target Dir(required)', 'Please enter Export Dir(required)': 'Please enter Export Dir(required)', 'Please enter Hive Database(required)': 'Please enter Hive Database(required)', 'Please enter Hive Table(required)': 'Please enter Hive Table(required)', 'Please enter Hive Partition Keys': 'Please enter Hive Partition Key', 'Please enter Hive Partition Values': 'Please enter Partition Value', 'Please enter Replace Delimiter': 'Please enter Replace Delimiter', 'Please enter Fields Terminated': 'Please enter Fields Terminated', 'Please enter Lines Terminated': 'Please enter Lines Terminated', 'Please enter Concurrency': 'Please enter Concurrency', 'Please enter Update Key': 'Please enter Update Key', 'Please enter Job Name(required)': 'Please enter Job Name(required)', 'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)', Direct: 'Direct', Type: 'Type', ModelType: 'ModelType', ColumnType: 'ColumnType', Database: 'Database', Column: 'Column', 'Map Column Hive': 'Map Column Hive', 'Map Column Java': 'Map Column Java', 'Export Dir': 'Export Dir', 'Hive partition Keys': 'Hive partition Keys', 'Hive partition Values': 'Hive partition Values', FieldsTerminated: 'FieldsTerminated', LinesTerminated: 'LinesTerminated', IsUpdate: 'IsUpdate', UpdateKey: 'UpdateKey', UpdateMode: 'UpdateMode', 'Target Dir': 'Target Dir', DeleteTargetDir: 'DeleteTargetDir', FileType: 'FileType', CompressionCodec: 'CompressionCodec', CreateHiveTable: 'CreateHiveTable', DropDelimiter: 'DropDelimiter', OverWriteSrc: 'OverWriteSrc', ReplaceDelimiter: 'ReplaceDelimiter', Concurrency: 'Concurrency', Form: 'Form', OnlyUpdate: 'OnlyUpdate', AllowInsert: 'AllowInsert', 
'Data Source': 'Data Source', 'Data Target': 'Data Target', 'All Columns': 'All Columns', 'Some Columns': 'Some Columns', 'Branch flow': 'Branch flow', 'Custom Job': 'Custom Job', 'Custom Script': 'Custom Script', 'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow', 'Successful branch flow and failed branch flow are required': 'conditions node Successful and failed branch flow are required', 'No resources exist': 'No resources exist', 'Please delete all non-existing resources': 'Please delete all non-existing resources', 'Unauthorized or deleted resources': 'Unauthorized or deleted resources', 'Please delete all non-existent resources': 'Please delete all non-existent resources', Kinship: 'Workflow relationship', Reset: 'Reset', KinshipStateActive: 'Active', KinshipState1: 'Online', KinshipState0: 'Workflow is not online', KinshipState10: 'Scheduling is not online', 'Dag label display control': 'Dag label display control', Enable: 'Enable', Disable: 'Disable', 'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!', 'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading', 'User name length is between 3 and 39': 'User name length is between 3 and 39', 'Timeout Settings': 'Timeout Settings', 'Connect Timeout': 'Connect Timeout', 'Socket Timeout': 'Socket Timeout', 'Connect timeout be a positive integer': 'Connect timeout be a positive integer', 'Socket Timeout be a positive integer': 'Socket Timeout be a positive integer', ms: 'ms', 'Please Enter Url': 'Please Enter Url eg. 
127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': 'Please select the waterdrop resources', zkDirectory: 'zkDirectory', 'Directory detail': 'Directory detail', 'Connection name': 'Connection name', 'Current connection settings': 'Current connection settings', 'Please save the DAG before formatting': 'Please save the DAG before formatting', 'Batch copy': 'Batch copy', 'Related items': 'Related items', 'Project name is required': 'Project name is required', 'Batch move': 'Batch move', Version: 'Version', 'Pre tasks': 'Pre tasks', 'Running Memory': 'Running Memory', 'Max Memory': 'Max Memory', 'Min Memory': 'Min Memory', 'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate', Info: 'Info', 'Datasource userName': 'owner', 'Resource userName': 'owner', 'Environment manage': 'Environment manage', 'Create environment': 'Create environment', 'Edit environment': 'Edit environment', 'Environment value': 'Environment value', 'Environment Name': 'Environment Name', 'Environment Code': 'Environment Code', 'Environment Config': 'Environment Config', 'Environment Desc': 'Environment Desc', 'Environment Worker Group': 'Worker Groups', 'Please enter environment config': 'Please enter environment config', 'Please enter environment desc': 'Please enter environment desc', 'Please select worker groups': 'Please select worker groups', condition: 'condition', 'The condition content cannot be empty': 'The condition content cannot be empty', 'Reference from': 'Reference from', 'No more...': 'No more...', 'Task Definition': 'Task Definition', 'Create task': 'Create task', 'Task Type': 'Task Type', 'Process execute type': 'Process execute type', parallel: 'parallel', 'Serial wait': 'Serial wait', 'Serial discard': 'Serial discard', 'Serial priority': 'Serial priority', 'Recover serial wait': 'Recover serial wait', IsEnableProxy: 'Enable Proxy', WebHook: 'WebHook', Keyword: 'Keyword', Proxy: 'Proxy', receivers: 'Receivers', receiverCcs: 'ReceiverCcs', transportProtocol: 'Transport Protocol', serverHost: 'SMTP Host', serverPort: 'SMTP Port', sender: 'Sender', enableSmtpAuth: 'SMTP Auth', starttlsEnable: 'SMTP STARTTLS Enable', sslEnable: 'SMTP SSL Enable', smtpSslTrust: 'SMTP SSL Trust', url: 'URL', requestType: 'Request Type', headerParams: 'Headers', bodyParams: 'Body', contentField: 'Content Field', path: 'Script Path', userParams: 'User Params', corpId: 'CorpId', secret: 'Secret', userSendMsg: 'UserSendMsg', agentId: 'AgentId', users: 'Users', Username: 'Username', showType: 'Show Type', 'Please select a task type (required)': 'Please select a task type (required)', layoutType: 'Layout Type', gridLayout: 'Grid', dagreLayout: 'Dagre', rows: 'Rows', cols: 'Cols', processOnline: 'Online', searchNode: 'Search Node', dagScale: 'Scale', workflowName: 'Workflow Name', scheduleStartTime: 'Schedule Start Time', scheduleEndTime: 'Schedule End Time', crontabExpression: 'Crontab', workflowPublishStatus: 'Workflow Publish Status', schedulePublishStatus: 'Schedule Publish Status' }
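The last six keys appended to en_US.js above (workflowName, scheduleStartTime, scheduleEndTime, crontabExpression, workflowPublishStatus, schedulePublishStatus) are what the #7128 fix adds for the workflow-relationship (Kinship) page. A hedged sketch of how such labels might be assembled into a node tooltip; formatKinshipTooltip() and the node shape are assumptions for illustration, and only the locale keys come from the file itself:

```js
// Hypothetical tooltip formatter for a node on the workflow-relationship
// (Kinship) page. Only the locale keys below come from en_US.js; the
// formatter and the node fields are illustrative assumptions.
const en_US = {
  workflowName: 'Workflow Name',
  scheduleStartTime: 'Schedule Start Time',
  scheduleEndTime: 'Schedule End Time',
  crontabExpression: 'Crontab',
  workflowPublishStatus: 'Workflow Publish Status',
  schedulePublishStatus: 'Schedule Publish Status'
}

function formatKinshipTooltip (node) {
  return [
    `${en_US.workflowName}: ${node.name}`,
    `${en_US.crontabExpression}: ${node.crontab}`,
    `${en_US.workflowPublishStatus}: ${node.released ? 'online' : 'offline'}`,
    `${en_US.schedulePublishStatus}: ${node.scheduleReleased ? 'online' : 'offline'}`
  ].join('\n')
}

console.log(formatKinshipTooltip({
  name: 'etl_daily',
  crontab: '0 0 * * * ? *',
  released: true,
  scheduleReleased: false
}))
```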
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,600
[Bug][ui] there are some problems on the workflow relationship page
1. The time format should be changed to 2020-08-26 00:00:00. 2. The workflow release status and scheduling release status should be shown as "online" or "offline". ![image](https://user-images.githubusercontent.com/55787491/91282787-5b4f1a00-e7bc-11ea-87e8-36cadc845f65.png) 3. When '_' exists in the workflow name, the name on the icon does not show '_'. 4. Change null to '_'. ![image](https://user-images.githubusercontent.com/55787491/91284581-ab2ee080-e7be-11ea-9473-bd4648bac5ad.png) 5. After clicking Reset, the workflow relationship is not restored to its initial state. For example: 1) select a queried workflow, or 2) hide the workflow names; clicking Reset does not restore the initial state. ![image](https://user-images.githubusercontent.com/55787491/91284715-ddd8d900-e7be-11ea-9a28-6de179308082.png) 6. 'Active' in the English version should be changed to 'current selection'. ![image](https://user-images.githubusercontent.com/55787491/91286569-2abdaf00-e7c1-11ea-8e7a-2d4a6f9ff40c.png) Which version? - [dev]
https://github.com/apache/dolphinscheduler/issues/3600
https://github.com/apache/dolphinscheduler/pull/7129
6beae191e2df497b7624728f0208c3a5ab7f0543
791ed6de77893d7dec7595e7cfeb775a2122e68c
"2020-08-26T09:32:03Z"
java
"2021-12-02T17:05:45Z"
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': 'User Name', 'Please enter user name': 'Please enter user name', Password: 'Password', 'Please enter your password': 'Please enter your password', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22', Login: 'Login', Home: 'Home', 'Failed to create node to save': 'Failed to create node to save', 'Global parameters': 'Global parameters', 'Local parameters': 'Local parameters', 'Copy success': 'Copy success', 'The browser does not support automatic copying': 'The browser does not support automatic copying', 'Whether to save the DAG graph': 'Whether to save the DAG graph', 'Current node settings': 'Current node settings', 'View history': 'View history', 'View log': 'View log', 'Force success': 'Force success', 'Enter this child node': 'Enter this child node', 'Node name': 'Node name', 'Please enter name (required)': 'Please enter name (required)', 'Run flag': 'Run flag', Normal: 'Normal', 'Prohibition execution': 'Prohibition execution', 'Please enter description': 'Please enter description', 'Number of failed retries': 'Number of failed retries', Times: 'Times', 'Failed retry interval': 'Failed retry interval', Minute: 'Minute', 'Delay execution time': 'Delay execution time', 'Delay execution': 'Delay execution', 'Forced success': 'Forced success', Cancel: 'Cancel', 'Confirm add': 'Confirm add', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', 'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process', 'Name already exists': 'Name already exists', 'Download Log': 'Download Log', 'Refresh Log': 'Refresh Log', 'Enter full screen': 'Enter full screen', 'Cancel full screen': 'Cancel full screen', Close: 'Close', 'Update log success': 'Update log success', 'No more logs': 'No more logs', 'No log': 'No log', 'Loading Log...': 'Loading Log...', 'Set the DAG diagram name': 'Set the DAG diagram name', 'Please enter description(optional)': 'Please enter description(optional)', 'Set global': 'Set global', 'Whether to go online the process definition': 'Whether to go online the process definition', 'Whether to update the process definition': 'Whether to update the process definition', Add: 'Add', 'DAG graph name cannot be empty': 'DAG graph name cannot be empty', 'Create Datasource': 'Create Datasource', 'Project Home': 'Workflow Monitor', 'Project Manage': 'Project', 'Create Project': 'Create Project', 'Cron Manage': 'Cron 
Manage', 'Copy Workflow': 'Copy Workflow', 'Tenant Manage': 'Tenant Manage', 'Create Tenant': 'Create Tenant', 'User Manage': 'User Manage', 'Create User': 'Create User', 'User Information': 'User Information', 'Edit Password': 'Edit Password', Success: 'Success', Failed: 'Failed', Delete: 'Delete', 'Please choose': 'Please choose', 'Please enter a positive integer': 'Please enter a positive integer', 'Program Type': 'Program Type', 'Main Class': 'Main Class', 'Main Jar Package': 'Main Jar Package', 'Please enter main jar package': 'Please enter main jar package', 'Please enter main class': 'Please enter main class', 'Main Arguments': 'Main Arguments', 'Please enter main arguments': 'Please enter main arguments', 'Option Parameters': 'Option Parameters', 'Please enter option parameters': 'Please enter option parameters', Resources: 'Resources', 'Custom Parameters': 'Custom Parameters', 'Custom template': 'Custom template', Datasource: 'Datasource', methods: 'methods', 'Please enter the procedure method': 'Please enter the procedure script \n\ncall procedure:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\ncall function:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': 'example:{call <procedure-name>[(?,?, ...)]} or {?= call <procedure-name>[(?,?, ...)]}', Script: 'Script', 'Please enter script(required)': 'Please enter script(required)', 'Deploy Mode': 'Deploy Mode', 'Driver Cores': 'Driver Cores', 'Please enter Driver cores': 'Please enter Driver cores', 'Driver Memory': 'Driver Memory', 'Please enter Driver memory': 'Please enter Driver memory', 'Executor Number': 'Executor Number', 'Please enter Executor number': 'Please enter Executor number', 'The Executor number should be a positive integer': 'The Executor number should be a positive integer', 'Executor Memory': 'Executor Memory', 'Please enter Executor memory': 'Please enter Executor memory', 'Executor Cores': 'Executor Cores', 'Please enter Executor cores': 'Please enter Executor cores', 'Memory should be a positive integer': 'Memory should be a positive integer', 'Core number should be positive integer': 'Core number should be positive integer', 'Flink Version': 'Flink Version', 'JobManager Memory': 'JobManager Memory', 'Please enter JobManager memory': 'Please enter JobManager memory', 'TaskManager Memory': 'TaskManager Memory', 'Please enter TaskManager memory': 'Please enter TaskManager memory', 'Slot Number': 'Slot Number', 'Please enter Slot number': 'Please enter Slot number', Parallelism: 'Parallelism', 'Custom Parallelism': 'Configure parallelism', 'Please enter Parallelism': 'Please enter Parallelism', 'Parallelism tip': 'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' + 'set the complement task thread to a reasonable value to avoid too large impact on the server.', 'Parallelism number should be positive integer': 'Parallelism number should be positive integer', 'TaskManager Number': 'TaskManager Number', 'Please enter TaskManager number': 'Please enter TaskManager number', 'App Name': 'App Name', 'Please enter app name(optional)': 'Please enter app name(optional)', 'SQL Type': 'SQL Type', 'Send Email': 'Send Email', 'Log display': 'Log display', 'rows of result': 'rows of result', Title: 'Title', 'Please enter the title of email': 'Please enter the title of email', Table: 'Table', TableMode: 'Table', Attachment: 'Attachment', 'SQL Parameter': 'SQL Parameter', 'SQL Statement': 'SQL Statement', 'UDF Function': 'UDF Function', 
'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)', 'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)', 'One form or attachment must be selected': 'One form or attachment must be selected', 'Mail subject required': 'Mail subject required', 'Child Node': 'Child Node', 'Please select a sub-Process': 'Please select a sub-Process', Edit: 'Edit', 'Switch To This Version': 'Switch To This Version', 'Datasource Name': 'Datasource Name', 'Please enter datasource name': 'Please enter datasource name', IP: 'IP', 'Please enter IP': 'Please enter IP', Port: 'Port', 'Please enter port': 'Please enter port', 'Database Name': 'Database Name', 'Please enter database name': 'Please enter database name', 'Oracle Connect Type': 'ServiceName or SID', 'Oracle Service Name': 'ServiceName', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc connect parameters', 'Test Connect': 'Test Connect', 'Please enter resource name': 'Please enter resource name', 'Please enter resource folder name': 'Please enter resource folder name', 'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement', 'Please enter IP/hostname': 'Please enter IP/hostname', 'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format', '#': '#', 'Datasource Type': 'Datasource Type', 'Datasource Parameter': 'Datasource Parameter', 'Create Time': 'Create Time', 'Update Time': 'Update Time', Operation: 'Operation', 'Current Version': 'Current Version', 'Click to view': 'Click to view', 'Delete?': 'Delete?', 'Switch Version Successfully': 'Switch Version Successfully', 'Confirm Switch To This Version?': 'Confirm Switch To This Version?', Confirm: 'Confirm', 'Task status statistics': 'Task Status Statistics', Number: 'Number', State: 'State', 'Dry-run flag': 'Dry-run flag', 'Process Status Statistics': 'Process Status Statistics', 'Process Definition Statistics': 'Process Definition Statistics', 'Project Name': 'Project Name', 'Please enter name': 'Please enter name', 'Owned Users': 'Owned Users', 'Process Pid': 'Process Pid', 'Zk registration directory': 'Zk registration directory', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': 'Last heartbeat time', 'Edit Tenant': 'Edit Tenant', 'OS Tenant Code': 'OS Tenant Code', 'Tenant Name': 'Tenant Name', Queue: 'Yarn Queue', 'Please select a queue': 'default is tenant association queue', 'Please enter the os tenant code in English': 'Please enter the os tenant code in English', 'Please enter os tenant code in English': 'Please enter os tenant code in English', 'Please enter os tenant code': 'Please enter os tenant code', 'Please enter tenant Name': 'Please enter tenant Name', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. 
Only letters or a combination of letters and numbers are allowed', 'Edit User': 'Edit User', Tenant: 'Tenant', Email: 'Email', Phone: 'Phone', 'User Type': 'User Type', 'Please enter phone number': 'Please enter phone number', 'Please enter email': 'Please enter email', 'Please enter the correct email format': 'Please enter the correct email format', 'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format', Project: 'Project', Authorize: 'Authorize', 'File resources': 'File resources', 'UDF resources': 'UDF resources', 'UDF resources directory': 'UDF resources directory', 'Please select UDF resources directory': 'Please select UDF resources directory', 'Alarm group': 'Alarm group', 'Alarm group required': 'Alarm group required', 'Edit alarm group': 'Edit alarm group', 'Create alarm group': 'Create alarm group', 'Create Alarm Instance': 'Create Alarm Instance', 'Edit Alarm Instance': 'Edit Alarm Instance', 'Group Name': 'Group Name', 'Alarm instance name': 'Alarm instance name', 'Alarm plugin name': 'Alarm plugin name', 'Select plugin': 'Select plugin', 'Select Alarm plugin': 'Please select an Alarm plugin', 'Please enter group name': 'Please enter group name', 'Instance parameter exception': 'Instance parameter exception', 'Group Type': 'Group Type', 'Alarm plugin instance': 'Alarm plugin instance', 'Select Alarm plugin instance': 'Please select an Alarm plugin instance', Remarks: 'Remarks', SMS: 'SMS', 'Managing Users': 'Managing Users', Permission: 'Permission', Administrator: 'Administrator', 'Confirm Password': 'Confirm Password', 'Please enter confirm password': 'Please enter confirm password', 'Password cannot be in Chinese': 'Password cannot be in Chinese', 'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password', 'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese', 'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password', 'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password', 'Please select the datasource': 'Please select the datasource', 'Please select resources': 'Please select resources', Query: 'Query', 'Non Query': 'Non Query', 'prop(required)': 'prop(required)', 'value(optional)': 'value(optional)', 'value(required)': 'value(required)', 'prop is empty': 'prop is empty', 'value is empty': 'value is empty', 'prop is repeat': 'prop is repeat', 'Start Time': 'Start Time', 'End Time': 'End Time', crontab: 'crontab', 'Failure Strategy': 'Failure Strategy', online: 'online', offline: 'offline', 'Task Status': 'Task Status', 'Process Instance': 'Process Instance', 'Task Instance': 'Task Instance', 'Select date range': 'Select date range', startDate: 'startDate', endDate: 'endDate', Date: 'Date', Waiting: 'Waiting', Execution: 'Execution', Finish: 'Finish', 'Create File': 'Create File', 'Create folder': 'Create folder', 'File Name': 'File Name', 'Folder Name': 'Folder Name', 'File Format': 'File Format', 'Folder Format': 'Folder Format', 'File Content': 'File Content', 'Upload File Size': 'Upload File size cannot exceed 1g', Create: 'Create', 'Please enter the resource content': 'Please enter the resource content', 'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines', 'File Details': 'File Details', 'Download Details': 'Download Details', Return: 'Return', Save: 'Save', 'File Manage': 
'File Manage', 'Upload Files': 'Upload Files', 'Create UDF Function': 'Create UDF Function', 'Upload UDF Resources': 'Upload UDF Resources', 'Service-Master': 'Service-Master', 'Service-Worker': 'Service-Worker', 'Process Name': 'Process Name', Executor: 'Executor', 'Run Type': 'Run Type', 'Scheduling Time': 'Scheduling Time', 'Run Times': 'Run Times', host: 'host', 'fault-tolerant sign': 'fault-tolerant sign', Rerun: 'Rerun', 'Recovery Failed': 'Recovery Failed', Stop: 'Stop', Pause: 'Pause', 'Recovery Suspend': 'Recovery Suspend', Gantt: 'Gantt', 'Node Type': 'Node Type', 'Submit Time': 'Submit Time', Duration: 'Duration', 'Retry Count': 'Retry Count', 'Task Name': 'Task Name', 'Task Date': 'Task Date', 'Source Table': 'Source Table', 'Record Number': 'Record Number', 'Target Table': 'Target Table', 'Online viewing type is not supported': 'Online viewing type is not supported', Size: 'Size', Rename: 'Rename', Download: 'Download', Export: 'Export', 'Version Info': 'Version Info', Submit: 'Submit', 'Edit UDF Function': 'Edit UDF Function', type: 'type', 'UDF Function Name': 'UDF Function Name', FILE: 'FILE', UDF: 'UDF', 'File Subdirectory': 'File Subdirectory', 'Please enter a function name': 'Please enter a function name', 'Package Name': 'Package Name', 'Please enter a Package name': 'Please enter a Package name', Parameter: 'Parameter', 'Please enter a parameter': 'Please enter a parameter', 'UDF Resources': 'UDF Resources', 'Upload Resources': 'Upload Resources', Instructions: 'Instructions', 'Please enter a instructions': 'Please enter a instructions', 'Please enter a UDF function name': 'Please enter a UDF function name', 'Select UDF Resources': 'Select UDF Resources', 'Class Name': 'Class Name', 'Jar Package': 'Jar Package', 'Library Name': 'Library Name', 'UDF Resource Name': 'UDF Resource Name', 'File Size': 'File Size', Description: 'Description', 'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items', 'Select Line Connection': 'Select Line Connection', 'Delete selected lines or nodes': 'Delete selected lines or nodes', 'Full Screen': 'Full Screen', Unpublished: 'Unpublished', 'Start Process': 'Start Process', 'Execute from the current node': 'Execute from the current node', 'Recover tolerance fault process': 'Recover tolerance fault process', 'Resume the suspension process': 'Resume the suspension process', 'Execute from the failed nodes': 'Execute from the failed nodes', 'Complement Data': 'Complement Data', 'Scheduling execution': 'Scheduling execution', 'Recovery waiting thread': 'Recovery waiting thread', 'Submitted successfully': 'Submitted successfully', Executing: 'Executing', 'Ready to pause': 'Ready to pause', 'Ready to stop': 'Ready to stop', 'Need fault tolerance': 'Need fault tolerance', Kill: 'Kill', 'Waiting for thread': 'Waiting for thread', 'Waiting for dependence': 'Waiting for dependence', Start: 'Start', Copy: 'Copy', 'Copy name': 'Copy name', 'Copy path': 'Copy path', 'Please enter keyword': 'Please enter keyword', 'File Upload': 'File Upload', 'Drag the file into the current upload window': 'Drag the file into the current upload window', 'Drag area upload': 'Drag area upload', Upload: 'Upload', 'ReUpload File': 'ReUpload File', 'Please enter file name': 'Please enter file name', 'Please select the file to upload': 'Please select the file to upload', 'Resources manage': 'Resources', Security: 'Security', Logout: 'Logout', 'No data': 'No data', 'Uploading...': 'Uploading...', 'Loading...': 'Loading...', List: 'List', 'Unable to download without 
proper url': 'Unable to download without proper url', Process: 'Process', 'Process definition': 'Process definition', 'Task record': 'Task record', 'Warning group manage': 'Warning group manage', 'Warning instance manage': 'Warning instance manage', 'Servers manage': 'Servers manage', 'UDF manage': 'UDF manage', 'Resource manage': 'Resource manage', 'Function manage': 'Function manage', 'Edit password': 'Edit password', 'Ordinary users': 'Ordinary users', 'Create process': 'Create process', 'Import process': 'Import process', 'Timing state': 'Timing state', Timing: 'Timing', Timezone: 'Timezone', TreeView: 'TreeView', 'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat', 'Mailbox input is illegal': 'Mailbox input is illegal', 'Please set the parameters before starting': 'Please set the parameters before starting', Continue: 'Continue', End: 'End', 'Node execution': 'Node execution', 'Backward execution': 'Backward execution', 'Forward execution': 'Forward execution', 'Execute only the current node': 'Execute only the current node', 'Notification strategy': 'Notification strategy', 'Notification group': 'Notification group', 'Please select a notification group': 'Please select a notification group', 'Whether it is a complement process?': 'Whether it is a complement process?', 'Schedule date': 'Schedule date', 'Mode of execution': 'Mode of execution', 'Serial execution': 'Serial execution', 'Parallel execution': 'Parallel execution', 'Set parameters before timing': 'Set parameters before timing', 'Start and stop time': 'Start and stop time', 'Please select time': 'Please select time', 'Please enter crontab': 'Please enter crontab', none_1: 'none', success_1: 'success', failure_1: 'failure', All_1: 'All', Toolbar: 'Toolbar', 'View variables': 'View variables', 'Format DAG': 'Format DAG', 'Refresh DAG status': 'Refresh DAG status', Return_1: 'Return', 'Please enter format': 'Please enter format', 'connection parameter': 'connection parameter', 'Process definition details': 'Process definition details', 'Create process definition': 'Create process definition', 'Scheduled task list': 'Scheduled task list', 'Process instance details': 'Process instance details', 'Create Resource': 'Create Resource', 'User Center': 'User Center', AllStatus: 'All', None: 'None', Name: 'Name', 'Process priority': 'Process priority', 'Task priority': 'Task priority', 'Task timeout alarm': 'Task timeout alarm', 'Timeout strategy': 'Timeout strategy', 'Timeout alarm': 'Timeout alarm', 'Timeout failure': 'Timeout failure', 'Timeout period': 'Timeout period', 'Waiting Dependent complete': 'Waiting Dependent complete', 'Waiting Dependent start': 'Waiting Dependent start', 'Check interval': 'Check interval', 'Timeout must be longer than check interval': 'Timeout must be longer than check interval', 'Timeout strategy must be selected': 'Timeout strategy must be selected', 'Timeout must be a positive integer': 'Timeout must be a positive integer', 'Add dependency': 'Add dependency', 'Whether dry-run': 'Whether dry-run', and: 'and', or: 'or', month: 'month', week: 'week', day: 'day', hour: 'hour', Running: 'Running', 'Waiting for dependency to complete': 'Waiting for dependency to complete', Selected: 'Selected', CurrentHour: 'CurrentHour', Last1Hour: 'Last1Hour', Last2Hours: 'Last2Hours', Last3Hours: 'Last3Hours', Last24Hours: 'Last24Hours', today: 'today', Last1Days: 'Last1Days', Last2Days: 'Last2Days', Last3Days: 'Last3Days', Last7Days: 'Last7Days', 
ThisWeek: 'ThisWeek', LastWeek: 'LastWeek', LastMonday: 'LastMonday', LastTuesday: 'LastTuesday', LastWednesday: 'LastWednesday', LastThursday: 'LastThursday', LastFriday: 'LastFriday', LastSaturday: 'LastSaturday', LastSunday: 'LastSunday', ThisMonth: 'ThisMonth', LastMonth: 'LastMonth', LastMonthBegin: 'LastMonthBegin', LastMonthEnd: 'LastMonthEnd', 'Refresh status succeeded': 'Refresh status succeeded', 'Queue manage': 'Yarn Queue manage', 'Create queue': 'Create queue', 'Edit queue': 'Edit queue', 'Datasource manage': 'Datasource', 'History task record': 'History task record', 'Please go online': 'Please go online', 'Queue value': 'Queue value', 'Please enter queue value': 'Please enter queue value', 'Worker group manage': 'Worker group manage', 'Create worker group': 'Create worker group', 'Edit worker group': 'Edit worker group', 'Token manage': 'Token manage', 'Create token': 'Create token', 'Edit token': 'Edit token', Addresses: 'Addresses', 'Worker Addresses': 'Worker Addresses', 'Please select the worker addresses': 'Please select the worker addresses', 'Failure time': 'Failure time', 'Expiration time': 'Expiration time', User: 'User', 'Please enter token': 'Please enter token', 'Generate token': 'Generate token', Monitor: 'Monitor', Group: 'Group', 'Queue statistics': 'Queue statistics', 'Command status statistics': 'Command status statistics', 'Task kill': 'Task Kill', 'Task queue': 'Task queue', 'Error command count': 'Error command count', 'Normal command count': 'Normal command count', Manage: ' Manage', 'Number of connections': 'Number of connections', Sent: 'Sent', Received: 'Received', 'Min latency': 'Min latency', 'Avg latency': 'Avg latency', 'Max latency': 'Max latency', 'Node count': 'Node count', 'Query time': 'Query time', 'Node self-test status': 'Node self-test status', 'Health status': 'Health status', 'Max connections': 'Max connections', 'Threads connections': 'Threads connections', 'Max used connections': 'Max used connections', 'Threads running connections': 'Threads running connections', 'Worker group': 'Worker group', 'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0', 'Pre Statement': 'Pre Statement', 'Post Statement': 'Post Statement', 'Statement cannot be empty': 'Statement cannot be empty', 'Process Define Count': 'Workflow Define Count', 'Process Instance Running Count': 'Process Instance Running Count', 'command number of waiting for running': 'command number of waiting for running', 'failure command number': 'failure command number', 'tasks number of waiting running': 'tasks number of waiting running', 'task number of ready to kill': 'task number of ready to kill', 'Statistics manage': 'Statistics Manage', statistics: 'Statistics', 'select tenant': 'select tenant', 'Please enter Principal': 'Please enter Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path', 'The start time must not be the same as the end': 'The start time must not be the same as the end', 'Startup parameter': 'Startup parameter', 'Startup type': 'Startup type', 'warning of timeout': 'warning of timeout', 'Next 
five execution times': 'Next five execution times', 'Execute time': 'Execute time', 'Complement range': 'Complement range', 'Http Url': 'Http Url', 'Http Method': 'Http Method', 'Http Parameters': 'Http Parameters', 'Http Parameters Key': 'Http Parameters Key', 'Http Parameters Position': 'Http Parameters Position', 'Http Parameters Value': 'Http Parameters Value', 'Http Check Condition': 'Http Check Condition', 'Http Condition': 'Http Condition', 'Please Enter Http Url': 'Please Enter Http Url(required)', 'Please Enter Http Condition': 'Please Enter Http Condition', 'There is no data for this period of time': 'There is no data for this period of time', 'Worker addresses cannot be empty': 'Worker addresses cannot be empty', 'Please generate token': 'Please generate token', 'Please Select token': 'Please select the expiration time of token', 'Spark Version': 'Spark Version', TargetDataBase: 'target database', TargetTable: 'target table', TargetJobName: 'target job name', 'Please enter Pigeon job name': 'Please enter Pigeon job name', 'Please enter the table of target': 'Please enter the table of target', 'Please enter a Target Table(required)': 'Please enter a Target Table(required)', SpeedByte: 'speed(byte count)', SpeedRecord: 'speed(record count)', '0 means unlimited by byte': '0 means unlimited', '0 means unlimited by count': '0 means unlimited', 'Modify User': 'Modify User', 'Whether directory': 'Whether directory', Yes: 'Yes', No: 'No', 'Hadoop Custom Params': 'Hadoop Params', 'Sqoop Advanced Parameters': 'Sqoop Params', 'Sqoop Job Name': 'Job Name', 'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)', 'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)', 'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)', 'Please enter Target Dir(required)': 'Please enter Target Dir(required)', 'Please enter Export Dir(required)': 'Please enter Export Dir(required)', 'Please enter Hive Database(required)': 'Please enter Hive Database(required)', 'Please enter Hive Table(required)': 'Please enter Hive Table(required)', 'Please enter Hive Partition Keys': 'Please enter Hive Partition Keys', 'Please enter Hive Partition Values': 'Please enter Hive Partition Values', 'Please enter Replace Delimiter': 'Please enter Replace Delimiter', 'Please enter Fields Terminated': 'Please enter Fields Terminated', 'Please enter Lines Terminated': 'Please enter Lines Terminated', 'Please enter Concurrency': 'Please enter Concurrency', 'Please enter Update Key': 'Please enter Update Key', 'Please enter Job Name(required)': 'Please enter Job Name(required)', 'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)', Direct: 'Direct', Type: 'Type', ModelType: 'ModelType', ColumnType: 'ColumnType', Database: 'Database', Column: 'Column', 'Map Column Hive': 'Map Column Hive', 'Map Column Java': 'Map Column Java', 'Export Dir': 'Export Dir', 'Hive partition Keys': 'Hive partition Keys', 'Hive partition Values': 'Hive partition Values', FieldsTerminated: 'FieldsTerminated', LinesTerminated: 'LinesTerminated', IsUpdate: 'IsUpdate', UpdateKey: 'UpdateKey', UpdateMode: 'UpdateMode', 'Target Dir': 'Target Dir', DeleteTargetDir: 'DeleteTargetDir', FileType: 'FileType', CompressionCodec: 'CompressionCodec', CreateHiveTable: 'CreateHiveTable', DropDelimiter: 'DropDelimiter', OverWriteSrc: 'OverWriteSrc', ReplaceDelimiter: 'ReplaceDelimiter', Concurrency: 'Concurrency', Form: 'Form', OnlyUpdate: 'OnlyUpdate', AllowInsert: 'AllowInsert', 
'Data Source': 'Data Source', 'Data Target': 'Data Target', 'All Columns': 'All Columns', 'Some Columns': 'Some Columns', 'Branch flow': 'Branch flow', 'Custom Job': 'Custom Job', 'Custom Script': 'Custom Script', 'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow', 'Successful branch flow and failed branch flow are required': 'Conditions node: successful and failed branch flows are required', 'No resources exist': 'No resources exist', 'Please delete all non-existing resources': 'Please delete all non-existing resources', 'Unauthorized or deleted resources': 'Unauthorized or deleted resources', 'Please delete all non-existent resources': 'Please delete all non-existent resources', Kinship: 'Workflow relationship', Reset: 'Reset', KinshipStateActive: 'Active', KinshipState1: 'Online', KinshipState0: 'Workflow is not online', KinshipState10: 'Scheduling is not online', 'Dag label display control': 'Dag label display control', Enable: 'Enable', Disable: 'Disable', 'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!', 'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading', 'User name length is between 3 and 39': 'User name length is between 3 and 39', 'Timeout Settings': 'Timeout Settings', 'Connect Timeout': 'Connect Timeout', 'Socket Timeout': 'Socket Timeout', 'Connect timeout be a positive integer': 'Connect timeout must be a positive integer', 'Socket Timeout be a positive integer': 'Socket timeout must be a positive integer', ms: 'ms', 'Please Enter Url': 'Please Enter Url eg. 
127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': 'Please select the waterdrop resources', zkDirectory: 'zkDirectory', 'Directory detail': 'Directory detail', 'Connection name': 'Connection name', 'Current connection settings': 'Current connection settings', 'Please save the DAG before formatting': 'Please save the DAG before formatting', 'Batch copy': 'Batch copy', 'Related items': 'Related items', 'Project name is required': 'Project name is required', 'Batch move': 'Batch move', Version: 'Version', 'Pre tasks': 'Pre tasks', 'Running Memory': 'Running Memory', 'Max Memory': 'Max Memory', 'Min Memory': 'Min Memory', 'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate', Info: 'Info', 'Datasource userName': 'owner', 'Resource userName': 'owner', 'Environment manage': 'Environment manage', 'Create environment': 'Create environment', 'Edit environment': 'Edit environment', 'Environment value': 'Environment value', 'Environment Name': 'Environment Name', 'Environment Code': 'Environment Code', 'Environment Config': 'Environment Config', 'Environment Desc': 'Environment Desc', 'Environment Worker Group': 'Worker Groups', 'Please enter environment config': 'Please enter environment config', 'Please enter environment desc': 'Please enter environment desc', 'Please select worker groups': 'Please select worker groups', condition: 'condition', 'The condition content cannot be empty': 'The condition content cannot be empty', 'Reference from': 'Reference from', 'No more...': 'No more...', 'Task Definition': 'Task Definition', 'Create task': 'Create task', 'Task Type': 'Task Type', 'Process execute type': 'Process execute type', parallel: 'parallel', 'Serial wait': 'Serial wait', 'Serial discard': 'Serial discard', 'Serial priority': 'Serial priority', 'Recover serial wait': 'Recover serial wait', IsEnableProxy: 'Enable Proxy', WebHook: 'WebHook', Keyword: 'Keyword', Proxy: 'Proxy', receivers: 'Receivers', receiverCcs: 'ReceiverCcs', transportProtocol: 'Transport Protocol', serverHost: 'SMTP Host', serverPort: 'SMTP Port', sender: 'Sender', enableSmtpAuth: 'SMTP Auth', starttlsEnable: 'SMTP STARTTLS Enable', sslEnable: 'SMTP SSL Enable', smtpSslTrust: 'SMTP SSL Trust', url: 'URL', requestType: 'Request Type', headerParams: 'Headers', bodyParams: 'Body', contentField: 'Content Field', path: 'Script Path', userParams: 'User Params', corpId: 'CorpId', secret: 'Secret', userSendMsg: 'UserSendMsg', agentId: 'AgentId', users: 'Users', Username: 'Username', showType: 'Show Type', 'Please select a task type (required)': 'Please select a task type (required)', layoutType: 'Layout Type', gridLayout: 'Grid', dagreLayout: 'Dagre', rows: 'Rows', cols: 'Cols', processOnline: 'Online', searchNode: 'Search Node', dagScale: 'Scale', workflowName: 'Workflow Name', scheduleStartTime: 'Schedule Start Time', scheduleEndTime: 'Schedule End Time', crontabExpression: 'Crontab', workflowPublishStatus: 'Workflow Publish Status', schedulePublishStatus: 'Schedule Publish Status' }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,122
[Feature][Workflow relationship] Format time.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ![1638433979(1)](https://user-images.githubusercontent.com/19239641/144386304-609f157d-8dd4-4589-a188-d096dce3b030.jpg) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7122
https://github.com/apache/dolphinscheduler/pull/7123
791ed6de77893d7dec7595e7cfeb775a2122e68c
3304edee47947e0b5a2a9445fe602133efc3d5d7
"2021-12-02T08:35:09Z"
java
"2021-12-03T02:02:33Z"
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/kinship/_source/graphGridOption.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import i18n from '@/module/i18n/index.js' const getCategory = (categoryDic, { workFlowPublishStatus, schedulePublishStatus, code }, sourceWorkFlowCode) => { if (code === sourceWorkFlowCode) return categoryDic.active switch (true) { case workFlowPublishStatus === '0': return categoryDic['0'] case workFlowPublishStatus === '1' && schedulePublishStatus === '0': return categoryDic['10'] case workFlowPublishStatus === '1' && schedulePublishStatus === '1': default: return categoryDic['1'] } } const publishStatusFormat = (status) => { return status === 0 || status === '0' ? i18n.$t('offline') : status === 1 || status === '1' ? i18n.$t('online') : '-' } export default function (locations, links, sourceWorkFlowCode, isShowLabel) { const categoryDic = { active: { color: '#2D8DF0', category: i18n.$t('KinshipStateActive') }, 1: { color: '#00C800', category: i18n.$t('KinshipState1') }, 0: { color: '#999999', category: i18n.$t('KinshipState0') }, 10: { color: '#FF8F05', category: i18n.$t('KinshipState10') } } const newData = _.map(locations, (item) => { const { color, category } = getCategory(categoryDic, item, sourceWorkFlowCode) return { ...item, id: item.code, emphasis: { itemStyle: { color } }, category } }) const categories = [ { name: categoryDic.active.category }, { name: categoryDic['1'].category }, { name: categoryDic['0'].category }, { name: categoryDic['10'].category } ] const option = { tooltip: { trigger: 'item', triggerOn: 'mousemove', backgroundColor: '#2D303A', padding: [8, 12], formatter: (params) => { if (!params.data.name) return '' const { name, scheduleStartTime, scheduleEndTime, crontab, workFlowPublishStatus, schedulePublishStatus } = params.data return ` ${i18n.$t('workflowName')}:${name}<br/> ${i18n.$t('scheduleStartTime')}:${scheduleStartTime}<br/> ${i18n.$t('scheduleEndTime')}:${scheduleEndTime}<br/> ${i18n.$t('crontabExpression')}:${crontab}<br/> ${i18n.$t('workflowPublishStatus')}:${publishStatusFormat(workFlowPublishStatus)}<br/> ${i18n.$t('schedulePublishStatus')}:${publishStatusFormat(schedulePublishStatus)}<br/> ` }, color: '#2D303A', textStyle: { rich: { a: { fontSize: 12, color: '#2D303A', lineHeight: 12, align: 'left', padding: [4, 4, 4, 4] } } } }, color: [categoryDic.active.color, categoryDic['1'].color, categoryDic['0'].color, categoryDic['10'].color], legend: [{ orient: 'horizontal', top: 6, left: 6, data: categories }], series: [{ type: 'graph', layout: 'force', nodeScaleRatio: 1.2, draggable: true, animation: false, data: newData, roam: true, symbol: 'roundRect', symbolSize: 70, categories, label: { show: isShowLabel, position: 'inside', formatter: (params) => { if (!params.data.name) return '' const str = params.data.name.split('_').map(item => `{a|${item}\n}`).join('') 
return str }, color: '#222222', textStyle: { rich: { a: { fontSize: 12, color: '#222222', lineHeight: 12, align: 'left', padding: [4, 4, 4, 4] } } } }, edgeSymbol: ['circle', 'arrow'], edgeSymbolSize: [4, 12], force: { repulsion: 1000, edgeLength: 300 }, links: links, lineStyle: { color: '#999999' } }] } return option }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,930
[Feature][Python] Add workflow as code task type sub_process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add Python API task type sub_process (sub-task of #6407). We should cover all parameters from the UI side and make them suitable for Python. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
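As a hedged illustration of the user-facing API this feature asks for, a parent workflow might declare a sub_process node roughly as below. The `SubProcess` class, its `process_definition_code` parameter, and the `Shell`/`ProcessDefinition` wiring follow pydolphinscheduler's existing patterns but are assumptions, not taken from this record:

```python
# Hypothetical usage sketch; SubProcess and its parameter name are assumptions.
from pydolphinscheduler.core.process_definition import ProcessDefinition
from pydolphinscheduler.tasks.shell import Shell
from pydolphinscheduler.tasks.sub_process import SubProcess

with ProcessDefinition(name="parent-workflow", tenant="tenant_pydolphin") as pd:
    prepare = Shell(name="prepare", command="echo prepare")
    # Run an already-defined workflow as a single node of this one.
    child = SubProcess(name="run-child", process_definition_code=123456)
    prepare >> child
    pd.submit()
```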
https://github.com/apache/dolphinscheduler/issues/6930
https://github.com/apache/dolphinscheduler/pull/7022
3304edee47947e0b5a2a9445fe602133efc3d5d7
f480b8adf8f86b7bf1d6727385c8fad8307a452b
"2021-11-19T07:09:05Z"
java
"2021-12-03T02:26:45Z"
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/constants.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Constants for pydolphinscheduler.""" class ProcessDefinitionReleaseState: """Constants for :class:`pydolphinscheduler.core.process_definition.ProcessDefinition` release state.""" ONLINE: str = "ONLINE" OFFLINE: str = "OFFLINE" class ProcessDefinitionDefault: """Constants default value for :class:`pydolphinscheduler.core.process_definition.ProcessDefinition`.""" PROJECT: str = "project-pydolphin" TENANT: str = "tenant_pydolphin" USER: str = "userPythonGateway" # TODO simple set password same as username USER_PWD: str = "userPythonGateway" USER_EMAIL: str = "userPythonGateway@dolphinscheduler.com" USER_PHONE: str = "11111111111" USER_STATE: int = 1 QUEUE: str = "queuePythonGateway" WORKER_GROUP: str = "default" TIME_ZONE: str = "Asia/Shanghai" class TaskPriority(str): """Constants for task priority.""" HIGHEST = "HIGHEST" HIGH = "HIGH" MEDIUM = "MEDIUM" LOW = "LOW" LOWEST = "LOWEST" class TaskFlag(str): """Constants for task flag.""" YES = "YES" NO = "NO" class TaskTimeoutFlag(str): """Constants for task timeout flag.""" CLOSE = "CLOSE" class TaskType(str): """Constants for task type, it will also show you which kind we support up to now.""" SHELL = "SHELL" HTTP = "HTTP" PYTHON = "PYTHON" SQL = "SQL" class DefaultTaskCodeNum(str): """Constants and default value for default task code number.""" DEFAULT = 1 class JavaGatewayDefault(str): """Constants and default value for java gateway.""" RESULT_MESSAGE_KEYWORD = "msg" RESULT_MESSAGE_SUCCESS = "success" RESULT_STATUS_KEYWORD = "status" RESULT_STATUS_SUCCESS = "SUCCESS" RESULT_DATA = "data" class Delimiter(str): """Constants for delimiter.""" BAR = "-" DASH = "/" COLON = ":" UNDERSCORE = "_" class Time(str): """Constants for date.""" FMT_STD_DATE = "%Y-%m-%d" LEN_STD_DATE = 10 FMT_DASH_DATE = "%Y/%m/%d" FMT_SHORT_DATE = "%Y%m%d" LEN_SHORT_DATE = 8 FMT_STD_TIME = "%H:%M:%S" FMT_NO_COLON_TIME = "%H%M%S"
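Note that `TaskType` above, which appears to be the pre-fix snapshot, lists only SHELL, HTTP, PYTHON and SQL, so the PR presumably extends it. A sketch of the assumed addition, with the constant name inferred from the feature title:

```python
class TaskType(str):
    """Constants for task type, extended with the assumed SUB_PROCESS entry."""

    SHELL = "SHELL"
    HTTP = "HTTP"
    PYTHON = "PYTHON"
    SQL = "SQL"
    # Assumed addition for the sub_process feature; not shown in this snapshot.
    SUB_PROCESS = "SUB_PROCESS"
```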
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,930
[Feature][Python] Add workflow as code task type sub_process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add Python API task type sub_process (sub-task of #6407). We should cover all parameters from the UI side and make them suitable for Python. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6930
https://github.com/apache/dolphinscheduler/pull/7022
3304edee47947e0b5a2a9445fe602133efc3d5d7
f480b8adf8f86b7bf1d6727385c8fad8307a452b
"2021-11-19T07:09:05Z"
java
"2021-12-03T02:26:45Z"
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/exceptions.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Exceptions for pydolphinscheduler.""" class PyDSBaseException(Exception): """Base exception for pydolphinscheduler.""" pass class PyDSParamException(PyDSBaseException): """Exception for pydolphinscheduler parameter verification error.""" pass class PyDSTaskNoFoundException(PyDSBaseException): """Exception for pydolphinscheduler workflow task not found error.""" pass class PyDSJavaGatewayException(PyDSBaseException): """Exception for pydolphinscheduler Java gateway error.""" pass
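A short sketch of how this hierarchy is meant to be used, e.g. raising `PyDSParamException` during parameter verification; the validator function itself is illustrative only:

```python
from pydolphinscheduler.exceptions import PyDSParamException


def validate_timeout(timeout: int) -> int:
    """Illustrative check: a task timeout must be a positive integer."""
    if not isinstance(timeout, int) or timeout <= 0:
        raise PyDSParamException(
            "Parameter timeout must be a positive integer, got %s." % timeout
        )
    return timeout
```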
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,930
[Feature][Python] Add workflow as code task type sub_process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add Python API task type sub_process (sub-task of #6407). We should cover all parameters from the UI side and make them suitable for Python. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6930
https://github.com/apache/dolphinscheduler/pull/7022
3304edee47947e0b5a2a9445fe602133efc3d5d7
f480b8adf8f86b7bf1d6727385c8fad8307a452b
"2021-11-19T07:09:05Z"
java
"2021-12-03T02:26:45Z"
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/sub_process.py
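This record's file content is missing from the dump. A minimal sketch of what `sub_process.py` could contain, assuming the `Task` base class in `pydolphinscheduler.core.task` and the `TaskType.SUB_PROCESS` constant above; the real implementation may instead accept a workflow name and resolve its code through the Java gateway, which is not shown here:

```python
"""Task sub_process."""

from pydolphinscheduler.constants import TaskType
from pydolphinscheduler.core.task import Task


class SubProcess(Task):
    """Task sub_process object: run another workflow as a single task node."""

    # Extra attribute serialized into this task's params so the worker knows
    # which workflow to launch (attribute name assumed from the UI side).
    _task_custom_attr = {"process_definition_code"}

    def __init__(self, name: str, process_definition_code: int, *args, **kwargs):
        super().__init__(name, TaskType.SUB_PROCESS, *args, **kwargs)
        self.process_definition_code = process_definition_code
```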
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,930
[Feature][Python] Add workflow as code task type sub_process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add Python API task type sub_process (sub-task of #6407). We should cover all parameters from the UI side and make them suitable for Python. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6930
https://github.com/apache/dolphinscheduler/pull/7022
3304edee47947e0b5a2a9445fe602133efc3d5d7
f480b8adf8f86b7bf1d6727385c8fad8307a452b
"2021-11-19T07:09:05Z"
java
"2021-12-03T02:26:45Z"
dolphinscheduler-python/pydolphinscheduler/tests/tasks/test_sub_process.py
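The test file content is likewise missing. A hedged sketch matching the class sketched above; patching `Task.gen_code_and_version` follows the pattern used elsewhere in this package's tests, but is an assumption here:

```python
"""Test Task sub_process."""

from unittest.mock import patch

from pydolphinscheduler.tasks.sub_process import SubProcess


@patch(
    "pydolphinscheduler.core.task.Task.gen_code_and_version",
    return_value=(123, 1),
)
def test_sub_process_attributes(mock_code_version):
    """SubProcess should keep its task type and referenced workflow code."""
    task = SubProcess(name="test-sub-process", process_definition_code=456)
    assert task.task_type == "SUB_PROCESS"
    assert task.process_definition_code == 456
```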
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,930
[Feature][Python] Add workflow as code task type sub_process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Add Python API task type sub_process (sub-task of #6407). We should cover all parameters from the UI side and make them suitable for Python. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6930
https://github.com/apache/dolphinscheduler/pull/7022
3304edee47947e0b5a2a9445fe602133efc3d5d7
f480b8adf8f86b7bf1d6727385c8fad8307a452b
"2021-11-19T07:09:05Z"
java
"2021-12-03T02:26:45Z"
dolphinscheduler-python/src/main/java/org/apache/dolphinscheduler/server/PythonGatewayServer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.ExecutorService; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.QueueService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.service.TaskDefinitionService; import org.apache.dolphinscheduler.api.service.TenantService; import org.apache.dolphinscheduler.api.service.UsersService; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.ProcessExecutionTypeEnum; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.RunMode; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Queue; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.SpringApplication; import org.springframework.boot.web.servlet.support.SpringBootServletInitializer; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.FilterType; import py4j.GatewayServer; @ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = { @ComponentScan.Filter(type = FilterType.REGEX, pattern = { 
"org.apache.dolphinscheduler.server.master.*", "org.apache.dolphinscheduler.server.worker.*", "org.apache.dolphinscheduler.server.monitor.*", "org.apache.dolphinscheduler.server.log.*", "org.apache.dolphinscheduler.alert.*" }) }) public class PythonGatewayServer extends SpringBootServletInitializer { private static final Logger LOGGER = LoggerFactory.getLogger(PythonGatewayServer.class); private static final WarningType DEFAULT_WARNING_TYPE = WarningType.NONE; private static final int DEFAULT_WARNING_GROUP_ID = 0; private static final FailureStrategy DEFAULT_FAILURE_STRATEGY = FailureStrategy.CONTINUE; private static final Priority DEFAULT_PRIORITY = Priority.MEDIUM; private static final Long DEFAULT_ENVIRONMENT_CODE = -1L; private static final TaskDependType DEFAULT_TASK_DEPEND_TYPE = TaskDependType.TASK_POST; private static final RunMode DEFAULT_RUN_MODE = RunMode.RUN_MODE_SERIAL; private static final int DEFAULT_DRY_RUN = 0; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProjectService projectService; @Autowired private TenantService tenantService; @Autowired private ExecutorService executorService; @Autowired private ProcessDefinitionService processDefinitionService; @Autowired private TaskDefinitionService taskDefinitionService; @Autowired private UsersService usersService; @Autowired private QueueService queueService; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private ScheduleMapper scheduleMapper; @Autowired private DataSourceMapper dataSourceMapper; // TODO replace this user to build in admin user if we make sure build in one could not be change private final User dummyAdminUser = new User() { { setId(Integer.MAX_VALUE); setUserName("dummyUser"); setUserType(UserType.ADMIN_USER); } }; private final Queue queuePythonGateway = new Queue() { { setId(Integer.MAX_VALUE); setQueueName("queuePythonGateway"); } }; public String ping() { return "PONG"; } // TODO Should we import package in python client side? utils package can but service can not, why // Core api public Map<String, Object> genTaskCodeList(Integer genNum) { return taskDefinitionService.genTaskCodeList(genNum); } public Map<String, Long> getCodeAndVersion(String projectName, String taskName) throws CodeGenerateUtils.CodeGenerateException { Project project = projectMapper.queryByName(projectName); Map<String, Long> result = new HashMap<>(); // project do not exists, mean task not exists too, so we should directly return init value if (project == null) { result.put("code", CodeGenerateUtils.getInstance().genCode()); result.put("version", 0L); return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByName(project.getCode(), taskName); if (taskDefinition == null) { result.put("code", CodeGenerateUtils.getInstance().genCode()); result.put("version", 0L); } else { result.put("code", taskDefinition.getCode()); result.put("version", (long) taskDefinition.getVersion()); } return result; } /** * create or update process definition. 
* If the process definition does not exist in Project=`projectCode`, a new one is created * If the process definition already exists in Project=`projectCode`, it is updated * * @param userName user name who creates or updates the process definition * @param projectName project name which the process definition belongs to * @param name process definition name * @param description description * @param globalParams global params * @param schedule schedule for the process definition; no schedule is set if null, * and the existing schedule is always refreshed if not null * @param locations locations json object about all tasks * @param timeout timeout for the process definition run; if the running time is longer than the timeout, * the task will be marked as failed * @param workerGroup worker group in which the task runs * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ public Long createOrUpdateProcessDefinition(String userName, String projectName, String name, String description, String globalParams, String schedule, String locations, int timeout, String workerGroup, String tenantCode, String taskRelationJson, String taskDefinitionJson, ProcessExecutionTypeEnum executionType) { User user = usersService.queryUser(userName); Project project = (Project) projectService.queryByName(user, projectName).get(Constants.DATA_LIST); long projectCode = project.getCode(); Map<String, Object> verifyProcessDefinitionExists = processDefinitionService.verifyProcessDefinitionName(user, projectCode, name); Status verifyStatus = (Status) verifyProcessDefinitionExists.get(Constants.STATUS); long processDefinitionCode; // create or update process definition if (verifyStatus == Status.PROCESS_DEFINITION_NAME_EXIST) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); processDefinitionCode = processDefinition.getCode(); // make sure the process definition is offline so it can be edited processDefinitionService.releaseProcessDefinition(user, projectCode, processDefinitionCode, ReleaseState.OFFLINE); Map<String, Object> result = processDefinitionService.updateProcessDefinition(user, projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, tenantCode, taskRelationJson, taskDefinitionJson, executionType); } else if (verifyStatus == Status.SUCCESS) { Map<String, Object> result = processDefinitionService.createProcessDefinition(user, projectCode, name, description, globalParams, locations, timeout, tenantCode, taskRelationJson, taskDefinitionJson, executionType); ProcessDefinition processDefinition = (ProcessDefinition) result.get(Constants.DATA_LIST); processDefinitionCode = processDefinition.getCode(); } else { String msg = "Verify process definition exists status is invalid, neither SUCCESS nor PROCESS_DEFINITION_NAME_EXIST."; LOGGER.error(msg); throw new RuntimeException(msg); } // Refresh process definition schedule if (schedule != null) { createOrUpdateSchedule(user, projectCode, processDefinitionCode, schedule, workerGroup); } processDefinitionService.releaseProcessDefinition(user, projectCode, processDefinitionCode, ReleaseState.ONLINE); return processDefinitionCode; } /** * create or update process definition schedule. 
* It will always use the latest schedule defined in workflow-as-code, and set the schedule online when * it's not null * * @param user user who creates or updates the schedule * @param projectCode project which the process definition belongs to * @param processDefinitionCode process definition code * @param schedule schedule expression * @param workerGroup worker group */ private void createOrUpdateSchedule(User user, long projectCode, long processDefinitionCode, String schedule, String workerGroup) { Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(processDefinitionCode); // create or update schedule int scheduleId; if (scheduleObj == null) { processDefinitionService.releaseProcessDefinition(user, projectCode, processDefinitionCode, ReleaseState.ONLINE); Map<String, Object> result = schedulerService.insertSchedule(user, projectCode, processDefinitionCode, schedule, DEFAULT_WARNING_TYPE, DEFAULT_WARNING_GROUP_ID, DEFAULT_FAILURE_STRATEGY, DEFAULT_PRIORITY, workerGroup, DEFAULT_ENVIRONMENT_CODE); scheduleId = (int) result.get("scheduleId"); } else { scheduleId = scheduleObj.getId(); processDefinitionService.releaseProcessDefinition(user, projectCode, processDefinitionCode, ReleaseState.OFFLINE); schedulerService.updateSchedule(user, projectCode, scheduleId, schedule, DEFAULT_WARNING_TYPE, DEFAULT_WARNING_GROUP_ID, DEFAULT_FAILURE_STRATEGY, DEFAULT_PRIORITY, workerGroup, DEFAULT_ENVIRONMENT_CODE); } schedulerService.setScheduleState(user, projectCode, scheduleId, ReleaseState.ONLINE); } public void execProcessInstance(String userName, String projectName, String processDefinitionName, String cronTime, String workerGroup, Integer timeout ) { User user = usersService.queryUser(userName); Project project = projectMapper.queryByName(projectName); ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(project.getCode(), processDefinitionName); // make sure the process definition is online processDefinitionService.releaseProcessDefinition(user, project.getCode(), processDefinition.getCode(), ReleaseState.ONLINE); executorService.execProcessInstance(user, project.getCode(), processDefinition.getCode(), cronTime, null, DEFAULT_FAILURE_STRATEGY, null, DEFAULT_TASK_DEPEND_TYPE, DEFAULT_WARNING_TYPE, DEFAULT_WARNING_GROUP_ID, DEFAULT_RUN_MODE, DEFAULT_PRIORITY, workerGroup, DEFAULT_ENVIRONMENT_CODE, timeout, null, null, DEFAULT_DRY_RUN ); } // side object public Map<String, Object> createProject(String userName, String name, String desc) { User user = usersService.queryUser(userName); return projectService.createProject(user, name, desc); } public Map<String, Object> createQueue(String name, String queueName) { Result<Object> verifyQueueExists = queueService.verifyQueue(name, queueName); if (verifyQueueExists.getCode() == 0) { return queueService.createQueue(dummyAdminUser, name, queueName); } else { Map<String, Object> result = new HashMap<>(); // TODO function putMsg does not work here result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } } public Map<String, Object> createTenant(String tenantCode, String desc, String queueName) throws Exception { if (tenantService.checkTenantExists(tenantCode)) { Map<String, Object> result = new HashMap<>(); // TODO function putMsg does not work here result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } else { Result<Object> verifyQueueExists = queueService.verifyQueue(queueName, queueName); if (verifyQueueExists.getCode() == 0) { // 
TODO why does create not return an id? queueService.createQueue(dummyAdminUser, queueName, queueName); } Map<String, Object> result = queueService.queryQueueName(queueName); List<Queue> queueList = (List<Queue>) result.get(Constants.DATA_LIST); Queue queue = queueList.get(0); return tenantService.createTenant(dummyAdminUser, tenantCode, queue.getId(), desc); } } public void createUser(String userName, String userPassword, String email, String phone, String tenantCode, String queue, int state) { User user = usersService.queryUser(userName); if (Objects.isNull(user)) { Map<String, Object> tenantResult = tenantService.queryByTenantCode(tenantCode); Tenant tenant = (Tenant) tenantResult.get(Constants.DATA_LIST); usersService.createUser(userName, userPassword, email, tenant.getId(), phone, queue, state); } } /** * Get datasource by the given datasource name. It returns a map containing the datasource id, type and name. * Useful when the Python API creates a SQL task which needs datasource information. * * @param datasourceName datasource name to query */ public Map<String, Object> getDatasourceInfo(String datasourceName) { Map<String, Object> result = new HashMap<>(); List<DataSource> dataSourceList = dataSourceMapper.queryDataSourceByName(datasourceName); if (dataSourceList.size() > 1) { String msg = String.format("Get more than one datasource by name %s", datasourceName); LOGGER.error(msg); throw new IllegalArgumentException(msg); } else if (dataSourceList.size() == 0) { String msg = String.format("Can not find any datasource by name %s", datasourceName); LOGGER.error(msg); throw new IllegalArgumentException(msg); } else { DataSource dataSource = dataSourceList.get(0); result.put("id", dataSource.getId()); result.put("type", dataSource.getType().name()); result.put("name", dataSource.getName()); } return result; } @PostConstruct public void run() { GatewayServer server = new GatewayServer(this); GatewayServer.turnLoggingOn(); // Start server to accept python client RPC server.start(); } public static void main(String[] args) { SpringApplication.run(PythonGatewayServer.class, args); } }
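Since `run()` registers this class as the py4j entry point on the default port, a Python client can call the methods above directly. A minimal connectivity check using the standard py4j API; only `ping()` comes from the code above:

```python
from py4j.java_gateway import JavaGateway

# Connects to the GatewayServer started in PythonGatewayServer#run()
# on py4j's default port (25333).
gateway = JavaGateway()
assert gateway.entry_point.ping() == "PONG"
```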
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,144
[Feature][Workflow Relationship] Format name.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description ![1638503788](https://user-images.githubusercontent.com/19239641/144542789-533eede1-2dd6-4729-9e21-83734dd7f0c6.jpg) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7144
https://github.com/apache/dolphinscheduler/pull/7143
2b519f31e919e5ccf8d58b747e27594eb2d7306d
99f6ab38478d44e5e2ce2278d5872a2e4e092a3c
"2021-12-03T03:56:36Z"
java
"2021-12-03T06:43:45Z"
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/kinship/_source/graphGridOption.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import i18n from '@/module/i18n/index.js' import dayjs from 'dayjs' const getCategory = (categoryDic, { workFlowPublishStatus, schedulePublishStatus, code }, sourceWorkFlowCode) => { if (code === sourceWorkFlowCode) return categoryDic.active switch (true) { case workFlowPublishStatus === '0': return categoryDic['0'] case workFlowPublishStatus === '1' && schedulePublishStatus === '0': return categoryDic['10'] case workFlowPublishStatus === '1' && schedulePublishStatus === '1': default: return categoryDic['1'] } } const publishStatusFormat = (status) => { return status === 0 || status === '0' ? i18n.$t('offline') : status === 1 || status === '1' ? i18n.$t('online') : '-' } export default function (locations, links, sourceWorkFlowCode, isShowLabel) { const categoryDic = { active: { color: '#2D8DF0', category: i18n.$t('KinshipStateActive') }, 1: { color: '#00C800', category: i18n.$t('KinshipState1') }, 0: { color: '#999999', category: i18n.$t('KinshipState0') }, 10: { color: '#FF8F05', category: i18n.$t('KinshipState10') } } const newData = _.map(locations, (item) => { const { color, category } = getCategory(categoryDic, item, sourceWorkFlowCode) return { ...item, id: item.code, emphasis: { itemStyle: { color } }, category } }) const categories = [ { name: categoryDic.active.category }, { name: categoryDic['1'].category }, { name: categoryDic['0'].category }, { name: categoryDic['10'].category } ] const option = { tooltip: { trigger: 'item', triggerOn: 'mousemove', backgroundColor: '#2D303A', padding: [8, 12], formatter: (params) => { if (!params.data.name) return '' const { name, scheduleStartTime, scheduleEndTime, crontab, workFlowPublishStatus, schedulePublishStatus } = params.data return ` ${i18n.$t('workflowName')}:${name}<br/> ${i18n.$t('scheduleStartTime')}:${dayjs(scheduleStartTime).format('YYYY-MM-DD HH:mm:ss')}<br/> ${i18n.$t('scheduleEndTime')}:${dayjs(scheduleEndTime).format('YYYY-MM-DD HH:mm:ss')}<br/> ${i18n.$t('crontabExpression')}:${crontab}<br/> ${i18n.$t('workflowPublishStatus')}:${publishStatusFormat(workFlowPublishStatus)}<br/> ${i18n.$t('schedulePublishStatus')}:${publishStatusFormat(schedulePublishStatus)}<br/> ` }, color: '#2D303A', textStyle: { rich: { a: { fontSize: 12, color: '#2D303A', lineHeight: 12, align: 'left', padding: [4, 4, 4, 4] } } } }, color: [categoryDic.active.color, categoryDic['1'].color, categoryDic['0'].color, categoryDic['10'].color], legend: [{ orient: 'horizontal', top: 6, left: 6, data: categories }], series: [{ type: 'graph', layout: 'force', nodeScaleRatio: 1.2, draggable: true, animation: false, data: newData, roam: true, symbol: 'roundRect', symbolSize: 70, categories, label: { show: isShowLabel, position: 'inside', formatter: (params) => { if 
(!params.data.name) return '' const str = params.data.name.split('_').map(item => `{a|${item}\n}`).join('') return str }, color: '#222222', textStyle: { rich: { a: { fontSize: 12, color: '#222222', lineHeight: 12, align: 'left', padding: [4, 4, 4, 4] } } } }, edgeSymbol: ['circle', 'arrow'], edgeSymbolSize: [4, 12], force: { repulsion: 1000, edgeLength: 300 }, links: links, lineStyle: { color: '#999999' } }] } return option }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,147
[Implement][UI] Create worker pop-up page display
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Display of the create-worker pop-up page ![iShot2021-12-03 09 19 26](https://user-images.githubusercontent.com/37063904/144547993-2fce7b49-d0dc-48a6-ac7d-dbf08cf53e8e.png) ### Use case The create-worker pop-up page should display normally ### Related issues Create a worker ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7147
https://github.com/apache/dolphinscheduler/pull/7148
8ebe060658edb7075bdcf123f9ad5bb3d0fc9ceb
3de182dc0859c25877d1623c156fbc6faa97e0d3
"2021-12-03T05:02:02Z"
java
"2021-12-03T08:55:58Z"
dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="list-model"> <div class="table-box"> <el-table :data="list" size="mini" style="width: 100%"> <el-table-column type="index" :label="$t('#')" width="50"></el-table-column> <el-table-column :label="$t('UDF Resource Name')" min-width="100"> <template slot-scope="scope"> <el-popover trigger="hover" placement="top"> <p>{{ scope.row.alias }}</p> <div slot="reference" class="name-wrapper"> <a href="javascript:" class="links" @click="_go(scope.row)">{{ scope.row.alias }}</a> </div> </el-popover> </template> </el-table-column> <el-table-column :label="$t('Whether directory')" min-width="100"> <template slot-scope="scope"> {{scope.row.directory? $t('Yes') : $t('No')}} </template> </el-table-column> <el-table-column prop="fileName" :label="$t('File Name')"></el-table-column> <el-table-column :label="$t('File Size')"> <template slot-scope="scope"> {{_rtSize(scope.row.size)}} </template> </el-table-column> <el-table-column :label="$t('Description')" width="200"> <template slot-scope="scope"> <span>{{scope.row.description | filterNull}}</span> </template> </el-table-column> <el-table-column :label="$t('Create Time')" min-width="120"> <template slot-scope="scope"> <span>{{scope.row.createTime | formatDate}}</span> </template> </el-table-column> <el-table-column :label="$t('Update Time')" min-width="120"> <template slot-scope="scope"> <span>{{scope.row.updateTime | formatDate}}</span> </template> </el-table-column> <el-table-column :label="$t('Operation')" min-width="120"> <template slot-scope="scope"> <el-tooltip :content="$t('Rename')" placement="top" :enterable="false"> <span><el-button type="primary" size="mini" icon="el-icon-edit" @click="_rename(scope.row,scope.$index)" circle></el-button></span> </el-tooltip> <el-tooltip :content="$t('Download')" placement="top" :enterable="false"> <span><el-button type="primary" size="mini" icon="el-icon-download" @click="_downloadFile(scope.row)" :disabled="scope.row.directory? 
true: false" circle></el-button></span> </el-tooltip> <el-tooltip :content="$t('Delete')" placement="top" :enterable="false"> <el-popconfirm :confirmButtonText="$t('Confirm')" :cancelButtonText="$t('Cancel')" icon="el-icon-info" iconColor="red" :title="$t('Delete?')" @onConfirm="_delete(scope.row,scope.row.id)" > <el-button type="danger" size="mini" icon="el-icon-delete" circle slot="reference"></el-button> </el-popconfirm> </el-tooltip> </template> </el-table-column> </el-table> </div> <el-dialog :visible.sync="renameDialog" width="45%"> <m-rename :item="item" @onUpDate="onUpDate" @close="close"></m-rename> </el-dialog> </div> </template> <script> import { mapActions } from 'vuex' import mRename from './rename' import { downloadFile } from '@/module/download' import { bytesToSize } from '@/module/util/util' import localStore from '@/module/util/localStorage' export default { name: 'udf-manage-list', data () { return { list: [], renameDialog: false, index: null } }, props: { udfResourcesList: Array, pageNo: Number, pageSize: Number }, methods: { ...mapActions('resource', ['deleteResource']), _downloadFile (item) { downloadFile(`resources/${item.id}/download`) }, _go (item) { localStore.setItem('file', `${item.alias}|${item.size}`) if (item.directory) { localStore.setItem('pid', `${item.id}`) localStore.setItem('currentDir', `${item.fullName}`) this.$router.push({ path: `/resource/udf/subUdfDirectory/${item.id}` }) } }, _rtSize (val) { return bytesToSize(parseInt(val)) }, _delete (item, i) { this.deleteResource({ id: item.id }).then(res => { this.$emit('on-update') this.$message.success(res.msg) }).catch(e => { this.$message.error(e.msg || '') }) }, _rename (item, i) { this.item = item this.index = i this.renameDialog = true }, onUpDate (item) { this.$set(this.list, this.index, item) this.renameDialog = false }, close () { this.renameDialog = false } }, watch: { udfResourcesList (a) { this.list = [] setTimeout(() => { this.list = a }) } }, created () { }, mounted () { this.list = this.udfResourcesList }, components: { mRename } } </script>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,147
[Implement][UI] Create worker pop-up page display
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Display of the create-worker pop-up page ![iShot2021-12-03 09 19 26](https://user-images.githubusercontent.com/37063904/144547993-2fce7b49-d0dc-48a6-ac7d-dbf08cf53e8e.png) ### Use case The create-worker pop-up page should display normally ### Related issues Create a worker ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7147
https://github.com/apache/dolphinscheduler/pull/7148
8ebe060658edb7075bdcf123f9ad5bb3d0fc9ceb
3de182dc0859c25877d1623c156fbc6faa97e0d3
"2021-12-03T05:02:02Z"
java
"2021-12-03T08:55:58Z"
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/workerGroups/index.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <m-list-construction :title="$t('Worker group manage')">
    <template slot="conditions">
      <m-conditions @on-conditions="_onConditions">
        <template slot="button-group" v-if="isADMIN">
          <el-button size="mini" @click="_create('')">{{$t('Create worker group')}}</el-button>
          <el-dialog
            :title="item ? $t('Edit worker group') : $t('Create worker group')"
            v-if="createWorkerGroupDialog"
            :visible.sync="createWorkerGroupDialog"
            width="50%">
            <m-create-worker :item="item" :worker-address-list="workerAddressList" @onUpdate="onUpdate" @close="close"></m-create-worker>
          </el-dialog>
        </template>
      </m-conditions>
    </template>
    <template slot="content">
      <template v-if="workerGroupList.length || total>0">
        <m-list @on-update="_onUpdate" @on-edit="_onEdit" :worker-group-list="workerGroupList" :page-no="searchParams.pageNo" :page-size="searchParams.pageSize">
        </m-list>
        <div class="page-box">
          <el-pagination
            background
            @current-change="_page"
            @size-change="_pageSize"
            :page-size="searchParams.pageSize"
            :current-page.sync="searchParams.pageNo"
            :page-sizes="[10, 30, 50]"
            layout="sizes, prev, pager, next, jumper"
            :total="total">
          </el-pagination>
        </div>
      </template>
      <template v-if="!workerGroupList.length && total<=0">
        <m-no-data></m-no-data>
      </template>
      <m-spin :is-spin="isLoading"></m-spin>
    </template>
  </m-list-construction>
</template>
<script>
  import _ from 'lodash'
  import { mapActions } from 'vuex'
  import mList from './_source/list'
  import store from '@/conf/home/store'
  import mSpin from '@/module/components/spin/spin'
  import mNoData from '@/module/components/noData/noData'
  import listUrlParamHandle from '@/module/mixin/listUrlParamHandle'
  import mConditions from '@/module/components/conditions/conditions'
  import mListConstruction from '@/module/components/listConstruction/listConstruction'
  import mCreateWorker from './_source/createWorker'

  export default {
    name: 'worker-groups-index',
    data () {
      return {
        total: null,
        isLoading: false,
        workerGroupList: [],
        workerAddressList: [],
        searchParams: {
          pageSize: 10,
          pageNo: 1,
          searchVal: ''
        },
        isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER',
        createWorkerGroupDialog: false,
        item: {}
      }
    },
    mixins: [listUrlParamHandle],
    props: {},
    methods: {
      ...mapActions('security', ['getWorkerGroups', 'getWorkerAddresses']),
      /**
       * Inquire
       */
      _onConditions (o) {
        this.searchParams = _.assign(this.searchParams, o)
        this.searchParams.pageNo = 1
      },
      _page (val) {
        this.searchParams.pageNo = val
      },
      _pageSize (val) {
        this.searchParams.pageSize = val
      },
      _onUpdate () {
        this._debounceGET()
      },
      _onEdit (item) {
        this._create(item)
      },
      _create (item) {
        this.createWorkerGroupDialog = true
        this.item = item
      },
      onUpdate () {
        this._debounceGET('false')
        this.createWorkerGroupDialog = false
      },
      close () {
        this.createWorkerGroupDialog = false
      },
      _getList (flag) {
        this.isLoading = !flag
        this.getWorkerGroups(this.searchParams).then(res => {
          if (this.searchParams.pageNo > 1 && res.totalList.length === 0) {
            this.searchParams.pageNo = this.searchParams.pageNo - 1
          } else {
            this.workerGroupList = []
            this.workerGroupList = res.totalList
            this.total = res.total
            this.isLoading = false
          }
        }).catch(e => {
          this.isLoading = false
        })
      },
      _getWorkerAddressList () {
        this.getWorkerAddresses().then(res => {
          this.workerAddressList = res.data.map(x => ({ id: x, label: x }))
        })
      }
    },
    watch: {
      // router
      '$route' (a) {
        // url no params get instance list
        this.searchParams.pageNo = _.isEmpty(a.query) ? 1 : a.query.pageNo
      }
    },
    created () {
      this._getWorkerAddressList()
    },
    mounted () {
    },
    components: { mList, mListConstruction, mConditions, mSpin, mNoData, mCreateWorker }
  }
</script>
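Worth noting in the `_getList` above (and again in the datasource list further down): when a refresh on `pageNo > 1` comes back empty — typically because the last item on the page was just deleted — the view steps the page number back instead of rendering an empty page. A self-contained sketch of the idea, with illustrative names not taken from the codebase:

```js
// Illustrative sketch (names are not from the codebase): the empty-page
// back-step pattern used by _getList in these list views.
async function loadList (searchParams, fetchPage) {
  const res = await fetchPage(searchParams) // stand-in for the real vuex action
  if (searchParams.pageNo > 1 && res.totalList.length === 0) {
    // The last item on this page was deleted: step back one page and
    // fetch again so the user never sees an empty page.
    searchParams.pageNo -= 1
    return loadList(searchParams, fetchPage)
  }
  return { list: res.totalList, total: res.total }
}
```

The sketch re-fetches recursively to stay self-contained; in the components above the re-fetch appears to happen indirectly, through the shared mixin's reaction to the changed `searchParams`.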
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,112
[Bug] [UI] The modal of datasource will disappear when I click the area outside of this modal.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When creating a datasource, the modal disappears if I click the area outside of it, and the information already filled in is lost. Filling out some info. <img width="1046" alt="image" src="https://user-images.githubusercontent.com/4928204/144344041-453097c0-e3b7-4cd0-8368-b408020e045e.png"> Everything is gone. <img width="850" alt="image" src="https://user-images.githubusercontent.com/4928204/144344575-6585f175-ca50-4dcf-a8e4-fa7ae638753e.png"> ### What you expected to happen I expect that the modal shouldn't disappear when clicking the area outside of the modal. ### How to reproduce 1. Create a datasource and fill out the form. 2. Click the area outside of the modal; the modal disappears. 3. The info you have filled in is gone at the same time. ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7112
https://github.com/apache/dolphinscheduler/pull/7120
2bf73603bbae916fe2bb26456a86015735573d4e
226bed1768332801c5ef4c47dfba9ddd86f3d309
"2021-12-02T02:09:04Z"
java
"2021-12-03T08:56:30Z"
dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/index.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <m-list-construction :title="$t('Datasource')">
    <template slot="conditions">
      <m-conditions @on-conditions="_onConditions">
        <template slot="button-group">
          <el-button size="mini" @click="_create('')">{{$t('Create Datasource')}}</el-button>
          <el-dialog
            :title="item ? ($t('Edit') + $t('Datasource')) : ($t('Create') + $t('Datasource'))"
            v-if="dialogVisible"
            :visible.sync="dialogVisible"
            width="auto"
            :append-to-body="true">
            <m-create-data-source :item="item" @onUpdate="onUpdate" @close="close"></m-create-data-source>
          </el-dialog>
        </template>
      </m-conditions>
    </template>
    <template slot="content">
      <template v-if="datasourcesList.length || total>0">
        <m-list @on-update="_onUpdate" :datasources-list="datasourcesList" :page-no="searchParams.pageNo" :page-size="searchParams.pageSize"></m-list>
        <div class="page-box">
          <el-pagination
            background
            @current-change="_page"
            @size-change="_pageSize"
            :page-size="searchParams.pageSize"
            :current-page.sync="searchParams.pageNo"
            :page-sizes="[10, 30, 50]"
            layout="sizes, prev, pager, next, jumper"
            :total="total">
          </el-pagination>
        </div>
      </template>
      <template v-if="!datasourcesList.length && total<=0">
        <m-no-data></m-no-data>
      </template>
      <m-spin :is-spin="isLoading" :is-left="false">
      </m-spin>
    </template>
  </m-list-construction>
</template>
<script>
  import _ from 'lodash'
  import { mapActions } from 'vuex'
  import mList from './_source/list'
  import mSpin from '@/module/components/spin/spin'
  import mNoData from '@/module/components/noData/noData'
  import mCreateDataSource from './_source/createDataSource'
  import listUrlParamHandle from '@/module/mixin/listUrlParamHandle'
  import mConditions from '@/module/components/conditions/conditions'
  import mListConstruction from '@/module/components/listConstruction/listConstruction'

  export default {
    name: 'datasource-indexP',
    data () {
      return {
        // loading
        isLoading: true,
        // Total number of articles
        total: 20,
        // data sources(List)
        datasourcesList: [],
        searchParams: {
          // Number of pages per page
          pageSize: 10,
          // Number of pages
          pageNo: 1,
          // Search value
          searchVal: ''
        },
        dialogVisible: false,
        item: {}
      }
    },
    mixins: [listUrlParamHandle],
    props: {},
    methods: {
      ...mapActions('datasource', ['getDatasourcesListP']),
      /**
       * create data source
       */
      _create (item) {
        this.item = item
        this.dialogVisible = true
      },
      onUpdate () {
        this._debounceGET('false')
        this.dialogVisible = false
      },
      close () {
        this.dialogVisible = false
      },
      /**
       * page
       */
      _page (val) {
        this.searchParams.pageNo = val
      },
      _pageSize (val) {
        this.searchParams.pageSize = val
      },
      /**
       * conditions event
       */
      _onConditions (o) {
        this.searchParams = _.assign(this.searchParams, o)
        this.searchParams.pageNo = 1
      },
      /**
       * get data(List)
       */
      _getList (flag) {
        this.isLoading = !flag
        this.getDatasourcesListP(this.searchParams).then(res => {
          if (this.searchParams.pageNo > 1 && res.totalList.length === 0) {
            this.searchParams.pageNo = this.searchParams.pageNo - 1
          } else {
            this.datasourcesList = []
            this.datasourcesList = res.totalList
            this.total = res.total
            this.isLoading = false
          }
        }).catch(e => {
          this.isLoading = false
        })
      },
      _onUpdate () {
        this._debounceGET('false')
      }
    },
    watch: {
      // router
      '$route' (a) {
        // url no params get instance list
        this.searchParams.pageNo = _.isEmpty(a.query) ? 1 : a.query.pageNo
      }
    },
    created () {
    },
    mounted () {
    },
    components: { mList, mConditions, mSpin, mListConstruction, mNoData, mCreateDataSource }
  }
</script>
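The record above shows only the post-fix file, not PR #7120's diff. In Element UI, whether clicking the mask closes a dialog is governed by the `close-on-click-modal` prop, which defaults to `true` — matching the behavior reported in the issue. A hedged sketch (not necessarily the PR's actual change) of keeping such a dialog open on outside clicks:

```html
<!-- Hedged sketch, not the confirmed PR change: close-on-click-modal is a
     real Element UI prop (default: true) controlling mask-click dismissal. -->
<el-dialog
  :title="$t('Create') + $t('Datasource')"
  :visible.sync="dialogVisible"
  :close-on-click-modal="false"
  :append-to-body="true"
  width="auto">
  <m-create-data-source :item="item" @onUpdate="onUpdate" @close="close"></m-create-data-source>
</el-dialog>
```

Whether the actual fix used this prop or another mechanism (such as the `:append-to-body="true"` visible in the fixed file) isn't determinable from this record alone.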
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,061
[Bug] [dolphinscheduler-api] The pages of project management, workflow instance and task instance are accessed slowly
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The pages of project management, workflow instance and task instance are accessed slowly ![image](https://user-images.githubusercontent.com/95271106/143994546-7125905d-b772-4a75-bcf1-91573804387b.png) ### What you expected to happen When the amount of data is large, the SQL query statement is slow ### How to reproduce Optimize SQL query statements ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7061
https://github.com/apache/dolphinscheduler/pull/7166
ed9fca6c976761abcf4cef2656b61516fd976ad0
0e75bd8008b62795976919409b79f9137e929a4c
"2021-11-30T06:10:47Z"
java
"2021-12-04T07:53:18Z"
dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`; CREATE TABLE `QRTZ_BLOB_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `BLOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CALENDARS`; CREATE TABLE `QRTZ_CALENDARS` ( `SCHED_NAME` varchar(120) NOT NULL, `CALENDAR_NAME` varchar(200) NOT NULL, `CALENDAR` blob NOT NULL, PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`; CREATE TABLE `QRTZ_CRON_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `CRON_EXPRESSION` varchar(120) NOT NULL, `TIME_ZONE_ID` varchar(80) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`; CREATE TABLE `QRTZ_FIRED_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `ENTRY_ID` varchar(200) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `FIRED_TIME` bigint(13) NOT NULL, `SCHED_TIME` bigint(13) NOT NULL, `PRIORITY` int(11) NOT NULL, `STATE` varchar(16) NOT NULL, `JOB_NAME` varchar(200) DEFAULT NULL, `JOB_GROUP` varchar(200) DEFAULT NULL, `IS_NONCONCURRENT` varchar(1) DEFAULT NULL, `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`), KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`), KEY 
`IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`; CREATE TABLE `QRTZ_JOB_DETAILS` ( `SCHED_NAME` varchar(120) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `JOB_CLASS_NAME` varchar(250) NOT NULL, `IS_DURABLE` varchar(1) NOT NULL, `IS_NONCONCURRENT` varchar(1) NOT NULL, `IS_UPDATE_DATA` varchar(1) NOT NULL, `REQUESTS_RECOVERY` varchar(1) NOT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_LOCKS`; CREATE TABLE `QRTZ_LOCKS` ( `SCHED_NAME` varchar(120) NOT NULL, `LOCK_NAME` varchar(40) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`; CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`; CREATE TABLE `QRTZ_SCHEDULER_STATE` ( `SCHED_NAME` varchar(120) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `LAST_CHECKIN_TIME` bigint(13) NOT NULL, `CHECKIN_INTERVAL` bigint(13) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`; CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `REPEAT_COUNT` bigint(7) NOT NULL, `REPEAT_INTERVAL` bigint(12) NOT NULL, `TIMES_TRIGGERED` bigint(10) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- 
---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`; CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `STR_PROP_1` varchar(512) DEFAULT NULL, `STR_PROP_2` varchar(512) DEFAULT NULL, `STR_PROP_3` varchar(512) DEFAULT NULL, `INT_PROP_1` int(11) DEFAULT NULL, `INT_PROP_2` int(11) DEFAULT NULL, `LONG_PROP_1` bigint(20) DEFAULT NULL, `LONG_PROP_2` bigint(20) DEFAULT NULL, `DEC_PROP_1` decimal(13,4) DEFAULT NULL, `DEC_PROP_2` decimal(13,4) DEFAULT NULL, `BOOL_PROP_1` varchar(1) DEFAULT NULL, `BOOL_PROP_2` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_TRIGGERS`; CREATE TABLE `QRTZ_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL, `PREV_FIRE_TIME` bigint(13) DEFAULT NULL, `PRIORITY` int(11) DEFAULT NULL, `TRIGGER_STATE` varchar(16) NOT NULL, `TRIGGER_TYPE` varchar(8) NOT NULL, `START_TIME` bigint(13) NOT NULL, `END_TIME` bigint(13) DEFAULT NULL, `CALENDAR_NAME` varchar(200) DEFAULT NULL, `MISFIRE_INSTR` smallint(2) DEFAULT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`), KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS `t_ds_access_token`; CREATE TABLE `t_ds_access_token` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `token` varchar(64) DEFAULT NULL COMMENT 'token', `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB 
AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert`; CREATE TABLE `t_ds_alert` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `title` varchar(64) DEFAULT NULL COMMENT 'title', `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)', `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed', `log` text COMMENT 'log', `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alertgroup`; CREATE TABLE `t_ds_alertgroup`( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids', `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id', `group_name` varchar(255) DEFAULT NULL COMMENT 'group name', `description` varchar(255) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_command`; CREATE TABLE `t_ds_command` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `process_definition_version` int(11) DEFAULT '0' COMMENT 'process definition version', `process_instance_id` int(11) DEFAULT '0' COMMENT 'process instance id', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `dry_run` tinyint(4) DEFAULT 
'0' COMMENT 'dry run flag:0 normal, 1 dry run', PRIMARY KEY (`id`), KEY `priority_id_index` (`process_instance_priority`,`id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_datasource -- ---------------------------- DROP TABLE IF EXISTS `t_ds_datasource`; CREATE TABLE `t_ds_datasource` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(64) NOT NULL COMMENT 'data source name', `note` varchar(255) DEFAULT NULL COMMENT 'description', `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark', `user_id` int(11) NOT NULL COMMENT 'the creator id', `connection_params` text NOT NULL COMMENT 'json connection params', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_error_command`; CREATE TABLE `t_ds_error_command` ( `id` int(11) NOT NULL COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `process_definition_version` int(11) DEFAULT '0' COMMENT 'process definition version', `process_instance_id` int(11) DEFAULT '0' COMMENT 'process instance id: 0', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `message` text COMMENT 'message', `dry_run` tinyint(4) DEFAULT '0' COMMENT 'dry run flag: 0 normal, 1 dry run', PRIMARY KEY (`id`) USING BTREE ) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC; -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition`; CREATE TABLE `t_ds_process_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(255) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT '0' COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', 
`global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `execution_type` tinyint(4) DEFAULT '0' COMMENT 'execution_type 0:parallel,1:serial wait,2:serial discard,3:serial priority', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition_log`; CREATE TABLE `t_ds_process_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT '0' COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `execution_type` tinyint(4) DEFAULT '0' COMMENT 'execution_type 0:parallel,1:serial wait,2:serial discard,3:serial priority', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition`; CREATE TABLE `t_ds_task_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT '0' COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) 
DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` text COMMENT 'resource id, separated by comma', `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition_log`; CREATE TABLE `t_ds_task_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT '0' COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` text DEFAULT NULL COMMENT 'resource id, separated by comma', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation`; CREATE TABLE `t_ds_process_task_relation` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `process_definition_version` int(11) NOT NULL COMMENT 'process version', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `create_time` datetime NOT NULL COMMENT 'create time', 
`update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation_log`; CREATE TABLE `t_ds_process_task_relation_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `process_definition_version` int(11) NOT NULL COMMENT 'process version', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_instance`; CREATE TABLE `t_ds_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'process instance name', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `process_definition_version` int(11) DEFAULT '0' COMMENT 'process definition version', `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance', `start_time` datetime DEFAULT NULL COMMENT 'process instance start time', `end_time` datetime DEFAULT NULL COMMENT 'process instance end time', `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times', `host` varchar(135) DEFAULT NULL COMMENT 'process instance host', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes', `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 
0:no warning,1:warning if process success,2:warning if process failed,3:warning if success', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `command_start_time` datetime DEFAULT NULL COMMENT 'command start time', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT '1' COMMENT 'flag', `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process', `executor_id` int(11) NOT NULL COMMENT 'executor id', `history_cmd` text COMMENT 'history commands of process instance operation', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `var_pool` longtext COMMENT 'var_pool', `dry_run` tinyint(4) DEFAULT '0' COMMENT 'dry run flag:0 normal, 1 dry run', `next_process_instance_id` int(11) DEFAULT '0' COMMENT 'serial queue next processInstanceId', PRIMARY KEY (`id`), KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE, KEY `start_time_index` (`start_time`,`end_time`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS `t_ds_project`; CREATE TABLE `t_ds_project` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(100) DEFAULT NULL COMMENT 'project name', `code` bigint(20) NOT NULL COMMENT 'encoding', `description` varchar(200) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_queue`; CREATE TABLE `t_ds_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name', `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_datasource_user`; CREATE TABLE `t_ds_relation_datasource_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time`
datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_datasource_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_process_instance`; CREATE TABLE `t_ds_relation_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_project_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_project_user`; CREATE TABLE `t_ds_relation_project_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `project_id` int(11) DEFAULT NULL COMMENT 'project id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_project_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_resources_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_resources_user`; CREATE TABLE `t_ds_relation_resources_user` ( `id` int(11) NOT NULL AUTO_INCREMENT, `user_id` int(11) NOT NULL COMMENT 'user id', `resources_id` int(11) DEFAULT NULL COMMENT 'resource id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_resources_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_udfs_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_udfs_user`; CREATE TABLE `t_ds_relation_udfs_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'userid', `udf_id` int(11) DEFAULT NULL COMMENT 'udf id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_resources -- ---------------------------- DROP TABLE IF EXISTS `t_ds_resources`; CREATE TABLE `t_ds_resources` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alias` varchar(64) DEFAULT NULL COMMENT 'alias', `file_name` varchar(64) DEFAULT NULL COMMENT 'file name', `description` varchar(255) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'user id', `type` tinyint(4) DEFAULT NULL COMMENT 'resource 
type,0:FILE,1:UDF', `size` bigint(20) DEFAULT NULL COMMENT 'resource size', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `pid` int(11) DEFAULT NULL, `full_name` varchar(64) DEFAULT NULL, `is_directory` tinyint(4) DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS `t_ds_schedules`; CREATE TABLE `t_ds_schedules` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `start_time` datetime NOT NULL COMMENT 'start time', `end_time` datetime NOT NULL COMMENT 'end time', `timezone_id` varchar(40) DEFAULT NULL COMMENT 'schedule timezone id', `crontab` varchar(255) NOT NULL COMMENT 'crontab description', `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue', `user_id` int(11) NOT NULL COMMENT 'user id', `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ', `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS `t_ds_session`; CREATE TABLE `t_ds_session` ( `id` varchar(64) NOT NULL COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `ip` varchar(45) DEFAULT NULL COMMENT 'ip', `last_login_time` datetime DEFAULT NULL COMMENT 'last login time', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_instance`; CREATE TABLE `t_ds_task_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'task name', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_code` bigint(20) NOT NULL COMMENT 'task definition code', `task_definition_version` int(11) DEFAULT '0' COMMENT 'task definition version', `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id', `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `submit_time` datetime DEFAULT NULL COMMENT 'task submit time', `start_time` datetime DEFAULT NULL COMMENT 'task start time', `end_time` datetime DEFAULT NULL 
COMMENT 'task end time', `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on', `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host', `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path', `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert', `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times', `pid` int(4) DEFAULT NULL COMMENT 'pid of task', `app_link` text COMMENT 'yarn app id', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ', `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times', `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `environment_config` text COMMENT 'this config contains many environment variables config', `executor_id` int(11) DEFAULT NULL, `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time', `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time', `var_pool` longtext COMMENT 'var_pool', `task_group_id` int(11) DEFAULT NULL COMMENT 'task group id', `dry_run` tinyint(4) DEFAULT '0' COMMENT 'dry run flag: 0 normal, 1 dry run', PRIMARY KEY (`id`), KEY `process_instance_id` (`process_instance_id`) USING BTREE, CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS `t_ds_tenant`; CREATE TABLE `t_ds_tenant` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code', `description` varchar(255) DEFAULT NULL, `queue_id` int(11) DEFAULT NULL COMMENT 'queue id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS `t_ds_udfs`; CREATE TABLE `t_ds_udfs` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `func_name` varchar(100) NOT NULL COMMENT 'UDF function name', `class_name` varchar(255) NOT NULL COMMENT 'class of udf', `type` tinyint(4) NOT NULL COMMENT 'Udf function type', `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types', `database` varchar(255) DEFAULT NULL COMMENT 'data base', `description` varchar(255) DEFAULT NULL, `resource_id` int(11) NOT NULL COMMENT 'resource id', `resource_name` varchar(255) NOT NULL COMMENT 'resource name', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_udfs -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_user -- ---------------------------- DROP TABLE IF EXISTS 
`t_ds_user`; CREATE TABLE `t_ds_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id', `user_name` varchar(64) DEFAULT NULL COMMENT 'user name', `user_password` varchar(64) DEFAULT NULL COMMENT 'user password', `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user', `email` varchar(64) DEFAULT NULL COMMENT 'email', `phone` varchar(11) DEFAULT NULL COMMENT 'phone', `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `queue` varchar(64) DEFAULT NULL COMMENT 'queue', `state` tinyint(4) DEFAULT '1' COMMENT 'state 0:disable 1:enable', PRIMARY KEY (`id`), UNIQUE KEY `user_name_unique` (`user_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_worker_group -- ---------------------------- DROP TABLE IF EXISTS `t_ds_worker_group`; CREATE TABLE `t_ds_worker_group` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `name` varchar(255) NOT NULL COMMENT 'worker group name', `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]', `create_time` datetime NULL DEFAULT NULL COMMENT 'create time', `update_time` datetime NULL DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `name_unique` (`name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_worker_group -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_version -- ---------------------------- DROP TABLE IF EXISTS `t_ds_version`; CREATE TABLE `t_ds_version` ( `id` int(11) NOT NULL AUTO_INCREMENT, `version` varchar(200) NOT NULL, PRIMARY KEY (`id`), UNIQUE KEY `version_UNIQUE` (`version`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version'; -- ---------------------------- -- Records of t_ds_version -- ---------------------------- INSERT INTO `t_ds_version` VALUES ('1', '2.0.2'); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- ---------------------------- -- Records of t_ds_user -- ---------------------------- INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . 
alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment`; CREATE TABLE `t_ds_environment` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `code` bigint(20) DEFAULT NULL COMMENT 'encoding', `name` varchar(100) NOT NULL COMMENT 'environment name', `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config', `description` text NULL DEFAULT NULL COMMENT 'the details', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_name_unique` (`name`), UNIQUE KEY `environment_code_unique` (`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment_worker_group_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`; CREATE TABLE `t_ds_environment_worker_group_relation` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `worker_group` varchar(255) NOT NULL COMMENT 'worker group id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_group_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_group_queue`; CREATE TABLE `t_ds_task_group_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT'key', `task_id` int(11) DEFAULT NULL COMMENT 'taskintanceid', `task_name` varchar(100) DEFAULT NULL COMMENT 'TaskInstance name', `group_id` int(11) DEFAULT NULL COMMENT 'taskGroup id', `process_id` int(11) DEFAULT NULL COMMENT 'processInstace id', `priority` int(8) DEFAULT '0' COMMENT 'priority', `status` tinyint(4) DEFAULT '-1' COMMENT '-1: waiting 1: running 2: finished', `force_start` tinyint(4) DEFAULT '0' COMMENT 'is force start 0 NO ,1 YES', `in_queue` tinyint(4) DEFAULT '0' COMMENT 'ready to get the queue by other task finish 0 NO ,1 YES', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` 
timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY( `id` ) )ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8; -- ---------------------------- -- Table structure for t_ds_task_group -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_group`; CREATE TABLE `t_ds_task_group` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT'key', `name` varchar(100) DEFAULT NULL COMMENT 'task_group name', `description` varchar(200) DEFAULT NULL, `group_size` int (11) NOT NULL COMMENT'group size', `use_size` int (11) DEFAULT '0' COMMENT 'used size', `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `project_id` int(11) DEFAULT NULL COMMENT 'project id', `status` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY(`id`) ) ENGINE= INNODB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8;
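For issue 7061, the schema above already carries the performance-oriented composite indexes: `process_instance_index` and `start_time_index` on `t_ds_process_instance`, and `priority_id_index` on `t_ds_command`. On an existing installation the same indexes could be backfilled with `ALTER TABLE`; a hedged sketch using only names taken from the schema above:

```sql
-- Hedged sketch: backfilling the composite indexes defined in the schema
-- above onto an existing installation. Skip any index that already exists,
-- since MySQL rejects ADD KEY when the index name is already taken.
ALTER TABLE t_ds_process_instance
  ADD KEY process_instance_index (process_definition_code, id) USING BTREE,
  ADD KEY start_time_index (start_time, end_time) USING BTREE;

ALTER TABLE t_ds_command
  ADD KEY priority_id_index (process_instance_priority, id) USING BTREE;
```

On a fresh 2.0.x install created from the script above these statements are unnecessary, as the indexes are part of the table definitions.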
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,061
[Bug] [dolphinscheduler-api] The pages of project management, workflow instance and task instance are accessed slowly
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The pages of project management, workflow instance and task instance are accessed slowly ![image](https://user-images.githubusercontent.com/95271106/143994546-7125905d-b772-4a75-bcf1-91573804387b.png) ### What you expected to happen When the amount of data is large, the SQL query statement is slow ### How to reproduce Optimize SQL query statements ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7061
https://github.com/apache/dolphinscheduler/pull/7166
ed9fca6c976761abcf4cef2656b61516fd976ad0
0e75bd8008b62795976919409b79f9137e929a4c
"2021-11-30T06:10:47Z"
java
"2021-12-04T07:53:18Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.0_schema/mysql/dolphinscheduler_ddl.sql
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));

-- uc_dolphin_T_t_ds_user_A_state
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state()
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
            WHERE TABLE_NAME='t_ds_user'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND COLUMN_NAME ='state')
    THEN
        ALTER TABLE t_ds_user ADD `state` tinyint(4) DEFAULT '1' COMMENT 'state 0:disable 1:enable';
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_user_A_state;
DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state;

-- uc_dolphin_T_t_ds_tenant_A_tenant_name
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name()
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
            WHERE TABLE_NAME='t_ds_tenant'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND COLUMN_NAME ='tenant_name')
    THEN
        ALTER TABLE t_ds_tenant DROP `tenant_name`;
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_tenant_A_tenant_name;
DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name;

-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids()
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
            WHERE TABLE_NAME='t_ds_alertgroup'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND COLUMN_NAME ='alert_instance_ids')
    THEN
        ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar(255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`;
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids;

-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id()
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
            WHERE TABLE_NAME='t_ds_alertgroup'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND COLUMN_NAME ='create_user_id')
    THEN
        ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`;
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id;

-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName()
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS
            WHERE TABLE_NAME='t_ds_alertgroup'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND INDEX_NAME ='t_ds_alertgroup_name_un')
    THEN
        ALTER TABLE t_ds_alertgroup ADD UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`);
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName;

-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName()
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS
            WHERE TABLE_NAME='t_ds_datasource'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND INDEX_NAME ='t_ds_datasource_name_un')
    THEN
        ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`);
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName;

-- uc_dolphin_T_t_ds_project_A_add_code
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_project_A_add_code;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_project_A_add_code()
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
            WHERE TABLE_NAME='t_ds_project'
            AND TABLE_SCHEMA=(SELECT DATABASE())
            AND COLUMN_NAME ='code')
    THEN
        alter table t_ds_project add `code` bigint(20) NOT NULL COMMENT 'encoding' AFTER `name`;
    END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_project_A_add_code();
DROP PROCEDURE uc_dolphin_T_t_ds_project_A_add_code;

-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
  `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
  `plugin_params` text COMMENT 'plugin params',
  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_define_id` int NOT NULL,
  `plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_environment
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment`;
CREATE TABLE `t_ds_environment` (
  `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `code` bigint(20) DEFAULT NULL COMMENT 'encoding',
  `name` varchar(100) NOT NULL COMMENT 'environment name',
  `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config',
  `description` text NULL DEFAULT NULL COMMENT 'the details',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  UNIQUE KEY `environment_name_unique` (`name`),
  UNIQUE KEY `environment_code_unique` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_environment_worker_group_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`;
CREATE TABLE `t_ds_environment_worker_group_relation` (
  `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `environment_code` bigint(20) NOT NULL COMMENT 'environment code',
  `worker_group` varchar(255) NOT NULL COMMENT 'worker group id',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition_log`;
CREATE TABLE `t_ds_process_definition_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'process definition name',
  `version` int(11) DEFAULT '0' COMMENT 'process definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
  `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
  `locations` text COMMENT 'Node location information',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition`;
CREATE TABLE `t_ds_task_definition` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
  `version` int(11) DEFAULT '0' COMMENT 'task definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_params` longtext COMMENT 'job custom parameters',
  `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
  `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
  `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
  `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
  `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
  `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
  `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
  `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
  `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
  `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
  `resource_ids` text COMMENT 'resource id, separated by comma',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`,`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition_log`;
CREATE TABLE `t_ds_task_definition_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
  `version` int(11) DEFAULT '0' COMMENT 'task definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_params` longtext COMMENT 'job custom parameters',
  `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
  `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
  `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
  `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code',
  `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
  `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
  `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
  `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
  `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
  `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
  `resource_ids` text DEFAULT NULL COMMENT 'resource id, separated by comma',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_task_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation`;
CREATE TABLE `t_ds_process_task_relation` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `name` varchar(200) DEFAULT NULL COMMENT 'relation name',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
  `process_definition_version` int(11) NOT NULL COMMENT 'process version',
  `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
  `pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
  `post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
  `post_task_version` int(11) NOT NULL COMMENT 'post task version',
  `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
  `condition_params` text COMMENT 'condition params(json)',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_task_relation_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation_log`;
CREATE TABLE `t_ds_process_task_relation_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `name` varchar(200) DEFAULT NULL COMMENT 'relation name',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
  `process_definition_version` int(11) NOT NULL COMMENT 'process version',
  `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
  `pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
  `post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
  `post_task_version` int(11) NOT NULL COMMENT 'post task version',
  `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
  `condition_params` text COMMENT 'condition params(json)',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- t_ds_worker_group
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
  `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `name` varchar(255) NOT NULL COMMENT 'worker group name',
  `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
  `create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
  `update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- t_ds_command
alter table t_ds_command change process_definition_id process_definition_code bigint(20) NOT NULL COMMENT 'process definition code';
alter table t_ds_command add environment_code bigint(20) DEFAULT '-1' COMMENT 'environment code' AFTER worker_group;
alter table t_ds_command add dry_run tinyint(4) DEFAULT '0' COMMENT 'dry run flag:0 normal, 1 dry run' AFTER environment_code;
alter table t_ds_command add process_definition_version int(11) DEFAULT '0' COMMENT 'process definition version' AFTER process_definition_code;
alter table t_ds_command add process_instance_id int(11) DEFAULT '0' COMMENT 'process instance id' AFTER process_definition_version;
alter table t_ds_command add KEY `priority_id_index` (`process_instance_priority`,`id`) USING BTREE;

-- t_ds_error_command
alter table t_ds_error_command change process_definition_id process_definition_code bigint(20) NOT NULL COMMENT 'process definition code';
alter table t_ds_error_command add environment_code bigint(20) DEFAULT '-1' COMMENT 'environment code' AFTER worker_group;
alter table t_ds_error_command add dry_run tinyint(4) DEFAULT '0' COMMENT 'dry run flag:0 normal, 1 dry run' AFTER message;
alter table t_ds_error_command add process_definition_version int(11) DEFAULT '0' COMMENT 'process definition version' AFTER process_definition_code;
alter table t_ds_error_command add process_instance_id int(11) DEFAULT '0' COMMENT 'process instance id' AFTER process_definition_version;

-- t_ds_process_instance note: Data migration is not supported
alter table t_ds_process_instance change process_definition_id process_definition_code bigint(20) NOT NULL COMMENT 'process definition code';
alter table t_ds_process_instance add process_definition_version int(11) DEFAULT '0' COMMENT 'process definition version' AFTER process_definition_code;
alter table t_ds_process_instance add environment_code bigint(20) DEFAULT '-1' COMMENT 'environment code' AFTER worker_group;
alter table t_ds_process_instance add var_pool longtext COMMENT 'var_pool' AFTER tenant_id;
alter table t_ds_process_instance add dry_run tinyint(4) DEFAULT '0' COMMENT 'dry run flag:0 normal, 1 dry run' AFTER var_pool;
alter table t_ds_process_instance drop KEY `process_instance_index`;
alter table t_ds_process_instance add KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE;
alter table t_ds_process_instance drop KEY `start_time_index`;
alter table t_ds_process_instance add KEY `start_time_index` (`start_time`,`end_time`) USING BTREE;
alter table t_ds_process_instance drop process_instance_json;
alter table t_ds_process_instance drop locations;
alter table t_ds_process_instance drop connects;
alter table t_ds_process_instance drop dependence_schedule_times;

-- t_ds_task_instance note: Data migration is not supported
alter table t_ds_task_instance change process_definition_id task_code bigint(20) NOT NULL COMMENT 'task definition code';
alter table t_ds_task_instance add task_definition_version int(11) DEFAULT '0' COMMENT 'task definition version' AFTER task_code;
alter table t_ds_task_instance add task_params text COMMENT 'job custom parameters' AFTER app_link;
alter table t_ds_task_instance add environment_code bigint(20) DEFAULT '-1' COMMENT 'environment code' AFTER worker_group;
alter table t_ds_task_instance add environment_config text COMMENT 'this config contains many environment variables config' AFTER environment_code;
alter table t_ds_task_instance add first_submit_time datetime DEFAULT NULL COMMENT 'task first submit time' AFTER executor_id;
alter table t_ds_task_instance add delay_time int(4) DEFAULT '0' COMMENT 'task delay execution time' AFTER first_submit_time;
alter table t_ds_task_instance add var_pool longtext COMMENT 'var_pool' AFTER delay_time;
alter table t_ds_task_instance add dry_run tinyint(4) DEFAULT '0' COMMENT 'dry run flag:0 normal, 1 dry run' AFTER var_pool;
alter table t_ds_task_instance drop KEY `task_instance_index`;
alter table t_ds_task_instance drop task_json;

-- t_ds_schedules
alter table t_ds_schedules change process_definition_id process_definition_code bigint(20) NOT NULL COMMENT 'process definition code';
alter table t_ds_schedules add timezone_id varchar(40) DEFAULT NULL COMMENT 'timezoneId' AFTER end_time;
alter table t_ds_schedules add environment_code bigint(20) DEFAULT '-1' COMMENT 'environment code' AFTER worker_group;

-- t_ds_process_definition
alter table t_ds_process_definition add `code` bigint(20) NOT NULL COMMENT 'encoding' AFTER `id`;
alter table t_ds_process_definition change project_id project_code bigint(20) NOT NULL COMMENT 'project code' AFTER `description`;
alter table t_ds_process_definition add `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `locations`;
alter table t_ds_process_definition add UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE;
alter table t_ds_process_definition modify `description` text COMMENT 'description' after `version`;
alter table t_ds_process_definition modify `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online' after `project_code`;
alter table t_ds_process_definition modify `create_time` datetime DEFAULT NULL COMMENT 'create time' after `tenant_id`;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,061
[Bug] [dolphinscheduler-api] The pages of project management, workflow instance and task instance are accessed slowly
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The pages of project management, workflow instance and task instance are accessed slowly ![image](https://user-images.githubusercontent.com/95271106/143994546-7125905d-b772-4a75-bcf1-91573804387b.png) ### What you expected to happen When the amount of data is large, the SQL query statement is slow ### How to reproduce Optimize SQL query statements ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7061
https://github.com/apache/dolphinscheduler/pull/7166
ed9fca6c976761abcf4cef2656b61516fd976ad0
0e75bd8008b62795976919409b79f9137e929a4c
"2021-11-30T06:10:47Z"
java
"2021-12-04T07:53:18Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.0_schema/postgresql/dolphinscheduler_ddl.sql
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

delimiter d//
CREATE OR REPLACE FUNCTION public.dolphin_update_metadata()
    RETURNS character varying
    LANGUAGE 'plpgsql'
    COST 100
    VOLATILE PARALLEL UNSAFE
AS $BODY$
DECLARE
v_schema varchar;
BEGIN
---get schema name
v_schema =current_schema();

--- rename columns
EXECUTE 'ALTER TABLE IF EXISTS ' || quote_ident(v_schema) ||'.t_ds_command RENAME COLUMN process_definition_id to process_definition_code';
EXECUTE 'ALTER TABLE IF EXISTS ' || quote_ident(v_schema) ||'.t_ds_error_command RENAME COLUMN process_definition_id to process_definition_code';
EXECUTE 'ALTER TABLE IF EXISTS ' || quote_ident(v_schema) ||'.t_ds_process_instance RENAME COLUMN process_definition_id to process_definition_code';
EXECUTE 'ALTER TABLE IF EXISTS ' || quote_ident(v_schema) ||'.t_ds_task_instance RENAME COLUMN process_definition_id to task_code';
EXECUTE 'ALTER TABLE IF EXISTS ' || quote_ident(v_schema) ||'.t_ds_schedules RENAME COLUMN process_definition_id to process_definition_code';
EXECUTE 'ALTER TABLE IF EXISTS ' || quote_ident(v_schema) ||'.t_ds_process_definition RENAME COLUMN project_id to project_code';

--- alter column type
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_command ALTER COLUMN process_definition_code TYPE bigint';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_error_command ALTER COLUMN process_definition_code TYPE bigint';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance ALTER COLUMN process_definition_code TYPE bigint';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ALTER COLUMN task_code TYPE bigint';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_schedules ALTER COLUMN process_definition_code TYPE bigint';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_definition ALTER COLUMN project_code TYPE bigint';

--- add columns
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_user ADD COLUMN IF NOT EXISTS "state" int DEFAULT 1';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_alertgroup ADD COLUMN IF NOT EXISTS "alert_instance_ids" varchar(255) DEFAULT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_alertgroup ADD COLUMN IF NOT EXISTS "create_user_id" int4 DEFAULT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_project ADD COLUMN IF NOT EXISTS "code" bigint NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_command ADD COLUMN IF NOT EXISTS "environment_code" bigint DEFAULT -1';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_command ADD COLUMN IF NOT EXISTS "dry_run" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_command ADD COLUMN IF NOT EXISTS "process_definition_version" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_command ADD COLUMN IF NOT EXISTS "process_instance_id" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_error_command ADD COLUMN IF NOT EXISTS "environment_code" bigint DEFAULT -1';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_error_command ADD COLUMN IF NOT EXISTS "dry_run" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_error_command ADD COLUMN IF NOT EXISTS "process_definition_version" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_error_command ADD COLUMN IF NOT EXISTS "process_instance_id" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance ADD COLUMN IF NOT EXISTS "process_definition_version" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance ADD COLUMN IF NOT EXISTS "environment_code" bigint DEFAULT -1';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance ADD COLUMN IF NOT EXISTS "var_pool" text';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance ADD COLUMN IF NOT EXISTS "dry_run" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "task_definition_version" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "task_params" text';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "environment_code" bigint DEFAULT -1';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "environment_config" text';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "first_submit_time" timestamp DEFAULT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "delay_time" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "var_pool" text';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance ADD COLUMN IF NOT EXISTS "dry_run" int DEFAULT 0';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_schedules ADD COLUMN IF NOT EXISTS "timezone_id" varchar(40) DEFAULT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_schedules ADD COLUMN IF NOT EXISTS "environment_code" int DEFAULT -1';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_definition ADD COLUMN IF NOT EXISTS "code" bigint';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_definition ADD COLUMN IF NOT EXISTS "warning_group_id" int';

---drop columns
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_tenant DROP COLUMN IF EXISTS "tenant_name"';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance DROP COLUMN IF EXISTS "process_instance_json"';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance DROP COLUMN IF EXISTS "locations"';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance DROP COLUMN IF EXISTS "connects"';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_process_instance DROP COLUMN IF EXISTS "dependence_schedule_times"';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'.t_ds_task_instance DROP COLUMN IF EXISTS "task_json"';

-- add CONSTRAINT
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_alertgroup" ADD CONSTRAINT "t_ds_alertgroup_name_un" UNIQUE ("group_name")';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_datasource" ADD CONSTRAINT "t_ds_datasource_name_un" UNIQUE ("name","type")';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_command" ALTER COLUMN "process_definition_code" SET NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_process_instance" ALTER COLUMN "process_definition_code" SET NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_task_instance" ALTER COLUMN "task_code" SET NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_schedules" ALTER COLUMN "process_definition_code" SET NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_process_definition" ALTER COLUMN "code" SET NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_process_definition" ALTER COLUMN "project_code" SET NOT NULL';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_process_definition" ADD CONSTRAINT "process_unique" UNIQUE ("name","project_code")';
EXECUTE 'ALTER TABLE ' || quote_ident(v_schema) ||'."t_ds_process_definition" ALTER COLUMN "description" SET NOT NULL';

--- drop index
EXECUTE 'DROP INDEX IF EXISTS "process_instance_index"';
EXECUTE 'DROP INDEX IF EXISTS "task_instance_index"';
EXECUTE 'DROP INDEX IF EXISTS "start_time_index"';

--- create index
EXECUTE 'CREATE INDEX IF NOT EXISTS priority_id_index ON ' || quote_ident(v_schema) ||'.t_ds_command USING Btree("process_instance_priority","id")';
EXECUTE 'CREATE INDEX IF NOT EXISTS process_instance_index ON ' || quote_ident(v_schema) ||'.t_ds_process_instance USING Btree("process_definition_code","id")';
EXECUTE 'CREATE INDEX IF NOT EXISTS start_time_index ON ' || quote_ident(v_schema) ||'.t_ds_process_instance USING Btree("start_time","end_time")';

---add comment
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_user.state is ''state 0:disable 1:enable''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_alertgroup.alert_instance_ids is ''alert instance ids''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_alertgroup.create_user_id is ''create user id''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_project.code is ''coding''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_command.process_definition_code is ''process definition code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_command.environment_code is ''environment code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_command.dry_run is ''dry run flag:0 normal, 1 dry run''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_command.process_definition_version is ''process definition version''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_command.process_instance_id is ''process instance id''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_error_command.process_definition_code is ''process definition code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_error_command.environment_code is ''environment code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_error_command.dry_run is ''dry run flag:0 normal, 1 dry run''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_error_command.process_definition_version is ''process definition version''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_error_command.process_instance_id is ''process instance id''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_instance.process_definition_code is ''process instance code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_instance.process_definition_version is ''process instance version''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_instance.environment_code is ''environment code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_instance.var_pool is ''var pool''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_instance.dry_run is ''dry run flag:0 normal, 1 dry run''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.task_code is ''task definition code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.task_definition_version is ''task definition version''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.task_params is ''task params''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.environment_code is ''environment code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.environment_config is ''this config contains many environment variables config''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.first_submit_time is ''task first submit time''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.delay_time is ''task delay execution time''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.var_pool is ''var pool''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_task_instance.dry_run is ''dry run flag:0 normal, 1 dry run''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_schedules.process_definition_code is ''process definition code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_schedules.timezone_id is ''timezone id''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_schedules.environment_code is ''environment code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_definition.code is ''encoding''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_definition.project_code is ''project code''';
EXECUTE 'comment on column ' || quote_ident(v_schema) ||'.t_ds_process_definition.warning_group_id is ''alert group id''';

--create table
EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_plugin_define" ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_alert_plugin_instance" ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_environment" ( id serial NOT NULL, code bigint NOT NULL, name varchar(100) DEFAULT NULL, config text DEFAULT NULL, description text, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT environment_name_unique UNIQUE (name), CONSTRAINT environment_code_unique UNIQUE (code) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_environment_worker_group_relation" ( id serial NOT NULL, environment_code bigint NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id) , CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_process_definition_log" ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT 0 , tenant_id int DEFAULT -1 , execution_type int DEFAULT 0, operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_task_definition" ( id serial NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT -1, fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT 0 , delay_time int DEFAULT 0 , resource_ids text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_task_definition_log" ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int NOT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT -1, fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT 0 , delay_time int DEFAULT 0 , resource_ids text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_process_task_relation" ( id serial NOT NULL , name varchar(255) DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , process_definition_version int DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT 0 , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT 0 , condition_type int DEFAULT NULL , condition_params text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_process_task_relation_log" ( id int NOT NULL , name varchar(255) DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , process_definition_version int DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT 0 , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT 0 , condition_type int DEFAULT NULL , condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) )';

EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_worker_group" ( id serial NOT NULL, name varchar(255) NOT NULL, addr_list text DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT name_unique UNIQUE (name) )';

return 'Success!';
exception when others then
    ---Raise EXCEPTION '(%)',SQLERRM;
    return SQLERRM;
END;
$BODY$;

d//
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,061
[Bug] [dolphinscheduler-api] The pages of project management, workflow instance and task instance are accessed slowly
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The pages of project management, workflow instance and task instance are accessed slowly ![image](https://user-images.githubusercontent.com/95271106/143994546-7125905d-b772-4a75-bcf1-91573804387b.png) ### What you expected to happen When the amount of data is large, the SQL query statement is slow ### How to reproduce Optimize SQL query statements ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7061
https://github.com/apache/dolphinscheduler/pull/7166
ed9fca6c976761abcf4cef2656b61516fd976ad0
0e75bd8008b62795976919409b79f9137e929a4c
"2021-11-30T06:10:47Z"
java
"2021-12-04T07:53:18Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.1-schema/mysql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,061
[Bug] [dolphinscheduler-api] The pages of project management, workflow instance and task instance are accessed slowly
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The pages of project management, workflow instance and task instance are accessed slowly ![image](https://user-images.githubusercontent.com/95271106/143994546-7125905d-b772-4a75-bcf1-91573804387b.png) ### What you expected to happen When the amount of data is large, the SQL query statement is slow ### How to reproduce Optimize SQL query statements ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7061
https://github.com/apache/dolphinscheduler/pull/7166
ed9fca6c976761abcf4cef2656b61516fd976ad0
0e75bd8008b62795976919409b79f9137e929a4c
"2021-11-30T06:10:47Z"
java
"2021-12-04T07:53:18Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.1-schema/postgre/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
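The request/response contract above implies a thin controller wrapper next to the existing `grantProject` handler. A minimal sketch of what such an endpoint could look like, reusing the conventions visible in `UsersController` below; the mapping path, the Swagger notes key, and the `usersService.grantProjectByCode` service method are assumptions for illustration, not the merged implementation:

```java
// Hypothetical sketch inside UsersController; path, notes key and service
// method name are assumed, only the request/response shape comes from the issue.
@ApiOperation(value = "grantProjectByCode", notes = "GRANT_PROJECT_BY_CODE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
    @ApiImplicitParam(name = "projectCodes", value = "PROJECT_CODES", required = true, type = "String")
})
@PostMapping(value = "/grant-project-by-code")
@ResponseStatus(HttpStatus.OK)
@ApiException(GRANT_PROJECT_ERROR)
@AccessLogAnnotation
public Result grantProjectByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @RequestParam(value = "userId") int userId,
                                 @RequestParam(value = "projectCodes") String projectCodes) {
    // Delegates to a service method assumed to resolve projects by code and
    // return the same result map shape as the id-based grantProject endpoint.
    Map<String, Object> result = this.usersService.grantProjectByCode(loginUser, userId, projectCodes);
    return returnDataList(result);
}
```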
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/UsersController.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.apache.dolphinscheduler.api.enums.Status.AUTHORIZED_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.CREATE_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_USER_BY_ID_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GET_USER_INFO_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_DATASOURCE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_PROJECT_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_RESOURCE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_UDF_FUNCTION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_USER_LIST_PAGING_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UNAUTHORIZED_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.USER_LIST_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.VERIFY_USERNAME_ERROR;

import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.UsersService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import springfox.documentation.annotations.ApiIgnore;

/**
 * users controller
 */
@Api(tags = "USERS_TAG")
@RestController
@RequestMapping("/users")
public class UsersController extends BaseController {

    private static final Logger logger = LoggerFactory.getLogger(UsersController.class);

    @Autowired
    private UsersService usersService;

    /**
     * create user
     *
     * @param loginUser login user
     * @param userName user name
     * @param userPassword user password
     * @param email email
     * @param tenantId tenant id
     * @param phone phone
     * @param queue queue
     * @return create result code
     */
    @ApiOperation(value = "createUser", notes = "CREATE_USER_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String"),
        @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, type = "String"),
        @ApiImplicitParam(name = "tenantId", value = "TENANT_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "queue", value = "QUEUE", dataType = "String"),
        @ApiImplicitParam(name = "email", value = "EMAIL", required = true, dataType = "String"),
        @ApiImplicitParam(name = "phone", value = "PHONE", dataType = "String"),
        @ApiImplicitParam(name = "state", value = "STATE", dataType = "Int", example = "1")
    })
    @PostMapping(value = "/create")
    @ResponseStatus(HttpStatus.CREATED)
    @ApiException(CREATE_USER_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "userPassword"})
    public Result createUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam(value = "userName") String userName,
                             @RequestParam(value = "userPassword") String userPassword,
                             @RequestParam(value = "tenantId") int tenantId,
                             @RequestParam(value = "queue", required = false, defaultValue = "") String queue,
                             @RequestParam(value = "email") String email,
                             @RequestParam(value = "phone", required = false) String phone,
                             @RequestParam(value = "state", required = false) int state) throws Exception {
        Map<String, Object> result = usersService.createUser(loginUser, userName, userPassword, email, tenantId, phone, queue, state);
        return returnDataList(result);
    }

    /**
     * query user list paging
     *
     * @param loginUser login user
     * @param pageNo page number
     * @param searchVal search value
     * @param pageSize page size
     * @return user list page
     */
    @ApiOperation(value = "queryUserList", notes = "QUERY_USER_LIST_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"),
        @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10"),
        @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type = "String")
    })
    @GetMapping(value = "/list-paging")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(QUERY_USER_LIST_PAGING_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result queryUserList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam("pageNo") Integer pageNo,
                                @RequestParam("pageSize") Integer pageSize,
                                @RequestParam(value = "searchVal", required = false) String searchVal) {
        Result result = checkPageParams(pageNo, pageSize);
        if (!result.checkResult()) {
            return result;
        }
        searchVal = ParameterUtils.handleEscapes(searchVal);
        result = usersService.queryUserList(loginUser, searchVal, pageNo, pageSize);
        return result;
    }

    /**
     * update user
     *
     * @param loginUser login user
     * @param id user id
     * @param userName user name
     * @param userPassword user password
     * @param email email
     * @param tenantId tenant id
     * @param phone phone
     * @param queue queue
     * @return update result code
     */
    @ApiOperation(value = "updateUser", notes = "UPDATE_USER_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String"),
        @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, type = "String"),
        @ApiImplicitParam(name = "tenantId", value = "TENANT_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "queue", value = "QUEUE", dataType = "String"),
        @ApiImplicitParam(name = "email", value = "EMAIL", required = true, dataType = "String"),
        @ApiImplicitParam(name = "phone", value = "PHONE", dataType = "String"),
        @ApiImplicitParam(name = "state", value = "STATE", dataType = "Int", example = "1")
    })
    @PostMapping(value = "/update")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(UPDATE_USER_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "userPassword"})
    public Result updateUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam(value = "id") int id,
                             @RequestParam(value = "userName") String userName,
                             @RequestParam(value = "userPassword") String userPassword,
                             @RequestParam(value = "queue", required = false, defaultValue = "") String queue,
                             @RequestParam(value = "email") String email,
                             @RequestParam(value = "tenantId") int tenantId,
                             @RequestParam(value = "phone", required = false) String phone,
                             @RequestParam(value = "state", required = false) int state) throws Exception {
        Map<String, Object> result = usersService.updateUser(loginUser, id, userName, userPassword, email, tenantId, phone, queue, state);
        return returnDataList(result);
    }

    /**
     * delete user by id
     *
     * @param loginUser login user
     * @param id user id
     * @return delete result code
     */
    @ApiOperation(value = "delUserById", notes = "DELETE_USER_BY_ID_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "USER_ID", required = true, dataType = "Int", example = "100")
    })
    @PostMapping(value = "/delete")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(DELETE_USER_BY_ID_ERROR)
    @AccessLogAnnotation
    public Result delUserById(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                              @RequestParam(value = "id") int id) throws Exception {
        Map<String, Object> result = usersService.deleteUserById(loginUser, id);
        return returnDataList(result);
    }

    /**
     * grant project
     *
     * @param loginUser login user
     * @param userId user id
     * @param projectIds project id array
     * @return grant result code
     */
    @ApiOperation(value = "grantProject", notes = "GRANT_PROJECT_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "projectIds", value = "PROJECT_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-project")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_PROJECT_ERROR)
    @AccessLogAnnotation
    public Result grantProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                               @RequestParam(value = "userId") int userId,
                               @RequestParam(value = "projectIds") String projectIds) {
        Map<String, Object> result = usersService.grantProject(loginUser, userId, projectIds);
        return returnDataList(result);
    }

    /**
     * grant resource
     *
     * @param loginUser login user
     * @param userId user id
     * @param resourceIds resource id array
     * @return grant result code
     */
    @ApiOperation(value = "grantResource", notes = "GRANT_RESOURCE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "resourceIds", value = "RESOURCE_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-file")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_RESOURCE_ERROR)
    @AccessLogAnnotation
    public Result grantResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam(value = "userId") int userId,
                                @RequestParam(value = "resourceIds") String resourceIds) {
        Map<String, Object> result = usersService.grantResources(loginUser, userId, resourceIds);
        return returnDataList(result);
    }

    /**
     * grant udf function
     *
     * @param loginUser login user
     * @param userId user id
     * @param udfIds udf id array
     * @return grant result code
     */
    @ApiOperation(value = "grantUDFFunc", notes = "GRANT_UDF_FUNC_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "udfIds", value = "UDF_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-udf-func")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_UDF_FUNCTION_ERROR)
    @AccessLogAnnotation
    public Result grantUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                               @RequestParam(value = "userId") int userId,
                               @RequestParam(value = "udfIds") String udfIds) {
        Map<String, Object> result = usersService.grantUDFFunction(loginUser, userId, udfIds);
        return returnDataList(result);
    }

    /**
     * grant datasource
     *
     * @param loginUser login user
     * @param userId user id
     * @param datasourceIds data source id array
     * @return grant result code
     */
    @ApiOperation(value = "grantDataSource", notes = "GRANT_DATASOURCE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "datasourceIds", value = "DATASOURCE_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-datasource")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_DATASOURCE_ERROR)
    @AccessLogAnnotation
    public Result grantDataSource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                  @RequestParam(value = "userId") int userId,
                                  @RequestParam(value = "datasourceIds") String datasourceIds) {
        Map<String, Object> result = usersService.grantDataSource(loginUser, userId, datasourceIds);
        return returnDataList(result);
    }

    /**
     * get user info
     *
     * @param loginUser login user
     * @return user info
     */
    @ApiOperation(value = "getUserInfo", notes = "GET_USER_INFO_NOTES")
    @GetMapping(value = "/get-user-info")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GET_USER_INFO_ERROR)
    @AccessLogAnnotation
    public Result getUserInfo(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
        Map<String, Object> result = usersService.getUserInfo(loginUser);
        return returnDataList(result);
    }

    /**
     * user list no paging
     *
     * @param loginUser login user
     * @return user list
     */
    @ApiOperation(value = "listUser", notes = "LIST_USER_NOTES")
    @GetMapping(value = "/list")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(USER_LIST_ERROR)
    @AccessLogAnnotation
    public Result listUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
        Map<String, Object> result = usersService.queryAllGeneralUsers(loginUser);
        return returnDataList(result);
    }

    /**
     * user list no paging
     *
     * @param loginUser login user
     * @return user list
     */
    @GetMapping(value = "/list-all")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(USER_LIST_ERROR)
    @AccessLogAnnotation
    public Result listAll(@RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
        Map<String, Object> result
= usersService.queryUserList(loginUser); return returnDataList(result); } /** * verify username * * @param loginUser login user * @param userName user name * @return true if user name not exists, otherwise return false */ @ApiOperation(value = "verifyUserName", notes = "VERIFY_USER_NAME_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String") }) @GetMapping(value = "/verify-user-name") @ResponseStatus(HttpStatus.OK) @ApiException(VERIFY_USERNAME_ERROR) @AccessLogAnnotation public Result verifyUserName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "userName") String userName ) { return usersService.verifyUserName(userName); } /** * unauthorized user * * @param loginUser login user * @param alertgroupId alert group id * @return unauthorize result code */ @ApiOperation(value = "unauthorizedUser", notes = "UNAUTHORIZED_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "alertgroupId", value = "ALERT_GROUP_ID", required = true, type = "String") }) @GetMapping(value = "/unauth-user") @ResponseStatus(HttpStatus.OK) @ApiException(UNAUTHORIZED_USER_ERROR) @AccessLogAnnotation public Result unauthorizedUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("alertgroupId") Integer alertgroupId) { Map<String, Object> result = usersService.unauthorizedUser(loginUser, alertgroupId); return returnDataList(result); } /** * authorized user * * @param loginUser login user * @param alertgroupId alert group id * @return authorized result code */ @ApiOperation(value = "authorizedUser", notes = "AUTHORIZED_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "alertgroupId", value = "ALERT_GROUP_ID", required = true, type = "String") }) @GetMapping(value = "/authed-user") @ResponseStatus(HttpStatus.OK) @ApiException(AUTHORIZED_USER_ERROR) @AccessLogAnnotation public Result authorizedUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("alertgroupId") Integer alertgroupId) { try { Map<String, Object> result = usersService.authorizedUser(loginUser, alertgroupId); return returnDataList(result); } catch (Exception e) { logger.error(Status.AUTHORIZED_USER_ERROR.getMsg(), e); return error(Status.AUTHORIZED_USER_ERROR.getCode(), Status.AUTHORIZED_USER_ERROR.getMsg()); } } /** * user registry * * @param userName user name * @param userPassword user password * @param repeatPassword repeat password * @param email user email */ @ApiOperation(value = "registerUser", notes = "REGISTER_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String"), @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, type = "String"), @ApiImplicitParam(name = "repeatPassword", value = "REPEAT_PASSWORD", required = true, type = "String"), @ApiImplicitParam(name = "email", value = "EMAIL", required = true, type = "String"), }) @PostMapping("/register") @ResponseStatus(HttpStatus.OK) @ApiException(CREATE_USER_ERROR) @AccessLogAnnotation public Result<Object> registerUser(@RequestParam(value = "userName") String userName, @RequestParam(value = "userPassword") String userPassword, @RequestParam(value = "repeatPassword") String repeatPassword, @RequestParam(value = "email") String email) throws Exception { userName = ParameterUtils.handleEscapes(userName); userPassword = ParameterUtils.handleEscapes(userPassword); repeatPassword = 
ParameterUtils.handleEscapes(repeatPassword); email = ParameterUtils.handleEscapes(email); Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email); return returnDataList(result); } /** * user activate * * @param userName user name */ @ApiOperation(value = "activateUser", notes = "ACTIVATE_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userName", value = "USER_NAME", type = "String"), }) @PostMapping("/activate") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_USER_ERROR) @AccessLogAnnotation public Result<Object> activateUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "userName") String userName) { userName = ParameterUtils.handleEscapes(userName); Map<String, Object> result = usersService.activateUser(loginUser, userName); return returnDataList(result); } /** * user batch activate * * @param userNames user names */ @ApiOperation(value = "batchActivateUser", notes = "BATCH_ACTIVATE_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userNames", value = "USER_NAMES", required = true, type = "String"), }) @PostMapping("/batch/activate") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_USER_ERROR) @AccessLogAnnotation public Result<Object> batchActivateUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestBody List<String> userNames) { List<String> formatUserNames = userNames.stream().map(ParameterUtils::handleEscapes).collect(Collectors.toList()); Map<String, Object> result = usersService.batchActivateUser(loginUser, formatUserNames); return returnDataList(result); } }
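The issue below asks for a `grantProjectByCode` endpoint taking `userId` and `projectCodes`. As a rough sketch only — the mapping path, the reuse of `GRANT_PROJECT_ERROR`, the `GRANT_PROJECT_BY_CODE_NOTES` key, and the `usersService.grantProjectByCode` call are assumptions modeled on the existing `grantProject` endpoint above, not the merged PR — such a controller method might look like:

```java
/**
 * grant project by code (sketch only, modeled on the grantProject endpoint above)
 *
 * @param loginUser    login user
 * @param userId       user id
 * @param projectCodes project code array, multiple codes separated by ","
 * @return grant result code
 */
@ApiOperation(value = "grantProjectByCode", notes = "GRANT_PROJECT_BY_CODE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
    @ApiImplicitParam(name = "projectCodes", value = "PROJECT_CODES", required = true, type = "String")
})
@PostMapping(value = "/grant-project-by-code")
@ResponseStatus(HttpStatus.OK)
@ApiException(GRANT_PROJECT_ERROR) // reusing the grant-project error status is an assumption
@AccessLogAnnotation
public Result grantProjectByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @RequestParam(value = "userId") int userId,
                                 @RequestParam(value = "projectCodes") String projectCodes) {
    // delegate to the (assumed) service method; slots into UsersController alongside grantProject
    Map<String, Object> result = usersService.grantProjectByCode(loginUser, userId, projectCodes);
    return returnDataList(result);
}
```

Keeping the request shape identical to `/grant-project` (a comma-separated identifier string) would let existing callers switch over by changing only the identifier type.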
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.dao.entity.User; import java.io.IOException; import java.util.List; import java.util.Map; /** * users service */ public interface UsersService { /** * create user, only system admin have permission * * @param loginUser login user * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return create result code * @throws Exception exception */ Map<String, Object> createUser(User loginUser, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException; User createUser(String userName, String userPassword, String email, int tenantId, String phone, String queue, int state); /*** * create User for ldap login */ User createUser(UserType userType, String userId, String email); /** * get user by user name * * @param userName user name * @return exist user or null */ User getUserByUserName(String userName); /** * query user by id * * @param id id * @return user info */ User queryUser(int id); /** * query user by ids * * @param ids id list * @return user list */ List<User> queryUser(List<Integer> ids); /** * query user * * @param name name * @return user info */ User queryUser(String name); /** * query user * * @param name name * @param password password * @return user info */ User queryUser(String name, String password); /** * get user id by user name * * @param name user name * @return if name empty 0, user not exists -1, user exist user id */ int getUserIdByName(String name); /** * query user list * * @param loginUser login user * @param pageNo page number * @param searchVal search value * @param pageSize page size * @return user list page */ Result queryUserList(User loginUser, String searchVal, Integer pageNo, Integer pageSize); /** * updateProcessInstance user * * * @param loginUser * @param userId user id * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return update result code * @throws Exception exception */ Map<String, Object> updateUser(User loginUser, int userId, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException; /** * delete user * * @param loginUser login user * @param id user id * @return delete result code * @throws Exception exception when operate hdfs */ Map<String, Object> deleteUserById(User loginUser, int id) throws IOException; /** * grant project * * @param loginUser login user
* @param userId user id * @param projectIds project id array * @return grant result code */ Map<String, Object> grantProject(User loginUser, int userId, String projectIds); /** * grant resource * * @param loginUser login user * @param userId user id * @param resourceIds resource id array * @return grant result code */ Map<String, Object> grantResources(User loginUser, int userId, String resourceIds); /** * grant udf function * * @param loginUser login user * @param userId user id * @param udfIds udf id array * @return grant result code */ Map<String, Object> grantUDFFunction(User loginUser, int userId, String udfIds); /** * grant datasource * * @param loginUser login user * @param userId user id * @param datasourceIds data source id array * @return grant result code */ Map<String, Object> grantDataSource(User loginUser, int userId, String datasourceIds); /** * query user info * * @param loginUser login user * @return user info */ Map<String, Object> getUserInfo(User loginUser); /** * query user list * * @param loginUser login user * @return user list */ Map<String, Object> queryAllGeneralUsers(User loginUser); /** * query user list * * @param loginUser login user * @return user list */ Map<String, Object> queryUserList(User loginUser); /** * verify user name exists * * @param userName user name * @return true if user name not exists, otherwise return false */ Result<Object> verifyUserName(String userName); /** * unauthorized user * * @param loginUser login user * @param alertgroupId alert group id * @return unauthorize result code */ Map<String, Object> unauthorizedUser(User loginUser, Integer alertgroupId); /** * authorized user * * @param loginUser login user * @param alertgroupId alert group id * @return authorized result code */ Map<String, Object> authorizedUser(User loginUser, Integer alertgroupId); /** * registry user, default state is 0, default tenant_id is 1, no phone, no queue * * @param userName user name * @param userPassword user password * @param repeatPassword repeat password * @param email email * @return registry result code * @throws Exception exception */ Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email); /** * activate user, only system admin have permission, change user state code 0 to 1 * * @param loginUser login user * @param userName user name * @return create result code */ Map<String, Object> activateUser(User loginUser, String userName); /** * activate user, only system admin have permission, change users state code 0 to 1 * * @param loginUser login user * @param userNames user name * @return create result code */ Map<String, Object> batchActivateUser(User loginUser, List<String> userNames); }
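To support the requested feature, the UsersService interface above would need a matching method. A minimal hypothetical signature, mirroring `grantProject` (the parameter names come from the issue description; only the shape is assumed):

```java
/**
 * grant project by code (hypothetical signature, mirroring grantProject)
 *
 * @param loginUser    login user
 * @param userId       user id
 * @param projectCodes project code array, multiple codes separated by ","
 * @return grant result code
 */
Map<String, Object> grantProjectByCode(User loginUser, int userId, String projectCodes);
```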
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent; import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.UsersService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.EncryptionUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.entity.AlertGroup; import org.apache.dolphinscheduler.dao.entity.DatasourceUser; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.ResourcesUser; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UDFUser; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import 
org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; /** * users service impl */ @Service public class UsersServiceImpl extends BaseServiceImpl implements UsersService { private static final Logger logger = LoggerFactory.getLogger(UsersServiceImpl.class); @Autowired private AccessTokenMapper accessTokenMapper; @Autowired private UserMapper userMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectUserMapper projectUserMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private DataSourceUserMapper datasourceUserMapper; @Autowired private UDFUserMapper udfUserMapper; @Autowired private AlertGroupMapper alertGroupMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProjectMapper projectMapper; /** * create user, only system admin have permission * * @param loginUser login user * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return create result code * @throws Exception exception */ @Override @Transactional(rollbackFor = Exception.class) public Map<String, Object> createUser(User loginUser, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException { Map<String, Object> result = new HashMap<>(); //check all user params String msg = this.checkUserParams(userName, userPassword, email, phone); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!checkTenantExists(tenantId)) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } User user = createUser(userName, userPassword, email, tenantId, phone, queue, state); Tenant tenant = tenantMapper.queryById(tenantId); // resource upload startup if (PropertyUtils.getResUploadStartupState()) { // if tenant not exists if (!HadoopUtils.getInstance().exists(HadoopUtils.getHdfsTenantDir(tenant.getTenantCode()))) { createTenantDirIfNotExists(tenant.getTenantCode()); } String userPath = HadoopUtils.getHdfsUserDir(tenant.getTenantCode(), user.getId()); HadoopUtils.getInstance().mkdir(userPath); } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } @Override @Transactional(rollbackFor = RuntimeException.class) public User createUser(String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) { User user = new User(); Date now = new Date(); user.setUserName(userName); user.setUserPassword(EncryptionUtils.getMd5(userPassword)); user.setEmail(email); user.setTenantId(tenantId); user.setPhone(phone); user.setState(state); // create general users, administrator users are currently built-in user.setUserType(UserType.GENERAL_USER); user.setCreateTime(now); user.setUpdateTime(now); if (StringUtils.isEmpty(queue)) { queue = ""; } user.setQueue(queue); // save user userMapper.insert(user); return user; } /*** * create User for ldap login */ @Override @Transactional(rollbackFor = Exception.class) public User createUser(UserType userType, String userId, String email) { User user = new User(); Date now = new Date(); user.setUserName(userId); 
user.setEmail(email); // create general users, administrator users are currently built-in user.setUserType(userType); user.setCreateTime(now); user.setUpdateTime(now); user.setQueue(""); // save user userMapper.insert(user); return user; } /** * get user by user name * * @param userName user name * @return exist user or null */ @Override public User getUserByUserName(String userName) { return userMapper.queryByUserNameAccurately(userName); } /** * query user by id * * @param id id * @return user info */ @Override public User queryUser(int id) { return userMapper.selectById(id); } @Override public List<User> queryUser(List<Integer> ids) { if (CollectionUtils.isEmpty(ids)) { return new ArrayList<>(); } return userMapper.selectByIds(ids); } /** * query user * * @param name name * @return user info */ @Override public User queryUser(String name) { return userMapper.queryByUserNameAccurately(name); } /** * query user * * @param name name * @param password password * @return user info */ @Override public User queryUser(String name, String password) { String md5 = EncryptionUtils.getMd5(password); return userMapper.queryUserByNamePassword(name, md5); } /** * get user id by user name * * @param name user name * @return if name empty 0, user not exists -1, user exist user id */ @Override public int getUserIdByName(String name) { //executor name query int executorId = 0; if (StringUtils.isNotEmpty(name)) { User executor = queryUser(name); if (null != executor) { executorId = executor.getId(); } else { executorId = -1; } } return executorId; } /** * query user list * * @param loginUser login user * @param pageNo page number * @param searchVal search value * @param pageSize page size * @return user list page */ @Override public Result queryUserList(User loginUser, String searchVal, Integer pageNo, Integer pageSize) { Result result = new Result(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } Page<User> page = new Page<>(pageNo, pageSize); IPage<User> scheduleList = userMapper.queryUserPaging(page, searchVal); PageInfo<User> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) scheduleList.getTotal()); pageInfo.setTotalList(scheduleList.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * updateProcessInstance user * * @param userId user id * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return update result code * @throws Exception exception */ @Override public Map<String, Object> updateUser(User loginUser, int userId, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (check(result, !hasPerm(loginUser, userId), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } if (StringUtils.isNotEmpty(userName)) { if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User tempUser = userMapper.queryByUserNameAccurately(userName); if (tempUser != null && tempUser.getId() != userId) { putMsg(result, Status.USER_NAME_EXIST); return result; } user.setUserName(userName); } if (StringUtils.isNotEmpty(userPassword)) { if 
(!CheckUtils.checkPassword(userPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userPassword); return result; } user.setUserPassword(EncryptionUtils.getMd5(userPassword)); } if (StringUtils.isNotEmpty(email)) { if (!CheckUtils.checkEmail(email)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, email); return result; } user.setEmail(email); } if (StringUtils.isNotEmpty(phone) && !CheckUtils.checkPhone(phone)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, phone); return result; } user.setPhone(phone); user.setQueue(queue); user.setState(state); Date now = new Date(); user.setUpdateTime(now); //if user switches the tenant, the user's resources need to be copied to the new tenant if (user.getTenantId() != tenantId) { Tenant oldTenant = tenantMapper.queryById(user.getTenantId()); //query tenant Tenant newTenant = tenantMapper.queryById(tenantId); if (newTenant != null) { // if hdfs startup if (PropertyUtils.getResUploadStartupState() && oldTenant != null) { String newTenantCode = newTenant.getTenantCode(); String oldResourcePath = HadoopUtils.getHdfsResDir(oldTenant.getTenantCode()); String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode()); // if old tenant dir exists if (HadoopUtils.getInstance().exists(oldResourcePath)) { String newResourcePath = HadoopUtils.getHdfsResDir(newTenantCode); String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode); //file resources list List<Resource> fileResourcesList = resourceMapper.queryResourceList( null, userId, ResourceType.FILE.ordinal()); if (CollectionUtils.isNotEmpty(fileResourcesList)) { ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(fileResourcesList); ResourceComponent resourceComponent = resourceTreeVisitor.visit(); copyResourceFiles(resourceComponent, oldResourcePath, newResourcePath); } //udf resources List<Resource> udfResourceList = resourceMapper.queryResourceList( null, userId, ResourceType.UDF.ordinal()); if (CollectionUtils.isNotEmpty(udfResourceList)) { ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(udfResourceList); ResourceComponent resourceComponent = resourceTreeVisitor.visit(); copyResourceFiles(resourceComponent, oldUdfsPath, newUdfsPath); } //Delete the user from the old tenant directory String oldUserPath = HadoopUtils.getHdfsUserDir(oldTenant.getTenantCode(), userId); HadoopUtils.getInstance().delete(oldUserPath, true); } else { // if old tenant dir not exists , create createTenantDirIfNotExists(oldTenant.getTenantCode()); } if (HadoopUtils.getInstance().exists(HadoopUtils.getHdfsTenantDir(newTenant.getTenantCode()))) { //create user in the new tenant directory String newUserPath = HadoopUtils.getHdfsUserDir(newTenant.getTenantCode(), user.getId()); HadoopUtils.getInstance().mkdir(newUserPath); } else { // if new tenant dir not exists , create createTenantDirIfNotExists(newTenant.getTenantCode()); } } } user.setTenantId(tenantId); } // updateProcessInstance user userMapper.updateById(user); putMsg(result, Status.SUCCESS); return result; } /** * delete user * * @param loginUser login user * @param id user id * @return delete result code * @throws Exception exception when operate hdfs */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteUserById(User loginUser, int id) throws IOException { Map<String, Object> result = new HashMap<>(); //only admin can operate if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM, id); return result; } //check exist User tempUser = 
userMapper.selectById(id); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, id); return result; } // check if is a project owner List<Project> projects = projectMapper.queryProjectCreatedByUser(id); if (CollectionUtils.isNotEmpty(projects)) { String projectNames = projects.stream().map(Project::getName).collect(Collectors.joining(",")); putMsg(result, Status.TRANSFORM_PROJECT_OWNERSHIP, projectNames); return result; } // delete user User user = userMapper.queryTenantCodeByUserId(id); if (user != null) { if (PropertyUtils.getResUploadStartupState()) { String userPath = HadoopUtils.getHdfsUserDir(user.getTenantCode(), id); if (HadoopUtils.getInstance().exists(userPath)) { HadoopUtils.getInstance().delete(userPath, true); } } } accessTokenMapper.deleteAccessTokenByUserId(id); userMapper.deleteById(id); putMsg(result, Status.SUCCESS); return result; } /** * grant project * * @param loginUser login user * @param userId user id * @param projectIds project id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantProject(User loginUser, int userId, String projectIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } //check exist User tempUser = userMapper.selectById(userId); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } //if the selected projectIds are empty, delete all items associated with the user if (check(result, StringUtils.isEmpty(projectIds), Status.SUCCESS)) { projectUserMapper.deleteProjectRelation(0, userId); return result; } String[] projectIdArr = projectIds.split(","); for (String projectId : projectIdArr) { Date now = new Date(); ProjectUser projectUser = new ProjectUser(); projectUser.setUserId(userId); projectUser.setProjectId(Integer.parseInt(projectId)); projectUser.setPerm(7); projectUser.setCreateTime(now); projectUser.setUpdateTime(now); projectUserMapper.insert(projectUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant resource * * @param loginUser login user * @param userId user id * @param resourceIds resource id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantResources(User loginUser, int userId, String resourceIds) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } Set<Integer> needAuthorizeResIds = new HashSet<>(); if (StringUtils.isNotBlank(resourceIds)) { String[] resourceFullIdArr = resourceIds.split(","); // need authorize resource id set for (String resourceFullId : resourceFullIdArr) { String[] resourceIdArr = resourceFullId.split("-"); for (int i = 0; i <= resourceIdArr.length - 1; i++) { int resourceIdValue = Integer.parseInt(resourceIdArr[i]); needAuthorizeResIds.add(resourceIdValue); } } } //get the authorized resource id list by user id List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, Constants.AUTHORIZE_WRITABLE_PERM); List<Resource> oldAuthorizedRes = CollectionUtils.isEmpty(resIds) ? 
new ArrayList<>() : resourceMapper.queryResourceListById(resIds); //if resource type is UDF,need check whether it is bound by UDF function Set<Integer> oldAuthorizedResIds = oldAuthorizedRes.stream().map(Resource::getId).collect(Collectors.toSet()); //get the unauthorized resource id list oldAuthorizedResIds.removeAll(needAuthorizeResIds); if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) { // get all resource id of process definitions those is released List<Map<String, Object>> list = processDefinitionMapper.listResourcesByUser(userId); Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set<Integer> resourceIdSet = resourceProcessMap.keySet(); resourceIdSet.retainAll(oldAuthorizedResIds); if (CollectionUtils.isNotEmpty(resourceIdSet)) { logger.error("can't be deleted,because it is used of process definition"); for (Integer resId : resourceIdSet) { logger.error("resource id:{} is used of process definition {}", resId, resourceProcessMap.get(resId)); } putMsg(result, Status.RESOURCE_IS_USED); return result; } } resourceUserMapper.deleteResourceUser(userId, 0); if (check(result, StringUtils.isEmpty(resourceIds), Status.SUCCESS)) { return result; } for (int resourceIdValue : needAuthorizeResIds) { Resource resource = resourceMapper.selectById(resourceIdValue); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Date now = new Date(); ResourcesUser resourcesUser = new ResourcesUser(); resourcesUser.setUserId(userId); resourcesUser.setResourcesId(resourceIdValue); if (resource.isDirectory()) { resourcesUser.setPerm(Constants.AUTHORIZE_READABLE_PERM); } else { resourcesUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); } resourcesUser.setCreateTime(now); resourcesUser.setUpdateTime(now); resourceUserMapper.insert(resourcesUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant udf function * * @param loginUser login user * @param userId user id * @param udfIds udf id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantUDFFunction(User loginUser, int userId, String udfIds) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } udfUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(udfIds), Status.SUCCESS)) { return result; } String[] resourcesIdArr = udfIds.split(","); for (String udfId : resourcesIdArr) { Date now = new Date(); UDFUser udfUser = new UDFUser(); udfUser.setUserId(userId); udfUser.setUdfId(Integer.parseInt(udfId)); udfUser.setPerm(7); udfUser.setCreateTime(now); udfUser.setUpdateTime(now); udfUserMapper.insert(udfUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant datasource * * @param loginUser login user * @param userId user id * @param datasourceIds data source id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantDataSource(User loginUser, int userId, String datasourceIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { 
putMsg(result, Status.USER_NOT_EXIST, userId); return result; } datasourceUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(datasourceIds), Status.SUCCESS)) { return result; } String[] datasourceIdArr = datasourceIds.split(","); for (String datasourceId : datasourceIdArr) { Date now = new Date(); DatasourceUser datasourceUser = new DatasourceUser(); datasourceUser.setUserId(userId); datasourceUser.setDatasourceId(Integer.parseInt(datasourceId)); datasourceUser.setPerm(7); datasourceUser.setCreateTime(now); datasourceUser.setUpdateTime(now); datasourceUserMapper.insert(datasourceUser); } putMsg(result, Status.SUCCESS); return result; } /** * query user info * * @param loginUser login user * @return user info */ @Override public Map<String, Object> getUserInfo(User loginUser) { Map<String, Object> result = new HashMap<>(); User user = null; if (loginUser.getUserType() == UserType.ADMIN_USER) { user = loginUser; } else { user = userMapper.queryDetailsById(loginUser.getId()); List<AlertGroup> alertGroups = alertGroupMapper.queryByUserId(loginUser.getId()); StringBuilder sb = new StringBuilder(); if (alertGroups != null && !alertGroups.isEmpty()) { for (int i = 0; i < alertGroups.size() - 1; i++) { sb.append(alertGroups.get(i).getGroupName() + ","); } sb.append(alertGroups.get(alertGroups.size() - 1)); user.setAlertGroup(sb.toString()); } } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryAllGeneralUsers(User loginUser) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryAllGeneralUser(); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryUserList(User loginUser) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * verify user name exists * * @param userName user name * @return true if user name not exists, otherwise return false */ @Override public Result<Object> verifyUserName(String userName) { Result<Object> result = new Result<>(); User user = userMapper.queryByUserNameAccurately(userName); if (user != null) { putMsg(result, Status.USER_NAME_EXIST); } else { putMsg(result, Status.SUCCESS); } return result; } /** * unauthorized user * * @param loginUser login user * @param alertgroupId alert group id * @return unauthorize result code */ @Override public Map<String, Object> unauthorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); List<User> resultUsers = new ArrayList<>(); Set<User> userSet = null; if (userList != null && !userList.isEmpty()) { userSet = new HashSet<>(userList); List<User> authedUserList = userMapper.queryUserListByAlertGroupId(alertgroupId); Set<User> authedUserSet = null; if (authedUserList != null && !authedUserList.isEmpty()) { 
authedUserSet = new HashSet<>(authedUserList); userSet.removeAll(authedUserSet); } resultUsers = new ArrayList<>(userSet); } result.put(Constants.DATA_LIST, resultUsers); putMsg(result, Status.SUCCESS); return result; } /** * authorized user * * @param loginUser login user * @param alertgroupId alert group id * @return authorized result code */ @Override public Map<String, Object> authorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryUserListByAlertGroupId(alertgroupId); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * @param tenantId tenant id * @return true if tenant exists, otherwise return false */ private boolean checkTenantExists(int tenantId) { return tenantMapper.queryById(tenantId) != null; } /** * @return if check failed return the field, otherwise return null */ private String checkUserParams(String userName, String password, String email, String phone) { String msg = null; if (!CheckUtils.checkUserName(userName)) { msg = userName; } else if (!CheckUtils.checkPassword(password)) { msg = password; } else if (!CheckUtils.checkEmail(email)) { msg = email; } else if (!CheckUtils.checkPhone(phone)) { msg = phone; } return msg; } /** * copy resource files * * @param resourceComponent resource component * @param srcBasePath src base path * @param dstBasePath dst base path * @throws IOException io exception */ private void copyResourceFiles(ResourceComponent resourceComponent, String srcBasePath, String dstBasePath) throws IOException { List<ResourceComponent> components = resourceComponent.getChildren(); if (CollectionUtils.isNotEmpty(components)) { for (ResourceComponent component : components) { // verify whether exist if (!HadoopUtils.getInstance().exists(String.format("%s/%s", srcBasePath, component.getFullName()))) { logger.error("resource file: {} not exist,copy error", component.getFullName()); throw new ServiceException(Status.RESOURCE_NOT_EXIST); } if (!component.isDirctory()) { // copy it to dst HadoopUtils.getInstance().copy(String.format("%s/%s", srcBasePath, component.getFullName()), String.format("%s/%s", dstBasePath, component.getFullName()), false, true); continue; } if (CollectionUtils.isEmpty(component.getChildren())) { // if not exist,need create it if (!HadoopUtils.getInstance().exists(String.format("%s/%s", dstBasePath, component.getFullName()))) { HadoopUtils.getInstance().mkdir(String.format("%s/%s", dstBasePath, component.getFullName())); } } else { copyResourceFiles(component, srcBasePath, dstBasePath); } } } } /** * registry user, default state is 0, default tenant_id is 1, no phone, no queue * * @param userName user name * @param userPassword user password * @param repeatPassword repeat password * @param email email * @return registry result code * @throws Exception exception */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email) { Map<String, Object> result = new HashMap<>(); //check user params String msg = this.checkUserParams(userName, userPassword, email, ""); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!userPassword.equals(repeatPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "two passwords are not 
same"); return result; } User user = createUser(userName, userPassword, email, 1, "", "", Flag.NO.ordinal()); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, user); return result; } /** * activate user, only system admin have permission, change user state code 0 to 1 * * @param loginUser login user * @param userName user name * @return create result code */ @Override public Map<String, Object> activateUser(User loginUser, String userName) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User user = userMapper.queryByUserNameAccurately(userName); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userName); return result; } if (user.getState() != Flag.NO.ordinal()) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } user.setState(Flag.YES.ordinal()); Date now = new Date(); user.setUpdateTime(now); userMapper.updateById(user); User responseUser = userMapper.queryByUserNameAccurately(userName); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, responseUser); return result; } /** * activate user, only system admin have permission, change users state code 0 to 1 * * @param loginUser login user * @param userNames user name * @return create result code */ @Override public Map<String, Object> batchActivateUser(User loginUser, List<String> userNames) { Map<String, Object> result = new HashMap<>(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } int totalSuccess = 0; List<String> successUserNames = new ArrayList<>(); Map<String, Object> successRes = new HashMap<>(); int totalFailed = 0; List<Map<String, String>> failedInfo = new ArrayList<>(); Map<String, Object> failedRes = new HashMap<>(); for (String userName : userNames) { Map<String, Object> tmpResult = activateUser(loginUser, userName); if (tmpResult.get(Constants.STATUS) != Status.SUCCESS) { totalFailed++; Map<String, String> failedBody = new HashMap<>(); failedBody.put("userName", userName); Status status = (Status) tmpResult.get(Constants.STATUS); String errorMessage = MessageFormat.format(status.getMsg(), userName); failedBody.put("msg", errorMessage); failedInfo.add(failedBody); } else { totalSuccess++; successUserNames.add(userName); } } successRes.put("sum", totalSuccess); successRes.put("userName", successUserNames); failedRes.put("sum", totalFailed); failedRes.put("info", failedInfo); Map<String, Object> res = new HashMap<>(); res.put("success", successRes); res.put("failed", failedRes); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, res); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/main/resources/i18n/messages.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=query schedule list EXECUTE_PROCESS_TAG=execute process related operation PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation RUN_PROCESS_INSTANCE_NOTES=run process instance START_NODE_LIST=start node list(node name) TASK_DEPEND_TYPE=task depend type COMMAND_TYPE=command type RUN_MODE=run mode TIMEOUT=timeout EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance EXECUTE_TYPE=execute type START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition GET_RECEIVER_CC_NOTES=query receiver cc DESC=description GROUP_NAME=group name GROUP_TYPE=group type QUERY_ALERT_GROUP_LIST_NOTES=query alert group list UPDATE_ALERT_GROUP_NOTES=update alert group DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not GRANT_ALERT_GROUP_NOTES=grant alert group USER_IDS=user id list ALERT_GROUP_TAG=alert group related operation ALERT_PLUGIN_INSTANCE_TAG=alert plugin instance related operation UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=update alert plugin instance operation CREATE_ALERT_PLUGIN_INSTANCE_NOTES=create alert plugin instance operation DELETE_ALERT_PLUGIN_INSTANCE_NOTES=delete alert plugin instance operation GET_ALERT_PLUGIN_INSTANCE_NOTES=get alert plugin instance operation CREATE_ALERT_GROUP_NOTES=create alert group WORKER_GROUP_TAG=worker group related operation SAVE_WORKER_GROUP_NOTES=create worker group WORKER_GROUP_NAME=worker group name WORKER_IP_LIST=worker ip list, eg. 
192.168.1.1,192.168.1.2 QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging QUERY_WORKER_GROUP_LIST_NOTES=query worker group list DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id DATA_ANALYSIS_TAG=analysis related operation of task state COUNT_TASK_STATE_NOTES=count task state COUNT_PROCESS_INSTANCE_NOTES=count process instance state COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user COUNT_COMMAND_STATE_NOTES=count command state COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ ACCESS_TOKEN_TAG=access token related operation MONITOR_TAG=monitor related operation MASTER_LIST_NOTES=master server list WORKER_LIST_NOTES=worker server list QUERY_DATABASE_STATE_NOTES=query database state QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE TASK_STATE=task instance state SOURCE_TABLE=SOURCE TABLE DEST_TABLE=dest table TASK_DATE=task date QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging DATA_SOURCE_TAG=data source related operation CREATE_DATA_SOURCE_NOTES=create data source DATA_SOURCE_NAME=data source name DATA_SOURCE_NOTE=data source desc DB_TYPE=database type DATA_SOURCE_HOST=DATA SOURCE HOST DATA_SOURCE_PORT=data source port DATABASE_NAME=database name QUEUE_TAG=queue related operation QUERY_QUEUE_LIST_NOTES=query queue list QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging CREATE_QUEUE_NOTES=create queue YARN_QUEUE_NAME=yarn(hadoop) queue name QUEUE_ID=queue id TENANT_DESC=tenant desc QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging QUERY_TENANT_LIST_NOTES=query tenant list UPDATE_TENANT_NOTES=update tenant DELETE_TENANT_NOTES=delete tenant RESOURCES_TAG=resource center related operation CREATE_RESOURCE_NOTES=create resource RESOURCE_TYPE=resource file type RESOURCE_NAME=resource name RESOURCE_DESC=resource file desc RESOURCE_FILE=resource file RESOURCE_ID=resource id QUERY_RESOURCE_LIST_NOTES=query resource list DELETE_RESOURCE_BY_ID_NOTES=delete resource by id VIEW_RESOURCE_BY_ID_NOTES=view resource by id ONLINE_CREATE_RESOURCE_NOTES=online create resource SUFFIX=resource file suffix CONTENT=resource file content UPDATE_RESOURCE_NOTES=edit resource file online DOWNLOAD_RESOURCE_NOTES=download resource file CREATE_UDF_FUNCTION_NOTES=create udf function UDF_TYPE=UDF type FUNC_NAME=function name CLASS_NAME=package and class name ARG_TYPES=arguments UDF_DESC=udf desc VIEW_UDF_FUNCTION_NOTES=view udf function UPDATE_UDF_FUNCTION_NOTES=update udf function QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name DELETE_UDF_FUNCTION_NOTES=delete udf function AUTHORIZED_FILE_NOTES=authorized file UNAUTHORIZED_FILE_NOTES=unauthorized file AUTHORIZED_UDF_FUNC_NOTES=authorized udf func UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func VERIFY_QUEUE_NOTES=verify queue TENANT_TAG=tenant related operation CREATE_TENANT_NOTES=create tenant TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description UPDATE_PROJECT_NOTES=update project 
PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_ALL_PROJECT_LIST_NOTES=query all project list QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging CREATE_TOKEN_NOTES=create token, note: please login first QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging SCHEDULE=schedule WARNING_TYPE=warning type(sending strategy) WARNING_GROUP_ID=warning group id FAILURE_STRATEGY=failure strategy RECEIVERS=receivers RECEIVERS_CC=receivers cc WORKER_GROUP_ID=worker server group id PROCESS_INSTANCE_PRIORITY=process instance priority UPDATE_SCHEDULE_NOTES=update schedule SCHEDULE_ID=schedule id ONLINE_SCHEDULE_NOTES=online schedule OFFLINE_SCHEDULE_NOTES=offline schedule QUERY_SCHEDULE_NOTES=query schedule QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging LOGIN_TAG=User login related operations USER_NAME=user name PROJECT_NAME=project name CREATE_PROCESS_DEFINITION_NOTES=create process definition PROCESS_DEFINITION_NAME=process definition name PROCESS_DEFINITION_JSON=process definition detail info (json format) PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) PROCESS_DEFINITION_DESC=process definition desc PROCESS_DEFINITION_TAG=process definition related operation SIGNOUT_NOTES=logout USER_PASSWORD=user password UPDATE_PROCESS_INSTANCE_NOTES=update process instance QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list VERIFY_PROCESS_DEFINITION_NAME_NOTES=verify process definition name LOGIN_NOTES=user login UPDATE_PROCESS_DEFINITION_NOTES=update process definition PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=query process definition by name QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) SCHEDULE_TIME=schedule time SYNC_DEFINE=update the information of the process instance to the process definition\ RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover process instance SEARCH_VAL=search val USER_ID=user id PAGE_SIZE=page size LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definition all by project id DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance
id TASK_ID=task instance id SKIP_LINE_NUM=skip line num QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log USERS_TAG=users related operation SCHEDULER_TAG=scheduler related operation CREATE_SCHEDULE_NOTES=create schedule CREATE_USER_NOTES=create user TENANT_ID=tenant id QUEUE=queue EMAIL=email PHONE=phone QUERY_USER_LIST_NOTES=query user list UPDATE_USER_NOTES=update user DELETE_USER_BY_ID_NOTES=delete user by id GRANT_PROJECT_NOTES=GRANT PROJECT PROJECT_IDS=project ids(string format, multiple projects separated by ",") GRANT_RESOURCE_NOTES=grant resource file RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") GET_USER_INFO_NOTES=get user info LIST_USER_NOTES=list user VERIFY_USER_NAME_NOTES=verify user name UNAUTHORIZED_USER_NOTES=cancel authorization ALERT_GROUP_ID=alert group id AUTHORIZED_USER_NOTES=authorized user GRANT_UDF_FUNC_NOTES=grant udf function UDF_IDS=udf ids(string format, multiple udf functions separated by ",") GRANT_DATASOURCE_NOTES=grant datasource DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables VIEW_GANTT_NOTES=view gantt SUB_PROCESS_INSTANCE_ID=sub process instance id TASK_NAME=task instance name TASK_INSTANCE_TAG=task instance related operation LOGGER_TAG=log related operation PROCESS_INSTANCE_TAG=process instance related operation EXECUTION_STATUS=runing status for workflow and task nodes HOST=ip address of running task START_DATE=start date END_DATE=end date QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id UPDATE_DATA_SOURCE_NOTES=update data source DATA_SOURCE_ID=DATA SOURCE ID QUERY_DATA_SOURCE_NOTES=query data source by id QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test DELETE_DATA_SOURCE_NOTES=delete data source VERIFY_DATA_SOURCE_NOTES=verify data source UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source AUTHORIZED_DATA_SOURCE_NOTES=authorized data source DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id QUERY_ALERT_GROUP_LIST_PAGING_NOTES=query alert group list paging EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=export process definition by id BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES= batch export process definition by ids QUERY_USER_CREATED_PROJECT_NOTES= query user created project QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES= query authorized and user created project COPY_PROCESS_DEFINITION_NOTES= copy process definition notes MOVE_PROCESS_DEFINITION_NOTES= move process definition notes TARGET_PROJECT_ID= target project id IS_COPY = is copy DELETE_PROCESS_DEFINITION_VERSION_NOTES=delete process definition version QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=query process definition versions SWITCH_PROCESS_DEFINITION_VERSION_NOTES=switch process definition version VERSION=version
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
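As a rough illustration of the request shape described above, the new endpoint could look like the following sketch in UsersController, mirroring the existing grant-style endpoints. This is a minimal sketch only: the mapping path, the Swagger annotation values, and the usersService.grantProjectByCode signature are assumptions, not confirmed by this record.

// Hypothetical addition to org.apache.dolphinscheduler.api.controller.UsersController,
// modeled on the existing grantProject endpoint; path and names are assumptions.
@ApiOperation(value = "grantProjectByCode", notes = "GRANT_PROJECT_BY_CODE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
    @ApiImplicitParam(name = "projectCodes", value = "PROJECT_CODES", required = true, dataType = "String")
})
@PostMapping(value = "/grant-project-by-code")
@ResponseStatus(HttpStatus.OK)
@ApiException(GRANT_PROJECT_ERROR)
public Result grantProjectByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @RequestParam(value = "userId") int userId,
                                 @RequestParam(value = "projectCodes") String projectCodes) {
    // delegate to the service layer; the response shape matches grantProject
    Map<String, Object> result = usersService.grantProjectByCode(loginUser, userId, projectCodes);
    return returnDataList(result);
}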
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=query schedule list EXECUTE_PROCESS_TAG=execute process related operation PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation RUN_PROCESS_INSTANCE_NOTES=run process instance START_NODE_LIST=start node list(node name) TASK_DEPEND_TYPE=task depend type COMMAND_TYPE=command type RUN_MODE=run mode TIMEOUT=timeout EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance EXECUTE_TYPE=execute type START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition GET_RECEIVER_CC_NOTES=query receiver cc DESC=description GROUP_NAME=group name GROUP_TYPE=group type QUERY_ALERT_GROUP_LIST_NOTES=query alert group list UPDATE_ALERT_GROUP_NOTES=update alert group DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check whether alert group exists GRANT_ALERT_GROUP_NOTES=grant alert group USER_IDS=user id list EXECUTOR_TAG=executor operation EXECUTOR_NAME=executor name WORKER_GROUP=worker group startParams=start parameters ALERT_GROUP_TAG=alert group related operation ALERT_PLUGIN_INSTANCE_TAG=alert plugin instance related operation WORK_FLOW_LINEAGE_TAG=work flow lineage related operation UI_PLUGINS_TAG=UI plugin related operation UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=update alert plugin instance operation CREATE_ALERT_PLUGIN_INSTANCE_NOTES=create alert plugin instance operation DELETE_ALERT_PLUGIN_INSTANCE_NOTES=delete alert plugin instance operation QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES=query alert plugin instance list paging QUERY_TOPN_LONGEST_RUNNING_PROCESS_INSTANCE_NOTES=query topN longest running process instance ALERT_PLUGIN_INSTANCE_NAME=alert plugin instance name ALERT_PLUGIN_DEFINE_ID=alert plugin define id ALERT_PLUGIN_ID=alert plugin id ALERT_PLUGIN_INSTANCE_ID=alert plugin instance id ALERT_PLUGIN_INSTANCE_PARAMS=alert plugin instance parameters ALERT_INSTANCE_NAME=alert instance name VERIFY_ALERT_INSTANCE_NAME_NOTES=verify alert instance name DATA_SOURCE_PARAM=datasource parameter QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES=query all alert plugin instances GET_ALERT_PLUGIN_INSTANCE_NOTES=get alert plugin instance operation CREATE_ALERT_GROUP_NOTES=create alert group WORKER_GROUP_TAG=worker group related operation SAVE_WORKER_GROUP_NOTES=create worker group WORKER_GROUP_NAME=worker group name WORKER_IP_LIST=worker ip list, e.g. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging QUERY_WORKER_GROUP_LIST_NOTES=query worker group list DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id DATA_ANALYSIS_TAG=analysis related operation of task state COUNT_TASK_STATE_NOTES=count task state COUNT_PROCESS_INSTANCE_NOTES=count process instance state COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user COUNT_COMMAND_STATE_NOTES=count command state COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue ACCESS_TOKEN_TAG=access token related operation MONITOR_TAG=monitor related operation MASTER_LIST_NOTES=master server list WORKER_LIST_NOTES=worker server list QUERY_DATABASE_STATE_NOTES=query database state QUERY_ZOOKEEPER_STATE_NOTES=query zookeeper state TASK_STATE=task instance state SOURCE_TABLE=source table DEST_TABLE=dest table TASK_DATE=task date QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging DATA_SOURCE_TAG=data source related operation CREATE_DATA_SOURCE_NOTES=create data source DATA_SOURCE_NAME=data source name DATA_SOURCE_NOTE=data source desc DB_TYPE=database type DATA_SOURCE_HOST=data source host DATA_SOURCE_PORT=data source port DATABASE_NAME=database name QUEUE_TAG=queue related operation QUERY_QUEUE_LIST_NOTES=query queue list QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging CREATE_QUEUE_NOTES=create queue YARN_QUEUE_NAME=yarn(hadoop) queue name QUEUE_ID=queue id TENANT_DESC=tenant desc QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging QUERY_TENANT_LIST_NOTES=query tenant list UPDATE_TENANT_NOTES=update tenant DELETE_TENANT_NOTES=delete tenant RESOURCES_TAG=resource center related operation CREATE_RESOURCE_NOTES=create resource RESOURCE_TYPE=resource file type RESOURCE_NAME=resource name RESOURCE_DESC=resource file desc RESOURCE_FILE=resource file RESOURCE_ID=resource id QUERY_RESOURCE_LIST_NOTES=query resource list DELETE_RESOURCE_BY_ID_NOTES=delete resource by id VIEW_RESOURCE_BY_ID_NOTES=view resource by id ONLINE_CREATE_RESOURCE_NOTES=online create resource SUFFIX=resource file suffix CONTENT=resource file content UPDATE_RESOURCE_NOTES=edit resource file online DOWNLOAD_RESOURCE_NOTES=download resource file CREATE_UDF_FUNCTION_NOTES=create udf function UDF_TYPE=UDF type FUNC_NAME=function name CLASS_NAME=package and class name ARG_TYPES=arguments UDF_DESC=udf desc VIEW_UDF_FUNCTION_NOTES=view udf function UPDATE_UDF_FUNCTION_NOTES=update udf function QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name DELETE_UDF_FUNCTION_NOTES=delete udf function AUTHORIZED_FILE_NOTES=authorized file UNAUTHORIZED_FILE_NOTES=unauthorized file AUTHORIZED_UDF_FUNC_NOTES=authorized udf func UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func VERIFY_QUEUE_NOTES=verify queue TENANT_TAG=tenant related operation CREATE_TENANT_NOTES=create tenant TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging QUERY_ALL_PROJECT_LIST_NOTES=query all project list DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging CREATE_TOKEN_NOTES=create token, note: please login first QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging SCHEDULE=schedule WARNING_TYPE=warning type(sending strategy) WARNING_GROUP_ID=warning group id FAILURE_STRATEGY=failure strategy RECEIVERS=receivers RECEIVERS_CC=receivers cc WORKER_GROUP_ID=worker server group id PROCESS_INSTANCE_START_TIME=process instance start time PROCESS_INSTANCE_END_TIME=process instance end time PROCESS_INSTANCE_SIZE=process instance size PROCESS_INSTANCE_PRIORITY=process instance priority EXPECTED_PARALLELISM_NUMBER=custom parallelism to set the complement task threads UPDATE_SCHEDULE_NOTES=update schedule SCHEDULE_ID=schedule id ONLINE_SCHEDULE_NOTES=online schedule OFFLINE_SCHEDULE_NOTES=offline schedule QUERY_SCHEDULE_NOTES=query schedule QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging LOGIN_TAG=User login related operations USER_NAME=user name PROJECT_NAME=project name CREATE_PROCESS_DEFINITION_NOTES=create process definition PROCESS_DEFINITION_NAME=process definition name PROCESS_DEFINITION_JSON=process definition detail info (json format) PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) PROCESS_DEFINITION_DESC=process definition desc PROCESS_DEFINITION_TAG=process definition related operation SIGNOUT_NOTES=logout USER_PASSWORD=user password UPDATE_PROCESS_INSTANCE_NOTES=update process instance QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list VERIFY_PROCESS_DEFINITION_NAME_NOTES=verify process definition name LOGIN_NOTES=user login UPDATE_PROCESS_DEFINITION_NOTES=update process definition PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids PROCESS_DEFINITION_CODE=process definition code PROCESS_DEFINITION_CODE_LIST=process definition code list IMPORT_PROCESS_DEFINITION_NOTES=import process definition RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) SCHEDULE_TIME=schedule time SYNC_DEFINE=update the information of the process instance to the process definition RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover process instance PREVIEW_SCHEDULE_NOTES=preview schedule SEARCH_VAL=search val USER_ID=user id FORCE_TASK_SUCCESS=force task success QUERY_TASK_INSTANCE_LIST_PAGING_NOTES=query task instance list paging PROCESS_INSTANCE_NAME=process instance name TASK_INSTANCE_ID=task instance id VERIFY_TENANT_CODE_NOTES=verify tenant code QUERY_UI_PLUGIN_DETAIL_BY_ID=query ui plugin detail by id
PLUGIN_ID=plugin id QUERY_UI_PLUGINS_BY_TYPE=query ui plugins by type ACTIVATE_USER_NOTES=activate user BATCH_ACTIVATE_USER_NOTES=batch activate user STATE=state REPEAT_PASSWORD=repeat password REGISTER_USER_NOTES=register user USER_NAMES=user names PAGE_SIZE=page size LIMIT=limit CREATE_WORKER_GROUP_NOTES=create worker group WORKER_ADDR_LIST=worker address list QUERY_WORKER_ADDRESS_LIST_NOTES=query worker address list QUERY_WORKFLOW_LINEAGE_BY_IDS_NOTES=query workflow lineage by ids QUERY_WORKFLOW_LINEAGE_BY_NAME_NOTES=query workflow lineage by name VIEW_TREE_NOTES=view tree UDF_ID=udf id GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id GET_NODE_LIST_BY_DEFINITION_CODE_NOTES=get node list by definition code QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=query process definition by name PROCESS_DEFINITION_ID_LIST=process definition id list QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definition all by project id DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_NOTES=batch delete process instance by process ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id PROCESS_INSTANCE_IDS=process instance ids SKIP_LINE_NUM=skip line num QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log USERS_TAG=users related operation SCHEDULER_TAG=scheduler related operation CREATE_SCHEDULE_NOTES=create schedule CREATE_USER_NOTES=create user TENANT_ID=tenant id QUEUE=queue EMAIL=email PHONE=phone QUERY_USER_LIST_NOTES=query user list UPDATE_USER_NOTES=update user UPDATE_QUEUE_NOTES=update queue DELETE_USER_BY_ID_NOTES=delete user by id GRANT_PROJECT_NOTES=grant project PROJECT_IDS=project ids(string format, multiple projects separated by ",") GRANT_RESOURCE_NOTES=grant resource file RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") GET_USER_INFO_NOTES=get user info LIST_USER_NOTES=list user VERIFY_USER_NAME_NOTES=verify user name UNAUTHORIZED_USER_NOTES=cancel authorization ALERT_GROUP_ID=alert group id AUTHORIZED_USER_NOTES=authorized user AUTHORIZE_RESOURCE_TREE_NOTES=authorize resource tree RESOURCE_CURRENTDIR=dir of the current resource QUERY_RESOURCE_LIST_PAGING_NOTES=query resource list paging RESOURCE_PID=parent directory ID of the current resource RESOURCE_FULL_NAME=resource full name QUERY_BY_RESOURCE_NAME=query by resource name QUERY_UDF_FUNC_LIST_NOTES=query udf function list VERIFY_RESOURCE_NAME_NOTES=verify resource name GRANT_UDF_FUNC_NOTES=grant udf function UDF_IDS=udf ids(string format, multiple udf functions separated by ",") GRANT_DATASOURCE_NOTES=grant datasource DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables VIEW_GANTT_NOTES=view gantt SUB_PROCESS_INSTANCE_ID=sub process instance id TASK_NAME=task instance name TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation PROCESS_INSTANCE_TAG=process instance related operation EXECUTION_STATUS=running status for workflow and task nodes HOST=ip address of running task START_DATE=start date END_DATE=end date QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id UPDATE_DATA_SOURCE_NOTES=update data source DATA_SOURCE_ID=data source id QUERY_DATA_SOURCE_NOTES=query data source by id QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging CONNECT_DATA_SOURCE_NOTES=connect data source CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test DELETE_DATA_SOURCE_NOTES=delete data source VERIFY_DATA_SOURCE_NOTES=verify data source UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source AUTHORIZED_DATA_SOURCE_NOTES=authorized data source DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id QUERY_ALERT_GROUP_LIST_PAGING_NOTES=query alert group list paging EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=export process definition by id BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES=batch export process definition by ids QUERY_USER_CREATED_PROJECT_NOTES=query user created project QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES=query authorized and user created project COPY_PROCESS_DEFINITION_NOTES=copy process definition notes MOVE_PROCESS_DEFINITION_NOTES=move process definition notes TARGET_PROJECT_ID=target project id IS_COPY=is copy DELETE_PROCESS_DEFINITION_VERSION_NOTES=delete process definition version QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=query process definition versions SWITCH_PROCESS_DEFINITION_VERSION_NOTES=switch process definition version VERSION=version
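The bundle above is the file as captured before the fix, so it carries no key for the proposed endpoint yet. If grantProjectByCode is added, the en_US bundle would presumably gain entries along these lines (the key names and wording are assumptions inferred from the existing naming scheme, e.g. GRANT_PROJECT_NOTES and PROJECT_IDS):

GRANT_PROJECT_BY_CODE_NOTES=grant project by code
PROJECT_CODES=project codes(string format, multiple project codes separated by ",")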
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=查询定时列表 PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作 UI_PLUGINS_TAG=UI插件相关操作 WORK_FLOW_LINEAGE_TAG=工作流血缘相关操作 RUN_PROCESS_INSTANCE_NOTES=运行流程实例 START_NODE_LIST=开始节点列表(节点name) TASK_DEPEND_TYPE=任务依赖类型 COMMAND_TYPE=指令类型 RUN_MODE=运行模式 TIMEOUT=超时时间 EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等) EXECUTE_TYPE=执行类型 EXECUTOR_TAG=流程相关操作 EXECUTOR_NAME=流程名称 START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义 DESC=备注(描述) GROUP_NAME=组名称 WORKER_GROUP=worker群组 startParams=启动参数 GROUP_TYPE=组类型 QUERY_ALERT_GROUP_LIST_NOTES=告警组列表 UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组 DELETE_ALERT_GROUP_BY_ID_NOTES=通过ID删除告警组 VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在 GRANT_ALERT_GROUP_NOTES=授权告警组 PROCESS_DEFINITION_IDS=流程定义ID PROCESS_DEFINITION_CODE=流程定义编码 PROCESS_DEFINITION_CODE_LIST=流程定义编码列表 USER_IDS=用户ID列表 ALERT_GROUP_TAG=告警组相关操作 WORKER_GROUP_TAG=Worker分组管理 SAVE_WORKER_GROUP_NOTES=创建Worker分组 ALERT_PLUGIN_INSTANCE_TAG=告警插件实例相关操作 WORKER_GROUP_NAME=Worker分组名称 WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割 QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理 QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组 DELETE_WORKER_GROUP_BY_ID_NOTES=通过ID删除worker group DATA_ANALYSIS_TAG=任务状态分析相关操作 COUNT_TASK_STATE_NOTES=任务状态统计 COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态 COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义 COUNT_COMMAND_STATE_NOTES=统计命令状态 COUNT_QUEUE_STATE_NOTES=统计队列里任务状态 ACCESS_TOKEN_TAG=访问token相关操作 MONITOR_TAG=监控相关操作 MASTER_LIST_NOTES=master服务列表 WORKER_LIST_NOTES=worker服务列表 QUERY_DATABASE_STATE_NOTES=查询数据库状态 QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态 TASK_STATE=任务实例状态 SOURCE_TABLE=源表 DEST_TABLE=目标表 TASK_DATE=任务时间 QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表 DATA_SOURCE_TAG=数据源相关操作 CREATE_DATA_SOURCE_NOTES=创建数据源 DATA_SOURCE_NAME=数据源名称 DATA_SOURCE_NOTE=数据源描述 DB_TYPE=数据源类型 DATA_SOURCE_HOST=IP主机名 DATA_SOURCE_PORT=数据源端口 DATABASE_NAME=数据库名 QUEUE_TAG=队列相关操作 QUERY_TOPN_LONGEST_RUNNING_PROCESS_INSTANCE_NOTES=查询topN最长运行流程实例 QUERY_QUEUE_LIST_NOTES=查询队列列表 QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表 CREATE_QUEUE_NOTES=创建队列 YARN_QUEUE_NAME=hadoop yarn队列名 QUEUE_ID=队列ID TENANT_DESC=租户描述 QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表 QUERY_TENANT_LIST_NOTES=查询租户列表 UPDATE_TENANT_NOTES=更新租户 DELETE_TENANT_NOTES=删除租户 RESOURCES_TAG=资源中心相关操作 CREATE_RESOURCE_NOTES=创建资源 RESOURCE_FULL_NAME=资源全名 RESOURCE_TYPE=资源文件类型 RESOURCE_NAME=资源文件名称 RESOURCE_DESC=资源文件描述 RESOURCE_FILE=资源文件 RESOURCE_ID=资源ID QUERY_RESOURCE_LIST_NOTES=查询资源列表 QUERY_BY_RESOURCE_NAME=通过资源名称查询 QUERY_UDF_FUNC_LIST_NOTES=查询UDF函数列表 VERIFY_RESOURCE_NAME_NOTES=验证资源名称 DELETE_RESOURCE_BY_ID_NOTES=通过ID删除资源 VIEW_RESOURCE_BY_ID_NOTES=通过ID浏览资源 ONLINE_CREATE_RESOURCE_NOTES=在线创建资源 SUFFIX=资源文件后缀 CONTENT=资源文件内容 UPDATE_RESOURCE_NOTES=在线更新资源文件 DOWNLOAD_RESOURCE_NOTES=下载资源文件 CREATE_UDF_FUNCTION_NOTES=创建UDF函数 UDF_TYPE=UDF类型 FUNC_NAME=函数名称 
CLASS_NAME=包名类名 ARG_TYPES=参数 UDF_DESC=udf描述,使用说明 VIEW_UDF_FUNCTION_NOTES=查看udf函数 UPDATE_UDF_FUNCTION_NOTES=更新udf函数 QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表 VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名 DELETE_UDF_FUNCTION_NOTES=删除UDF函数 AUTHORIZED_FILE_NOTES=授权文件 UNAUTHORIZED_FILE_NOTES=取消授权文件 AUTHORIZED_UDF_FUNC_NOTES=授权udf函数 UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权 VERIFY_QUEUE_NOTES=验证队列 TENANT_TAG=租户相关操作 CREATE_TENANT_NOTES=创建租户 TENANT_CODE=操作系统租户 QUEUE_NAME=队列名 PASSWORD=密码 DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=kerberos认证参数 java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=kerberos认证参数 login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=kerberos认证参数 login.user.keytab.path PROJECT_TAG=项目相关操作 CREATE_PROJECT_NOTES=创建项目 PROJECT_DESC=项目描述 UPDATE_PROJECT_NOTES=更新项目 PROJECT_ID=项目ID QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息 QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表 QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目 DELETE_PROJECT_BY_ID_NOTES=通过ID删除项目 QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目 QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目 TASK_RECORD_TAG=任务记录相关操作 QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表 CREATE_TOKEN_NOTES=创建token,注意需要先登录 QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表 SCHEDULE=定时 WARNING_TYPE=发送策略 WARNING_GROUP_ID=发送组ID FAILURE_STRATEGY=失败策略 RECEIVERS=收件人 RECEIVERS_CC=收件人(抄送) WORKER_GROUP_ID=Worker Server分组ID PROCESS_INSTANCE_PRIORITY=流程实例优先级 EXPECTED_PARALLELISM_NUMBER=补数任务自定义并行度 UPDATE_SCHEDULE_NOTES=更新定时 SCHEDULE_ID=定时ID ONLINE_SCHEDULE_NOTES=定时上线 OFFLINE_SCHEDULE_NOTES=定时下线 QUERY_SCHEDULE_NOTES=查询定时 QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时 LOGIN_TAG=用户登录相关操作 USER_NAME=用户名 PROJECT_NAME=项目名称 CREATE_PROCESS_DEFINITION_NOTES=创建流程定义 PROCESS_INSTANCE_START_TIME=流程实例启动时间 PROCESS_INSTANCE_END_TIME=流程实例结束时间 PROCESS_INSTANCE_SIZE=流程实例个数 PROCESS_DEFINITION_NAME=流程定义名称 PROCESS_DEFINITION_JSON=流程定义详细信息(json格式) PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式) PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式) PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式) PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式) PROCESS_DEFINITION_DESC=流程定义描述信息 PROCESS_DEFINITION_TAG=流程定义相关操作 SIGNOUT_NOTES=退出登录 USER_PASSWORD=用户密码 UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例 QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表 VERIFY_PROCESS_DEFINITION_NAME_NOTES=验证流程定义名字 LOGIN_NOTES=用户登录 UPDATE_PROCESS_DEFINITION_NOTES=更新流程定义 PROCESS_DEFINITION_ID=流程定义ID RELEASE_PROCESS_DEFINITION_NOTES=发布流程定义 QUERY_PROCESS_DEFINITION_BY_ID_NOTES=通过流程定义ID查询流程定义 QUERY_PROCESS_DEFINITION_LIST_NOTES=查询流程定义列表 QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 PAGE_NO=页码号 PROCESS_INSTANCE_ID=流程实例ID PROCESS_INSTANCE_IDS=流程实例ID集合 PROCESS_INSTANCE_JSON=流程实例信息(json格式) PREVIEW_SCHEDULE_NOTES=定时调度预览 SCHEDULE_TIME=定时时间 SYNC_DEFINE=更新流程实例的信息是否同步到流程定义 RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例 SEARCH_VAL=搜索值 FORCE_TASK_SUCCESS=强制TASK成功 QUERY_TASK_INSTANCE_LIST_PAGING_NOTES=分页查询任务实例列表 PROCESS_INSTANCE_NAME=流程实例名称 TASK_INSTANCE_ID=任务实例ID VERIFY_TENANT_CODE_NOTES=验证租户 QUERY_UI_PLUGIN_DETAIL_BY_ID=通过ID查询UI插件详情 QUERY_UI_PLUGINS_BY_TYPE=通过类型查询UI插件 ACTIVATE_USER_NOTES=激活用户 BATCH_ACTIVATE_USER_NOTES=批量激活用户 REPEAT_PASSWORD=重复密码 REGISTER_USER_NOTES=用户注册 STATE=状态 USER_NAMES=多个用户名 PLUGIN_ID=插件ID USER_ID=用户ID PAGE_SIZE=页大小 LIMIT=显示多少条 UDF_ID=udf ID AUTHORIZE_RESOURCE_TREE_NOTES=授权资源树 RESOURCE_CURRENTDIR=当前资源目录 RESOURCE_PID=资源父目录ID QUERY_RESOURCE_LIST_PAGING_NOTES=分页查询资源列表 VIEW_TREE_NOTES=树状图 IMPORT_PROCESS_DEFINITION_NOTES=导入流程定义 
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=通过流程定义ID获得任务节点列表 PROCESS_DEFINITION_ID_LIST=流程定义id列表 QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=通过项目ID查询流程定义 BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=通过流程定义ID集合批量删除流程定义 BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_NOTES=通过流程实例ID集合批量删除流程实例 DELETE_PROCESS_DEFINITION_BY_ID_NOTES=通过流程定义ID删除流程定义 QUERY_PROCESS_INSTANCE_BY_ID_NOTES=通过流程实例ID查询流程实例 DELETE_PROCESS_INSTANCE_BY_ID_NOTES=通过流程实例ID删除流程实例 TASK_ID=任务实例ID SKIP_LINE_NUM=忽略行数 QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志 DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志 USERS_TAG=用户相关操作 SCHEDULER_TAG=定时相关操作 CREATE_SCHEDULE_NOTES=创建定时 CREATE_USER_NOTES=创建用户 CREATE_WORKER_GROUP_NOTES=创建Worker分组 WORKER_ADDR_LIST=worker地址列表 QUERY_WORKER_ADDRESS_LIST_NOTES=查询worker地址列表 QUERY_WORKFLOW_LINEAGE_BY_IDS_NOTES=通过IDs查询工作流血缘列表 QUERY_WORKFLOW_LINEAGE_BY_NAME_NOTES=通过名称查询工作流血缘列表 TENANT_ID=租户ID QUEUE=使用的队列 EMAIL=邮箱 PHONE=手机号 QUERY_USER_LIST_NOTES=查询用户列表 UPDATE_USER_NOTES=更新用户 UPDATE_QUEUE_NOTES=更新队列 DELETE_USER_BY_ID_NOTES=删除用户通过ID GRANT_PROJECT_NOTES=授权项目 PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割) GRANT_RESOURCE_NOTES=授权资源文件 RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割) GET_USER_INFO_NOTES=获取用户信息 GET_NODE_LIST_BY_DEFINITION_CODE_NOTES=通过流程定义编码查询节点列表 QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=通过名称查询流程定义 LIST_USER_NOTES=用户列表 VERIFY_USER_NAME_NOTES=验证用户名 UNAUTHORIZED_USER_NOTES=取消授权 ALERT_GROUP_ID=报警组ID AUTHORIZED_USER_NOTES=授权用户 GRANT_UDF_FUNC_NOTES=授权udf函数 UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割) GRANT_DATASOURCE_NOTES=授权数据源 DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割) QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=通过任务实例ID查询子流程实例 QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=通过子流程实例ID查询父流程实例信息 QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量 VIEW_GANTT_NOTES=浏览Gantt图 SUB_PROCESS_INSTANCE_ID=子流程实例ID TASK_NAME=任务实例名 TASK_INSTANCE_TAG=任务实例相关操作 LOGGER_TAG=日志相关操作 PROCESS_INSTANCE_TAG=流程实例相关操作 EXECUTION_STATUS=工作流和任务节点的运行状态 HOST=运行任务的主机IP地址 START_DATE=开始时间 END_DATE=结束时间 QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表 DELETE_ALERT_PLUGIN_INSTANCE_NOTES=删除告警插件实例 CREATE_ALERT_PLUGIN_INSTANCE_NOTES=创建告警插件实例 GET_ALERT_PLUGIN_INSTANCE_NOTES=查询告警插件实例 QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES=分页查询告警实例列表 QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES=查询所有告警实例列表 UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=更新告警插件实例 ALERT_PLUGIN_INSTANCE_NAME=告警插件实例名称 ALERT_PLUGIN_DEFINE_ID=告警插件定义ID ALERT_PLUGIN_ID=告警插件ID ALERT_PLUGIN_INSTANCE_ID=告警插件实例ID ALERT_PLUGIN_INSTANCE_PARAMS=告警插件实例参数 ALERT_INSTANCE_NAME=告警插件名称 VERIFY_ALERT_INSTANCE_NAME_NOTES=验证告警插件名称 UPDATE_DATA_SOURCE_NOTES=更新数据源 DATA_SOURCE_PARAM=数据源参数 DATA_SOURCE_ID=数据源ID CREATE_ALERT_GROUP_NOTES=创建告警组 QUERY_DATA_SOURCE_NOTES=查询数据源通过ID QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=通过数据源类型查询数据源列表 QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表 CONNECT_DATA_SOURCE_NOTES=连接数据源 CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试 DELETE_DATA_SOURCE_NOTES=删除数据源 VERIFY_DATA_SOURCE_NOTES=验证数据源 UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源 AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源 DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据 QUERY_ALERT_GROUP_LIST_PAGING_NOTES=分页查询告警组列表 EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=通过工作流ID导出工作流定义 BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES=批量导出工作流定义 QUERY_USER_CREATED_PROJECT_NOTES=查询用户创建的项目 QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES=查询授权和用户创建的项目 COPY_PROCESS_DEFINITION_NOTES=复制工作流定义 MOVE_PROCESS_DEFINITION_NOTES=移动工作流定义 TARGET_PROJECT_ID=目标项目ID IS_COPY=是否复制 DELETE_PROCESS_DEFINITION_VERSION_NOTES=删除流程历史版本 QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=查询流程历史版本信息 
SWITCH_PROCESS_DEFINITION_VERSION_NOTES=切换流程版本 VERSION=版本号
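As with the en_US bundle, the zh_CN file captured here predates the fix; matching entries would presumably be added as well, for example (key names and wording are assumptions modeled on the existing GRANT_PROJECT_NOTES and PROJECT_IDS entries above):

GRANT_PROJECT_BY_CODE_NOTES=通过项目编码授权项目
PROJECT_CODES=项目编码列表(字符串格式,多个项目编码以","分割)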
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/UsersControllerTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.controller; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.utils.JSONUtils; import java.util.ArrayList; import java.util.List; import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MvcResult; import org.springframework.util.LinkedMultiValueMap; import org.springframework.util.MultiValueMap; /** * users controller test */ public class UsersControllerTest extends AbstractControllerTest { private static final Logger logger = LoggerFactory.getLogger(UsersControllerTest.class); @Test public void testCreateUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userName", "user_test"); paramsMap.add("userPassword", "123456qwe?"); paramsMap.add("tenantId", "109"); paramsMap.add("queue", "1"); paramsMap.add("email", "12343534@qq.com"); paramsMap.add("phone", "15800000000"); MvcResult mvcResult = mockMvc.perform(post("/users/create") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.CREATE_USER_ERROR.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testUpdateUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("id", "32"); paramsMap.add("userName", "user_test"); paramsMap.add("userPassword", "123456qwe?"); paramsMap.add("tenantId", "9"); paramsMap.add("queue", "1"); paramsMap.add("email", "12343534@qq.com"); paramsMap.add("phone", "15800000000"); MvcResult mvcResult = mockMvc.perform(post("/users/update") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.UPDATE_USER_ERROR.getCode(), result.getCode().intValue()); } @Test public void testGrantProject() 
throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userId", "32"); paramsMap.add("projectIds", "3"); MvcResult mvcResult = mockMvc.perform(post("/users/grant-project") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testGrantResource() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userId", "32"); paramsMap.add("resourceIds", "5"); MvcResult mvcResult = mockMvc.perform(post("/users/grant-file") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testGrantUDFFunc() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userId", "32"); paramsMap.add("udfIds", "5"); MvcResult mvcResult = mockMvc.perform(post("/users/grant-udf-func") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testGrantDataSource() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userId", "32"); paramsMap.add("datasourceIds", "5"); MvcResult mvcResult = mockMvc.perform(post("/users/grant-datasource") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testGetUserInfo() throws Exception { MvcResult mvcResult = mockMvc.perform(get("/users/get-user-info") .header(SESSION_ID, sessionId)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testListAll() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userName", "test"); MvcResult mvcResult = mockMvc.perform(get("/users/list-all") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), 
Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } // todo: there is a sql error, the table t_ds_relation_user_alertgroup has already been dropped @Ignore @Test public void testAuthorizedUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("alertgroupId", "1"); MvcResult mvcResult = mockMvc.perform(get("/users/authed-user") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } // todo: t_ds_relation_user_alertgroup has already been dropped @Ignore @Test public void testUnauthorizedUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("alertgroupId", "1"); MvcResult mvcResult = mockMvc.perform(get("/users/unauth-user") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testVerifyUserName() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userName", "hello"); MvcResult mvcResult = mockMvc.perform(get("/users/verify-user-name") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testDelUserById() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("id", "32"); MvcResult mvcResult = mockMvc.perform(post("/users/delete") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testQueryList() throws Exception { MvcResult mvcResult = mockMvc.perform(get("/users/list") .header(SESSION_ID, sessionId)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testRegisterUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userName", "user_test"); paramsMap.add("userPassword", "123456qwe?"); paramsMap.add("repeatPassword", "123456qwe?"); 
paramsMap.add("email", "12343534@qq.com"); MvcResult mvcResult = mockMvc.perform(post("/users/register") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); } @Test @Ignore public void testActivateUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("userName", "user_test"); MvcResult mvcResult = mockMvc.perform(post("/users/activate") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); } @Test public void testBatchActivateUser() throws Exception { List<String> userNames = new ArrayList<>(); userNames.add("user_sky_cxl"); userNames.add("19990323"); userNames.add("test_sky_post_11"); String jsonUserNames = JSONUtils.toJsonString(userNames); MvcResult mvcResult = mockMvc.perform(post("/users/batch/activate") .header(SESSION_ID, sessionId) .contentType(MediaType.APPLICATION_JSON) .content(jsonUserNames)) .andExpect(status().isOk()) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Third-party integrated scenarios, support grant through project code ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Require | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
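The record below captures the service-layer tests (UsersServiceTest). For orientation, the service method backing the feature could look roughly like the following sketch, which reuses the regrant pattern of the existing grantProject flow. It is a sketch only: the ProjectMapper#queryByCodes lookup and the permission value 7 are assumptions, while isAdmin, putMsg, Constants.COMMA, and projectUserMapper.deleteProjectRelation are helpers the surrounding code already uses.

// Hypothetical sketch for org.apache.dolphinscheduler.api.service.impl.UsersServiceImpl;
// queryByCodes and the perm value are illustrative assumptions.
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> grantProjectByCode(User loginUser, int userId, String projectCodes) {
    Map<String, Object> result = new HashMap<>();
    // only an admin may grant projects
    if (!isAdmin(loginUser)) {
        putMsg(result, Status.USER_NO_OPERATION_PERM);
        return result;
    }
    // the target user must exist
    User tempUser = userMapper.selectById(userId);
    if (tempUser == null) {
        putMsg(result, Status.USER_NOT_EXIST, userId);
        return result;
    }
    // parse the comma-separated codes and resolve them to projects
    List<Long> codeList = Arrays.stream(projectCodes.split(Constants.COMMA))
            .map(Long::parseLong)
            .collect(Collectors.toList());
    List<Project> projects = projectMapper.queryByCodes(codeList);
    // regrant: drop the user's existing relations, then insert one relation row per project
    projectUserMapper.deleteProjectRelation(0, userId);
    Date now = new Date();
    for (Project project : projects) {
        ProjectUser projectUser = new ProjectUser();
        projectUser.setUserId(userId);
        projectUser.setProjectId(project.getId());
        projectUser.setPerm(7);
        projectUser.setCreateTime(now);
        projectUser.setUpdateTime(now);
        projectUserMapper.insert(projectUser);
    }
    putMsg(result, Status.SUCCESS);
    return result;
}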
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.impl.UsersServiceImpl; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.EncryptionUtils; import org.apache.dolphinscheduler.dao.entity.AlertGroup; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; import java.util.List; import java.util.Map; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * users service test */ @RunWith(MockitoJUnitRunner.class) public class UsersServiceTest { private static final Logger logger = LoggerFactory.getLogger(UsersServiceTest.class); @InjectMocks private UsersServiceImpl usersService; @Mock private UserMapper userMapper; @Mock private AccessTokenMapper accessTokenMapper; @Mock private TenantMapper tenantMapper; @Mock private ResourceMapper resourceMapper; @Mock private AlertGroupMapper alertGroupMapper; @Mock private DataSourceUserMapper datasourceUserMapper; @Mock private ProjectUserMapper projectUserMapper; @Mock private ResourceUserMapper 
resourceUserMapper; @Mock private UDFUserMapper udfUserMapper; @Mock private ProjectMapper projectMapper; private String queueName = "UsersServiceTestQueue"; @Before public void before() { } @After public void after() { } @Test public void testCreateUserForLdap() { String userName = "user1"; String email = "user1@ldap.com"; User user = usersService.createUser(UserType.ADMIN_USER, userName, email); Assert.assertNotNull(user); } @Test public void testCreateUser() { User user = new User(); user.setUserType(UserType.ADMIN_USER); String userName = "userTest0001~"; String userPassword = "userTest"; String email = "123@qq.com"; int tenantId = Integer.MAX_VALUE; String phone = "13456432345"; int state = 1; try { //userName error Map<String, Object> result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userName = "userTest0001"; userPassword = "userTest000111111111111111"; //password error result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userPassword = "userTest0001"; email = "1q.com"; //email error result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); email = "122222@qq.com"; phone = "2233"; //phone error result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); phone = "13456432345"; //tenantId not exists result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.TENANT_NOT_EXIST, result.get(Constants.STATUS)); //success Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); result = usersService.createUser(user, userName, userPassword, email, 1, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { logger.error(Status.CREATE_USER_ERROR.getMsg(), e); Assert.assertTrue(false); } } @Test public void testQueryUser() { String userName = "userTest0001"; String userPassword = "userTest0001"; when(userMapper.queryUserByNamePassword(userName, EncryptionUtils.getMd5(userPassword))).thenReturn(getGeneralUser()); User queryUser = usersService.queryUser(userName, userPassword); logger.info(queryUser.toString()); Assert.assertTrue(queryUser != null); } @Test public void testSelectByIds() { List<Integer> ids = new ArrayList<>(); List<User> users = usersService.queryUser(ids); Assert.assertTrue(users.isEmpty()); ids.add(1); List<User> userList = new ArrayList<>(); userList.add(new User()); when(userMapper.selectByIds(ids)).thenReturn(userList); List<User> userList1 = usersService.queryUser(ids); Assert.assertFalse(userList1.isEmpty()); } @Test public void testGetUserIdByName() { User user = new User(); user.setId(1); user.setUserType(UserType.ADMIN_USER); user.setUserName("test_user"); //user name null int userId = usersService.getUserIdByName(""); Assert.assertEquals(0, userId); //user not exist 
when(usersService.queryUser(user.getUserName())).thenReturn(null); int userNotExistId = usersService.getUserIdByName(user.getUserName()); Assert.assertEquals(-1, userNotExistId); //user exist when(usersService.queryUser(user.getUserName())).thenReturn(user); int userExistId = usersService.getUserIdByName(user.getUserName()); Assert.assertEquals(user.getId(), userExistId); } @Test public void testQueryUserList() { User user = new User(); //no operate Map<String, Object> result = usersService.queryUserList(user); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success user.setUserType(UserType.ADMIN_USER); when(userMapper.selectList(null)).thenReturn(getUserList()); result = usersService.queryUserList(user); List<User> userList = (List<User>) result.get(Constants.DATA_LIST); Assert.assertTrue(userList.size() > 0); } @Test public void testQueryUserListPage() { User user = new User(); IPage<User> page = new Page<>(1, 10); page.setRecords(getUserList()); when(userMapper.queryUserPaging(any(Page.class), eq("userTest"))).thenReturn(page); //no operate Result result = usersService.queryUserList(user, "userTest", 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getCode(), (int) result.getCode()); //success user.setUserType(UserType.ADMIN_USER); result = usersService.queryUserList(user, "userTest", 1, 10); Assert.assertEquals(Status.SUCCESS.getCode(), (int) result.getCode()); PageInfo<User> pageInfo = (PageInfo<User>) result.getData(); Assert.assertTrue(pageInfo.getTotalList().size() > 0); } @Test public void testUpdateUser() { String userName = "userTest0001"; String userPassword = "userTest0001"; try { //user not exist Map<String, Object> result = usersService.updateUser(getLoginUser(), 0, userName, userPassword, "3443@qq.com", 1, "13457864543", "queue", 1); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); logger.info(result.toString()); //success when(userMapper.selectById(1)).thenReturn(getUser()); result = usersService.updateUser(getLoginUser(), 1, userName, userPassword, "32222s@qq.com", 1, "13457864543", "queue", 1); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { logger.error("update user error", e); Assert.assertTrue(false); } } @Test public void testDeleteUserById() { User loginUser = new User(); try { when(userMapper.queryTenantCodeByUserId(1)).thenReturn(getUser()); when(userMapper.selectById(1)).thenReturn(getUser()); when(accessTokenMapper.deleteAccessTokenByUserId(1)).thenReturn(0); //no operate Map<String, Object> result = usersService.deleteUserById(loginUser, 3); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); // user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.deleteUserById(loginUser, 3); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); // user is project owner Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(Lists.newArrayList(new Project())); result = usersService.deleteUserById(loginUser, 1); Assert.assertEquals(Status.TRANSFORM_PROJECT_OWNERSHIP, result.get(Constants.STATUS)); //success Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(null); result = usersService.deleteUserById(loginUser, 1); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, 
result.get(Constants.STATUS)); } catch (Exception e) { logger.error("delete user error", e); Assert.assertTrue(false); } } @Test public void testGrantProject() { when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); String projectIds = "100000,120000"; Map<String, Object> result = usersService.grantProject(loginUser, 1, projectIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantProject(loginUser, 2, projectIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success result = usersService.grantProject(loginUser, 1, projectIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testGrantResources() { String resourceIds = "100000,120000"; when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); Map<String, Object> result = usersService.grantResources(loginUser, 1, resourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantResources(loginUser, 2, resourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success when(resourceMapper.selectById(Mockito.anyInt())).thenReturn(getResource()); when(resourceUserMapper.deleteResourceUser(1, 0)).thenReturn(1); result = usersService.grantResources(loginUser, 1, resourceIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testGrantUDFFunction() { String udfIds = "100000,120000"; when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); Map<String, Object> result = usersService.grantUDFFunction(loginUser, 1, udfIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantUDFFunction(loginUser, 2, udfIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success when(udfUserMapper.deleteByUserId(1)).thenReturn(1); result = usersService.grantUDFFunction(loginUser, 1, udfIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testGrantDataSource() { String datasourceIds = "100000,120000"; when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); Map<String, Object> result = usersService.grantDataSource(loginUser, 1, datasourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantDataSource(loginUser, 2, datasourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success when(datasourceUserMapper.deleteByUserId(Mockito.anyInt())).thenReturn(1); result = usersService.grantDataSource(loginUser, 1, datasourceIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } private User getLoginUser() { User loginUser = new User(); loginUser.setId(1); 
loginUser.setUserType(UserType.ADMIN_USER); return loginUser; } @Test public void getUserInfo() { User loginUser = new User(); loginUser.setUserName("admin"); loginUser.setUserType(UserType.ADMIN_USER); // get admin user Map<String, Object> result = usersService.getUserInfo(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); User tempUser = (User) result.get(Constants.DATA_LIST); //check userName Assert.assertEquals("admin", tempUser.getUserName()); //get general user loginUser.setUserType(null); loginUser.setId(1); when(userMapper.queryDetailsById(1)).thenReturn(getGeneralUser()); when(alertGroupMapper.queryByUserId(1)).thenReturn(getAlertGroups()); result = usersService.getUserInfo(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); tempUser = (User) result.get(Constants.DATA_LIST); //check userName Assert.assertEquals("userTest0001", tempUser.getUserName()); } @Test public void testQueryAllGeneralUsers() { User loginUser = new User(); //no operate Map<String, Object> result = usersService.queryAllGeneralUsers(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success loginUser.setUserType(UserType.ADMIN_USER); when(userMapper.queryAllGeneralUser()).thenReturn(getUserList()); result = usersService.queryAllGeneralUsers(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<User> userList = (List<User>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(userList)); } @Test public void testVerifyUserName() { //not exist user Result result = usersService.verifyUserName("admin89899"); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); //exist user when(userMapper.queryByUserNameAccurately("userTest0001")).thenReturn(getUser()); result = usersService.verifyUserName("userTest0001"); logger.info(result.toString()); Assert.assertEquals(Status.USER_NAME_EXIST.getMsg(), result.getMsg()); } @Test public void testUnauthorizedUser() { User loginUser = new User(); when(userMapper.selectList(null)).thenReturn(getUserList()); when(userMapper.queryUserListByAlertGroupId(2)).thenReturn(getUserList()); //no operate Map<String, Object> result = usersService.unauthorizedUser(loginUser, 2); logger.info(result.toString()); loginUser.setUserType(UserType.ADMIN_USER); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success result = usersService.unauthorizedUser(loginUser, 2); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testAuthorizedUser() { User loginUser = new User(); when(userMapper.queryUserListByAlertGroupId(2)).thenReturn(getUserList()); //no operate Map<String, Object> result = usersService.authorizedUser(loginUser, 2); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success loginUser.setUserType(UserType.ADMIN_USER); result = usersService.authorizedUser(loginUser, 2); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<User> userList = (List<User>) result.get(Constants.DATA_LIST); logger.info(result.toString()); Assert.assertTrue(CollectionUtils.isNotEmpty(userList)); } @Test public void testRegisterUser() { String userName = "userTest0002~"; String userPassword = "userTest"; String 
repeatPassword = "userTest"; String email = "123@qq.com"; try { //userName error Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userName = "userTest0002"; userPassword = "userTest000111111111111111"; //password error result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userPassword = "userTest0002"; email = "1q.com"; //email error result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //repeatPassword error email = "7400@qq.com"; repeatPassword = "userPassword"; result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //success repeatPassword = "userTest0002"; result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { Assert.assertTrue(false); } } @Test public void testActivateUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); String userName = "userTest0002~"; try { //not admin Map<String, Object> result = usersService.activateUser(user, userName); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //userName error user.setUserType(UserType.ADMIN_USER); result = usersService.activateUser(user, userName); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //user not exist userName = "userTest10013"; result = usersService.activateUser(user, userName); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //user state error userName = "userTest0001"; when(userMapper.queryByUserNameAccurately(userName)).thenReturn(getUser()); result = usersService.activateUser(user, userName); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //success when(userMapper.queryByUserNameAccurately(userName)).thenReturn(getDisabledUser()); result = usersService.activateUser(user, userName); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { Assert.assertTrue(false); } } @Test public void testBatchActivateUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); List<String> userNames = new ArrayList<>(); userNames.add("userTest0001"); userNames.add("userTest0002"); userNames.add("userTest0003~"); userNames.add("userTest0004"); try { //not admin Map<String, Object> result = usersService.batchActivateUser(user, userNames); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //batch activate user names user.setUserType(UserType.ADMIN_USER); when(userMapper.queryByUserNameAccurately("userTest0001")).thenReturn(getUser()); when(userMapper.queryByUserNameAccurately("userTest0002")).thenReturn(getDisabledUser()); result = usersService.batchActivateUser(user, userNames); Map<String, Object> responseData = (Map<String, Object>) result.get(Constants.DATA_LIST); Map<String, Object> successData = (Map<String, Object>) responseData.get("success"); int totalSuccess = (Integer) successData.get("sum"); Map<String, Object> failedData = (Map<String, Object>) responseData.get("failed"); int 
totalFailed = (Integer) failedData.get("sum"); Assert.assertEquals(1, totalSuccess); Assert.assertEquals(3, totalFailed); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { Assert.assertTrue(false); } } /** * get disabled user */ private User getDisabledUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); user.setUserName("userTest0001"); user.setUserPassword("userTest0001"); user.setState(0); return user; } /** * get user */ private User getGeneralUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); user.setUserName("userTest0001"); user.setUserPassword("userTest0001"); return user; } private List<User> getUserList() { List<User> userList = new ArrayList<>(); userList.add(getGeneralUser()); return userList; } /** * get user */ private User getUser() { User user = new User(); user.setUserType(UserType.ADMIN_USER); user.setUserName("userTest0001"); user.setUserPassword("userTest0001"); user.setState(1); return user; } /** * get tenant * * @return tenant */ private Tenant getTenant() { Tenant tenant = new Tenant(); tenant.setId(1); return tenant; } /** * get resource * * @return resource */ private Resource getResource() { Resource resource = new Resource(); resource.setPid(-1); resource.setUserId(1); resource.setDescription("ResourcesServiceTest.jar"); resource.setAlias("ResourcesServiceTest.jar"); resource.setFullName("/ResourcesServiceTest.jar"); resource.setType(ResourceType.FILE); return resource; } private List<AlertGroup> getAlertGroups() { List<AlertGroup> alertGroups = new ArrayList<>(); AlertGroup alertGroup = new AlertGroup(); alertGroups.add(alertGroup); return alertGroups; } }
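
The record that follows (issue 7110) requests granting projects by project code; a companion unit test would reuse the Mockito pattern shown in testGrantProject above. A minimal sketch, assuming a usersService.grantProjectByCode(loginUser, userId, projectCodes) method — the method name comes from the issue's use case, but the exact signature, the sample codes, and the status handling here are assumptions:

@Test
public void testGrantProjectByCode() {
    // mirror the stubbing used in testGrantProject: user id 1 exists
    when(userMapper.selectById(1)).thenReturn(getUser());
    User loginUser = new User();
    String projectCodes = "3682329499136,3643998558592"; // hypothetical project codes

    // no operate: a general user may not grant projects
    Map<String, Object> result = usersService.grantProjectByCode(loginUser, 1, projectCodes);
    Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

    // user not exist: id 2 was never stubbed
    loginUser.setUserType(UserType.ADMIN_USER);
    result = usersService.grantProjectByCode(loginUser, 2, projectCodes);
    Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

    // success
    result = usersService.grantProjectByCode(loginUser, 1, projectCodes);
    Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}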
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I have searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature request. ### Description In third-party integration scenarios, support granting a project to a user by its project code. ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Required | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes, I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProjectMapper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.ibatis.annotations.Param; import java.util.List; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.baomidou.mybatisplus.core.metadata.IPage; /** * project mapper interface */ public interface ProjectMapper extends BaseMapper<Project> { /** * query project detail by code * @param projectCode projectCode * @return project */ Project queryByCode(@Param("projectCode") long projectCode); /** * TODO: delete * query project detail by id * @param projectId projectId * @return project */ Project queryDetailById(@Param("projectId") int projectId); /** * query project detail by code * @param projectCode projectCode * @return project */ Project queryDetailByCode(@Param("projectCode") long projectCode); /** * query project by name * @param projectName projectName * @return project */ Project queryByName(@Param("projectName") String projectName); /** * project page * @param page page * @param userId userId * @param searchName searchName * @return project Ipage */ IPage<Project> queryProjectListPaging(IPage<Project> page, @Param("userId") int userId, @Param("searchName") String searchName); /** * query create project user * @param userId userId * @return project list */ List<Project> queryProjectCreatedByUser(@Param("userId") int userId); /** * query authed project list by userId * @param userId userId * @return project list */ List<Project> queryAuthedProjectListByUserId(@Param("userId") int userId); /** * query relation project list by userId * @param userId userId * @return project list */ List<Project> queryRelationProjectListByUserId(@Param("userId") int userId); /** * query project except userId * @param userId userId * @return project list */ List<Project> queryProjectExceptUserId(@Param("userId") int userId); /** * query project list by userId * @param userId * @return */ List<Project> queryProjectCreatedAndAuthorizedByUserId(@Param("userId") int userId); /** * query project name and user name by processInstanceId. * @param processInstanceId processInstanceId * @return projectName and userName */ ProjectUser queryProjectWithUserByProcessInstanceId(@Param("processInstanceId") int processInstanceId); /** * query all project * @return projectList */ List<Project> queryAllProject(); }
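
The mapper above only resolves one project per call via queryByCode. For the batch grant-by-code flow described in issue 7110, a bulk lookup would be a natural addition; a minimal sketch of such a method follows — the name queryByCodes and its parameter shape are assumptions for illustration, not confirmed by the source (java.util.Collection would need to be imported):

/**
 * query projects by a collection of codes (hypothetical batch variant of queryByCode)
 * @param projectCodes collection of project codes
 * @return project list
 */
List<Project> queryByCodes(@Param("projectCodes") Collection<Long> projectCodes);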
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,110
[Feature][dolphinscheduler-api] support grant project by code
### Search before asking - [X] I have searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature request. ### Description In third-party integration scenarios, support granting a project to a user by its project code. ### Use case ## API ## org.apache.dolphinscheduler.api.controller.UsersController#grantProjectByCode ## Request ## | Name | Type | Required | | :--- | :--- | :---: | | userId | Int | ✓ | | projectCodes | String | ✓ | ## Response ## Same as `org.apache.dolphinscheduler.api.controller.UsersController#grantProject` ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes, I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7110
https://github.com/apache/dolphinscheduler/pull/7111
fa8ccd3855dfaa19d73cd7541184938f227bb4b7
27ea43d5dfda2556fa5fb02179e7c906408007ad
"2021-12-01T16:08:53Z"
java
"2021-12-04T13:43:14Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProjectMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectMapper"> <sql id="baseSql"> id, name, code, description, user_id, flag, create_time, update_time </sql> <sql id="baseSqlV2"> ${alias}.id, ${alias}.name, ${alias}.code, ${alias}.description, ${alias}.user_id, ${alias}.flag, ${alias}.create_time, ${alias}.update_time </sql> <select id="queryByCode" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where code = #{projectCode} </select> <select id="queryDetailByCode" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name from t_ds_project p join t_ds_user u on p.user_id = u.id where p.code = #{projectCode} </select> <select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name from t_ds_project p join t_ds_user u on p.user_id = u.id where p.id = #{projectId} </select> <select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name from t_ds_project p join t_ds_user u on p.user_id = u.id where p.name = #{projectName} limit 1 </select> <select id="queryProjectListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name, (SELECT COUNT(*) FROM t_ds_process_definition AS def WHERE def.project_code = p.code) AS def_count, (SELECT COUNT(*) FROM t_ds_process_definition_log def, t_ds_process_instance inst WHERE def.code = inst.process_definition_code and def.version = inst.process_definition_version AND def.project_code = p.code AND inst.state=1 ) as inst_running_count from t_ds_project p left join t_ds_user u on u.id=p.user_id where 1=1 <if test="userId != 0"> and p.id in (select project_id from t_ds_relation_project_user where user_id=#{userId} union select id as project_id from t_ds_project where user_id=#{userId} ) </if> <if test="searchName!=null and searchName != ''"> AND (p.name LIKE concat('%', #{searchName}, '%') OR p.description LIKE concat('%', #{searchName}, '%') ) </if> order by p.create_time desc </select> <select id="queryAuthedProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> from t_ds_project 
p,t_ds_relation_project_user rel where p.id = rel.project_id and rel.user_id= #{userId} </select> <select id="queryRelationProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> from t_ds_project p left join t_ds_relation_project_user rel on p.id = rel.project_id where 1=1 <if test="userId != 0 "> and rel.user_id= #{userId} </if> </select> <select id="queryProjectExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where user_id <![CDATA[ <> ]]> #{userId} </select> <select id="queryProjectCreatedByUser" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where user_id = #{userId} </select> <select id="queryProjectCreatedAndAuthorizedByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where id in (select project_id from t_ds_relation_project_user where user_id=#{userId} union select id as project_id from t_ds_project where user_id=#{userId}) </select> <select id="queryProjectWithUserByProcessInstanceId" resultType="org.apache.dolphinscheduler.dao.entity.ProjectUser"> select dp.id project_id, dp.name project_name, u.user_name user_name from t_ds_process_instance di join t_ds_process_definition dpd on di.process_definition_code = dpd.code join t_ds_project dp on dpd.project_code = dp.code join t_ds_user u on dp.user_id = u.id where di.id = #{processInstanceId}; </select> <select id="queryAllProject" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select * from t_ds_project </select> </mapper>
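
At the service layer, grant-by-code can resolve each code through the queryByCode mapper method shown above and then write the user-project relation, mirroring the existing id-based grantProject. A minimal sketch under those assumptions — grantProjectByCode is named in the issue's use case, but the body below (the permission value, the skip-unknown-codes policy, and the relation writes) is illustrative and not taken from the source:

public Map<String, Object> grantProjectByCode(User loginUser, int userId, String projectCodes) {
    Map<String, Object> result = new HashMap<>();
    // only admins may grant projects
    if (!isAdmin(loginUser)) {
        putMsg(result, Status.USER_NO_OPERATION_PERM);
        return result;
    }
    // the target user must exist
    if (userMapper.selectById(userId) == null) {
        putMsg(result, Status.USER_NOT_EXIST, userId);
        return result;
    }
    for (String code : projectCodes.split(",")) {
        Project project = projectMapper.queryByCode(Long.parseLong(code));
        if (project == null) {
            continue; // unknown codes are skipped here; reporting them instead is a design choice
        }
        ProjectUser projectUser = new ProjectUser();
        projectUser.setUserId(userId);
        projectUser.setProjectId(project.getId());
        projectUser.setPerm(7); // read/write/execute, as the id-based grant typically does
        projectUser.setCreateTime(new Date());
        projectUser.setUpdateTime(new Date());
        projectUserMapper.insert(projectUser);
    }
    putMsg(result, Status.SUCCESS);
    return result;
}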
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I have searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature request. ### Description Rename the "main jar package" field to "main program package". ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes, I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="flink-model"> <m-list-box> <div slot="text">{{$t('Program Type')}}</div> <div slot="content"> <el-select style="width: 130px;" size="small" v-model="programType" :disabled="isDetails"> <el-option v-for="city in programTypeList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <m-list-box v-if="programType !== 'PYTHON'"> <div slot="text">{{$t('Main Class')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="mainClass" :placeholder="$t('Please enter main class')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Main Jar Package')}}</div> <div slot="content"> <treeselect v-model="mainJar" maxHeight="200" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :disabled="isDetails" :placeholder="$t('Please enter main jar package')"> <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div> </treeselect> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Deploy Mode')}}</div> <div slot="content"> <el-radio-group v-model="deployMode" size="small"> <el-radio :label="'cluster'" :disabled="isDetails"></el-radio> <el-radio :label="'local'" :disabled="isDetails"></el-radio> </el-radio-group> </div> </m-list-box> <m-list-box v-if="deployMode === 'cluster'"> <div slot="text">{{$t('Flink Version')}}</div> <div slot="content"> <el-select style="width: 100px;" size="small" v-model="flinkVersion" :disabled="isDetails"> <el-option v-for="version in flinkVersionList" :key="version.code" :value="version.code" :label="version.code"> </el-option> </el-select> </div> </m-list-box> <m-list-box v-if="deployMode === 'cluster'"> <div slot="text">{{$t('App Name')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="appName" :placeholder="$t('Please enter app name(optional)')"> </el-input> </div> </m-list-box> <m-list-4-box v-if="deployMode === 'cluster'"> <div slot="text">{{$t('JobManager Memory')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="jobManagerMemory" :placeholder="$t('Please enter JobManager memory')"> </el-input> </div> <div slot="text-2">{{$t('TaskManager Memory')}}</div> <div slot="content-2"> <el-input :disabled="isDetails" type="input" size="small" v-model="taskManagerMemory" :placeholder="$t('Please enter TaskManager memory')"> </el-input> </div> </m-list-4-box> <m-list-4-box v-if="deployMode === 'cluster'"> <div slot="text">{{$t('Slot Number')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="slot" :placeholder="$t('Please enter Slot number')"> </el-input> </div> <div slot="text-2" v-if="flinkVersion === 
'<1.10'">{{$t('TaskManager Number')}}</div> <div slot="content-2" v-if="flinkVersion === '<1.10'"> <el-input :disabled="isDetails" type="input" size="small" v-model="taskManager" :placeholder="$t('Please enter TaskManager number')"> </el-input> </div> </m-list-4-box> <m-list-4-box> <div slot="text">{{$t('Parallelism')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="parallelism" :placeholder="$t('Please enter Parallelism')"> </el-input> </div> </m-list-4-box> <m-list-box> <div slot="text">{{$t('Main Arguments')}}</div> <div slot="content"> <el-input :autosize="{minRows:2}" :disabled="isDetails" type="textarea" size="small" v-model="mainArgs" :placeholder="$t('Please enter main arguments')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Option Parameters')}}</div> <div slot="content"> <el-input :disabled="isDetails" :autosize="{minRows:2}" type="textarea" size="small" v-model="others" :placeholder="$t('Please enter option parameters')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Resources')}}</div> <div slot="content"> <treeselect v-model="resourceList" :multiple="true" maxHeight="200" :options="mainJarList" :normalizer="normalizer" :disabled="isDetails" :value-consists-of="valueConsistsOf" :placeholder="$t('Please select resources')"> <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}<span class="copy-path" @mousedown="_copyPath($event, node)" >&nbsp; <em class="el-icon-copy-document" data-container="body" data-toggle="tooltip" :title="$t('Copy path')" ></em> &nbsp; </span></div> </treeselect> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Custom Parameters')}}</div> <div slot="content"> <m-local-params ref="refLocalParams" @on-local-params="_onLocalParams" :udp-list="localParams" :hide="false"> </m-local-params> </div> </m-list-box> </div> </template> <script> import _ from 'lodash' import i18n from '@/module/i18n' import mLocalParams from './_source/localParams' import mListBox from './_source/listBox' import mList4Box from './_source/list4Box' import Treeselect from '@riophae/vue-treeselect' import '@riophae/vue-treeselect/dist/vue-treeselect.css' import disabledState from '@/module/mixin/disabledState' import Clipboard from 'clipboard' import { diGuiTree, searchTree } from './_source/resourceTree' export default { name: 'flink', data () { return { valueConsistsOf: 'LEAF_PRIORITY', // Main function class mainClass: '', // Master jar package mainJar: null, // Master jar package(List) mainJarLists: [], mainJarList: [], // Deployment method deployMode: 'cluster', // Resource(list) resourceList: [], // Cache ResourceList cacheResourceList: [], // Custom function localParams: [], // Slot number slot: 1, // Parallelism parallelism: 1, // TaskManager mumber taskManager: '2', // JobManager memory jobManagerMemory: '1G', // TaskManager memory taskManagerMemory: '2G', // Flink app name appName: '', // Main arguments mainArgs: '', // Option parameters others: '', // Program type programType: 'SCALA', // Program type(List) programTypeList: [{ code: 'JAVA' }, { code: 'SCALA' }, { code: 'PYTHON' }], flinkVersion: '<1.10', // Flink Versions(List) flinkVersionList: [{ code: '<1.10' }, { code: '>=1.10' }], normalizer (node) { return { label: node.name } }, allNoResources: [], noRes: [] } }, props: { backfillItem: Object }, mixins: [disabledState], methods: { _copyPath (e, node) { e.stopPropagation() let clipboard = new Clipboard('.copy-path', { text: function () { return 
node.raw.fullName } }) clipboard.on('success', handler => { this.$message.success(`${i18n.$t('Copy success')}`) // Free memory clipboard.destroy() }) clipboard.on('error', handler => { // Copy is not supported this.$message.warning(`${i18n.$t('The browser does not support automatic copying')}`) // Free memory clipboard.destroy() }) }, /** * getResourceId */ marjarId (name) { this.store.dispatch('dag/getResourceId', { type: 'FILE', fullName: '/' + name }).then(res => { this.mainJar = res.id }).catch(e => { this.$message.error(e.msg || '') }) }, /** * return localParams */ _onLocalParams (a) { this.localParams = a }, /** * return resourceList */ _onResourcesData (a) { this.resourceList = a }, /** * cache resourceList */ _onCacheResourcesData (a) { this.cacheResourceList = a }, /** * verification */ _verification () { if (this.programType !== 'PYTHON' && !this.mainClass) { this.$message.warning(`${i18n.$t('Please enter main class')}`) return false } if (!this.mainJar) { this.$message.warning(`${i18n.$t('Please enter main jar package')}`) return false } if (!this.jobManagerMemory) { this.$message.warning(`${i18n.$t('Please enter JobManager memory')}`) return false } if (!Number.isInteger(parseInt(this.jobManagerMemory))) { this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`) return false } if (!this.taskManagerMemory) { this.$message.warning(`${i18n.$t('Please enter TaskManager memory')}`) return false } if (!Number.isInteger(parseInt(this.taskManagerMemory))) { this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`) return false } if (!Number.isInteger(parseInt(this.slot))) { this.$message.warning(`${i18n.$t('Please enter Slot number')}`) return false } if (!Number.isInteger(parseInt(this.parallelism))) { this.$message.warning(`${i18n.$t('Please enter Parallelism')}`) return false } if (this.flinkVersion === '<1.10' && !Number.isInteger(parseInt(this.taskManager))) { this.$message.warning(`${i18n.$t('Please enter TaskManager number')}`) return false } // noRes if (this.noRes.length > 0) { this.$message.warning(`${i18n.$t('Please delete all non-existent resources')}`) return false } // localParams Subcomponent verification if (!this.$refs.refLocalParams._verifProp()) { return false } // storage this.$emit('on-params', { mainClass: this.mainClass, mainJar: { id: this.mainJar }, deployMode: this.deployMode, resourceList: _.map(this.resourceList, v => { return { id: v } }), localParams: this.localParams, flinkVersion: this.flinkVersion, slot: this.slot, parallelism: this.parallelism, taskManager: this.taskManager, jobManagerMemory: this.jobManagerMemory, taskManagerMemory: this.taskManagerMemory, appName: this.appName, mainArgs: this.mainArgs, others: this.others, programType: this.programType }) return true }, dataProcess (backResource) { let isResourceId = [] let resourceIdArr = [] if (this.resourceList.length > 0) { this.resourceList.forEach(v => { this.mainJarList.forEach(v1 => { if (searchTree(v1, v)) { isResourceId.push(searchTree(v1, v)) } }) }) resourceIdArr = isResourceId.map(item => { return item.id }) Array.prototype.diff = function (a) { return this.filter(function (i) { return a.indexOf(i) < 0 }) } let diffSet = this.resourceList.diff(resourceIdArr) let optionsCmp = [] if (diffSet.length > 0) { diffSet.forEach(item => { backResource.forEach(item1 => { if (item === item1.id || item === item1.res) { optionsCmp.push(item1) } }) }) } let noResources = [{ id: -1, name: $t('Unauthorized or deleted resources'), fullName: '/' + $t('Unauthorized or 
deleted resources'), children: [] }] if (optionsCmp.length > 0) { this.allNoResources = optionsCmp optionsCmp = optionsCmp.map(item => { return { id: item.id, name: item.name, fullName: item.res } }) optionsCmp.forEach(item => { item.isNew = true }) noResources[0].children = optionsCmp this.mainJarList = this.mainJarList.concat(noResources) } } } }, watch: { // Listening type programType (type) { if (type === 'PYTHON') { this.mainClass = '' } }, // Watch the cacheParams cacheParams (val) { this.$emit('on-cache-params', val) }, resourceIdArr (arr) { let result = [] arr.forEach(item => { this.allNoResources.forEach(item1 => { if (item.id === item1.id) { // resultBool = true result.push(item1) } }) }) this.noRes = result } }, computed: { resourceIdArr () { let isResourceId = [] let resourceIdArr = [] if (this.resourceList.length > 0) { this.resourceList.forEach(v => { this.mainJarList.forEach(v1 => { if (searchTree(v1, v)) { isResourceId.push(searchTree(v1, v)) } }) }) resourceIdArr = isResourceId.map(item => { return { id: item.id, name: item.name, res: item.fullName } }) } return resourceIdArr }, cacheParams () { return { mainClass: this.mainClass, mainJar: { id: this.mainJar }, deployMode: this.deployMode, resourceList: this.resourceIdArr, localParams: this.localParams, slot: this.slot, parallelism: this.parallelism, taskManager: this.taskManager, jobManagerMemory: this.jobManagerMemory, taskManagerMemory: this.taskManagerMemory, appName: this.appName, mainArgs: this.mainArgs, others: this.others, programType: this.programType } } }, created () { let item = this.store.state.dag.resourcesListS let items = this.store.state.dag.resourcesListJar diGuiTree(item) diGuiTree(items) this.mainJarList = item this.mainJarLists = items let o = this.backfillItem // Non-null objects represent backfill if (!_.isEmpty(o)) { this.mainClass = o.params.mainClass || '' if (o.params.mainJar.res) { this.marjarId(o.params.mainJar.res) } else if (o.params.mainJar.res === '') { this.mainJar = '' } else { this.mainJar = o.params.mainJar.id || '' } this.deployMode = o.params.deployMode || '' this.flinkVersion = o.params.flinkVersion || '<1.10' this.slot = o.params.slot || 1 this.parallelism = o.params.parallelism || 1 this.taskManager = o.params.taskManager || '2' this.jobManagerMemory = o.params.jobManagerMemory || '1G' this.taskManagerMemory = o.params.taskManagerMemory || '2G' this.appName = o.params.appName || '' this.mainArgs = o.params.mainArgs || '' this.others = o.params.others this.programType = o.params.programType || 'SCALA' // backfill resourceList let backResource = o.params.resourceList || [] let resourceList = o.params.resourceList || [] if (resourceList.length) { _.map(resourceList, v => { if (!v.id) { this.store.dispatch('dag/getResourceId', { type: 'FILE', fullName: '/' + v.res }).then(res => { this.resourceList.push(res.id) this.dataProcess(backResource) }).catch(e => { this.resourceList.push(v.res) this.dataProcess(backResource) }) } else { this.resourceList.push(v.id) this.dataProcess(backResource) } }) this.cacheResourceList = resourceList } // backfill localParams let localParams = o.params.localParams || [] if (localParams.length) { this.localParams = localParams } } }, mounted () { }, components: { mLocalParams, mListBox, mList4Box, Treeselect } } </script>
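
The dataProcess routine in the form above flags selected resource ids that no longer appear in the authorized resource tree and groups them under "Unauthorized or deleted resources". Its core (the Array.prototype.diff patch) is a plain set difference; a standalone Java sketch of the same idea, purely illustrative and not code from the repository:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class ResourceDiff {

    /** return the selected ids that are absent from the authorized set */
    static List<Integer> unauthorized(List<Integer> selected, List<Integer> authorized) {
        Set<Integer> known = new HashSet<>(authorized);
        return selected.stream()
                .filter(id -> !known.contains(id)) // same filtering the Vue code does with diff()
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // the tree authorizes 1 and 2; the task still references 9 -> 9 is unauthorized/deleted
        System.out.println(unauthorized(Arrays.asList(1, 2, 9), Arrays.asList(1, 2))); // prints [9]
    }
}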
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I have searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature request. ### Description Rename the "main jar package" field to "main program package". ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes, I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="mr-model"> <m-list-box> <div slot="text">{{$t('Program Type')}}</div> <div slot="content"> <el-select v-model="programType" :disabled="isDetails" style="width: 110px;" size="small"> <el-option v-for="city in programTypeList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <m-list-box v-if="programType !== 'PYTHON'"> <div slot="text">{{$t('Main Class')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="mainClass" :placeholder="$t('Please enter main class')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Main Jar Package')}}</div> <div slot="content"> <treeselect v-model="mainJar" maxHeight="200" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :value-consists-of="valueConsistsOf" :disabled="isDetails" :placeholder="$t('Please enter main jar package')"> <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div> </treeselect> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('App Name')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="appName" :placeholder="$t('Please enter app name(optional)')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Main Arguments')}}</div> <div slot="content"> <el-input :autosize="{minRows:2}" :disabled="isDetails" type="textarea" size="small" v-model="mainArgs" :placeholder="$t('Please enter main arguments')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Option Parameters')}}</div> <div slot="content"> <el-input :disabled="isDetails" :autosize="{minRows:2}" type="textarea" size="small" v-model="others" :placeholder="$t('Please enter option parameters')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Resources')}}</div> <div slot="content"> <treeselect v-model="resourceList" :multiple="true" maxHeight="200" :options="mainJarList" :normalizer="normalizer" :disabled="isDetails" :value-consists-of="valueConsistsOf" :placeholder="$t('Please select resources')"> <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}<span class="copy-path" @mousedown="_copyPath($event, node)" >&nbsp; <em class="el-icon-copy-document" data-container="body" data-toggle="tooltip" :title="$t('Copy path')" ></em> &nbsp; </span></div> </treeselect> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Custom Parameters')}}</div> <div slot="content"> <m-local-params ref="refLocalParams" @on-local-params="_onLocalParams" :udp-list="localParams" :hide="false"> </m-local-params> </div> </m-list-box> </div> </template> <script> import _ from 'lodash' import i18n from '@/module/i18n' import mListBox from 
'./_source/listBox' import mLocalParams from './_source/localParams' import Treeselect from '@riophae/vue-treeselect' import '@riophae/vue-treeselect/dist/vue-treeselect.css' import disabledState from '@/module/mixin/disabledState' import Clipboard from 'clipboard' import { diGuiTree, searchTree } from './_source/resourceTree' export default { name: 'mr', data () { return { valueConsistsOf: 'LEAF_PRIORITY', // Main function class mainClass: '', // Master jar package mainJar: null, // Main jar package (List) mainJarLists: [], mainJarList: [], // Resource(list) resourceList: [], // Cache ResourceList cacheResourceList: [], // Custom parameter localParams: [], // MR app name appName: '', // Main arguments mainArgs: '', // Option parameters others: '', // Program type programType: 'JAVA', // Program type(List) programTypeList: [{ code: 'JAVA' }, { code: 'PYTHON' }], normalizer (node) { return { label: node.name } }, allNoResources: [], noRes: [] } }, props: { backfillItem: Object }, mixins: [disabledState], methods: { _copyPath (e, node) { e.stopPropagation() let clipboard = new Clipboard('.copy-path', { text: function () { return node.raw.fullName } }) clipboard.on('success', handler => { this.$message.success(`${i18n.$t('Copy success')}`) // Free memory clipboard.destroy() }) clipboard.on('error', handler => { // Copy is not supported this.$message.warning(`${i18n.$t('The browser does not support automatic copying')}`) // Free memory clipboard.destroy() }) }, /** * getResourceId */ marjarId (name) { this.store.dispatch('dag/getResourceId', { type: 'FILE', fullName: '/' + name }).then(res => { this.mainJar = res.id }).catch(e => { this.$message.error(e.msg || '') }) }, /** * return localParams */ _onLocalParams (a) { this.localParams = a }, /** * return resourceList */ _onResourcesData (a) { this.resourceList = a }, /** * cache resourceList */ _onCacheResourcesData (a) { this.cacheResourceList = a }, dataProcess (backResource) { let isResourceId = [] let resourceIdArr = [] if (this.resourceList.length > 0) { this.resourceList.forEach(v => { this.mainJarList.forEach(v1 => { if (searchTree(v1, v)) { isResourceId.push(searchTree(v1, v)) } }) }) resourceIdArr = isResourceId.map(item => { return item.id }) Array.prototype.diff = function (a) { return this.filter(function (i) { return a.indexOf(i) < 0 }) } let diffSet = this.resourceList.diff(resourceIdArr) let optionsCmp = [] if (diffSet.length > 0) { diffSet.forEach(item => { backResource.forEach(item1 => { if (item === item1.id || item === item1.res) { optionsCmp.push(item1) } }) }) } let noResources = [{ id: -1, name: $t('Unauthorized or deleted resources'), fullName: '/' + $t('Unauthorized or deleted resources'), children: [] }] if (optionsCmp.length > 0) { this.allNoResources = optionsCmp optionsCmp = optionsCmp.map(item => { return { id: item.id, name: item.name, fullName: item.res } }) optionsCmp.forEach(item => { item.isNew = true }) noResources[0].children = optionsCmp this.mainJarList = this.mainJarList.concat(noResources) } } }, /** * verification */ _verification () { if (this.programType !== 'PYTHON' && !this.mainClass) { this.$message.warning(`${i18n.$t('Please enter main class')}`) return false } if (!this.mainJar) { this.$message.warning(`${i18n.$t('Please enter main jar package')}`) return false } // noRes if (this.noRes.length > 0) { this.$message.warning(`${i18n.$t('Please delete all non-existent resources')}`) return false } // localParams Subcomponent verification if (!this.$refs.refLocalParams._verifProp()) { return false } 
// storage this.$emit('on-params', { mainClass: this.mainClass, mainJar: { id: this.mainJar }, resourceList: _.map(this.resourceList, v => { return { id: v } }), localParams: this.localParams, appName: this.appName, mainArgs: this.mainArgs, others: this.others, programType: this.programType }) return true } }, watch: { /** * monitor */ programType (type) { if (type === 'PYTHON') { this.mainClass = '' } }, // Watch the cacheParams cacheParams (val) { this.$emit('on-cache-params', val) }, resourceIdArr (arr) { let result = [] arr.forEach(item => { this.allNoResources.forEach(item1 => { if (item.id === item1.id) { // resultBool = true result.push(item1) } }) }) this.noRes = result } }, computed: { resourceIdArr () { let isResourceId = [] let resourceIdArr = [] if (this.resourceList.length > 0) { this.resourceList.forEach(v => { this.mainJarList.forEach(v1 => { if (searchTree(v1, v)) { isResourceId.push(searchTree(v1, v)) } }) }) resourceIdArr = isResourceId.map(item => { return { id: item.id, name: item.name, res: item.fullName } }) } return resourceIdArr }, cacheParams () { return { mainClass: this.mainClass, mainJar: { id: this.mainJar }, resourceList: this.resourceIdArr, localParams: this.localParams, appName: this.appName, mainArgs: this.mainArgs, others: this.others, programType: this.programType } } }, created () { let item = this.store.state.dag.resourcesListS let items = this.store.state.dag.resourcesListJar diGuiTree(item) diGuiTree(items) this.mainJarList = item this.mainJarLists = items let o = this.backfillItem // Non-null objects represent backfill if (!_.isEmpty(o)) { this.mainClass = o.params.mainClass || '' if (o.params.mainJar.res) { this.marjarId(o.params.mainJar.res) } else if (o.params.mainJar.res === '') { this.mainJar = '' } else { this.mainJar = o.params.mainJar.id || '' } this.appName = o.params.appName || '' this.mainArgs = o.params.mainArgs || '' this.others = o.params.others this.programType = o.params.programType || 'JAVA' // backfill resourceList (backResource is declared before the loop because dataProcess reads it synchronously, as flink.vue does) let backResource = o.params.resourceList || [] let resourceList = o.params.resourceList || [] if (resourceList.length) { _.map(resourceList, v => { if (!v.id) { this.store.dispatch('dag/getResourceId', { type: 'FILE', fullName: '/' + v.res }).then(res => { this.resourceList.push(res.id) this.dataProcess(backResource) }).catch(e => { this.resourceList.push(v.res) this.dataProcess(backResource) }) } else { this.resourceList.push(v.id) this.dataProcess(backResource) } }) this.cacheResourceList = resourceList } // backfill localParams let localParams = o.params.localParams || [] if (localParams.length) { this.localParams = localParams } } }, mounted () { }, components: { mLocalParams, mListBox, Treeselect } } </script>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I have searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature request. ### Description Rename the "main jar package" field to "main program package". ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes, I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="spark-model"> <m-list-box> <div slot="text">{{$t('Program Type')}}</div> <div slot="content"> <el-select style="width: 130px;" size="small" v-model="programType" :disabled="isDetails"> <el-option v-for="city in programTypeList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Spark Version')}}</div> <div slot="content"> <el-select style="width: 130px;" size="small" v-model="sparkVersion" :disabled="isDetails"> <el-option v-for="city in sparkVersionList" :key="city.code" :value="city.code" :label="city.code"> </el-option> </el-select> </div> </m-list-box> <m-list-box v-if="programType !== 'PYTHON'"> <div slot="text">{{$t('Main Class')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="mainClass" :placeholder="$t('Please enter main class')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Main Jar Package')}}</div> <div slot="content"> <treeselect v-model="mainJar" maxHeight="200" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :disabled="isDetails" :placeholder="$t('Please enter main jar package')"> <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div> </treeselect> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Deploy Mode')}}</div> <div slot="content"> <el-radio-group v-model="deployMode" size="small"> <el-radio :label="'cluster'" :disabled="isDetails"></el-radio> <el-radio :label="'client'" :disabled="isDetails"></el-radio> <el-radio :label="'local'" :disabled="isDetails"></el-radio> </el-radio-group> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('App Name')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="appName" :placeholder="$t('Please enter app name(optional)')"> </el-input> </div> </m-list-box> <m-list-4-box> <div slot="text">{{$t('Driver Cores')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="driverCores" :placeholder="$t('Please enter Driver cores')"> </el-input> </div> <div slot="text-2">{{$t('Driver Memory')}}</div> <div slot="content-2"> <el-input :disabled="isDetails" type="input" size="small" v-model="driverMemory" :placeholder="$t('Please enter Driver memory')"> </el-input> </div> </m-list-4-box> <m-list-4-box> <div slot="text">{{$t('Executor Number')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="numExecutors" :placeholder="$t('Please enter Executor number')"> </el-input> </div> <div slot="text-2">{{$t('Executor Memory')}}</div> <div slot="content-2"> <el-input :disabled="isDetails" type="input" size="small" 
v-model="executorMemory" :placeholder="$t('Please enter Executor memory')"> </el-input> </div> </m-list-4-box> <m-list-4-box> <div slot="text">{{$t('Executor Cores')}}</div> <div slot="content"> <el-input :disabled="isDetails" type="input" size="small" v-model="executorCores" :placeholder="$t('Please enter Executor cores')"> </el-input> </div> </m-list-4-box> <m-list-box> <div slot="text">{{$t('Main Arguments')}}</div> <div slot="content"> <el-input :autosize="{minRows:2}" :disabled="isDetails" type="textarea" size="small" v-model="mainArgs" :placeholder="$t('Please enter main arguments')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Option Parameters')}}</div> <div slot="content"> <el-input :disabled="isDetails" :autosize="{minRows:2}" type="textarea" size="small" v-model="others" :placeholder="$t('Please enter option parameters')"> </el-input> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Resources')}}</div> <div slot="content"> <treeselect v-model="resourceList" :multiple="true" maxHeight="200" :options="mainJarList" :normalizer="normalizer" :value-consists-of="valueConsistsOf" :disabled="isDetails" :placeholder="$t('Please select resources')"> <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}<span class="copy-path" @mousedown="_copyPath($event, node)" >&nbsp; <em class="el-icon-copy-document" data-container="body" data-toggle="tooltip" :title="$t('Copy path')" ></em> &nbsp; </span></div> </treeselect> </div> </m-list-box> <m-list-box> <div slot="text">{{$t('Custom Parameters')}}</div> <div slot="content"> <m-local-params ref="refLocalParams" @on-local-params="_onLocalParams" :udp-list="localParams" :hide="false"> </m-local-params> </div> </m-list-box> </div> </template> <script> import _ from 'lodash' import i18n from '@/module/i18n' import mLocalParams from './_source/localParams' import mListBox from './_source/listBox' import mList4Box from './_source/list4Box' import Treeselect from '@riophae/vue-treeselect' import '@riophae/vue-treeselect/dist/vue-treeselect.css' import disabledState from '@/module/mixin/disabledState' import Clipboard from 'clipboard' import { diGuiTree, searchTree } from './_source/resourceTree' export default { name: 'spark', data () { return { valueConsistsOf: 'LEAF_PRIORITY', // Main function class mainClass: '', // Master jar package mainJar: null, // Master jar package(List) mainJarLists: [], mainJarList: [], // Deployment method deployMode: 'cluster', // Resource(list) resourceList: [], // Cache ResourceList cacheResourceList: [], // Custom function localParams: [], // Driver cores driverCores: 1, // Driver memory driverMemory: '512M', // Executor number numExecutors: 2, // Executor memory executorMemory: '2G', // Executor cores executorCores: 2, // Spark app name appName: '', // Main arguments mainArgs: '', // Option parameters others: '', // Program type programType: 'SCALA', // Program type(List) programTypeList: [{ code: 'JAVA' }, { code: 'SCALA' }, { code: 'PYTHON' }], // Spark version sparkVersion: 'SPARK2', // Spark version(LIst) sparkVersionList: [{ code: 'SPARK2' }, { code: 'SPARK1' }], normalizer (node) { return { label: node.name } }, allNoResources: [], noRes: [] } }, props: { backfillItem: Object }, mixins: [disabledState], methods: { _copyPath (e, node) { e.stopPropagation() let clipboard = new Clipboard('.copy-path', { text: function () { return node.raw.fullName } }) clipboard.on('success', handler => { this.$message.success(`${i18n.$t('Copy success')}`) // Free memory 
clipboard.destroy() }) clipboard.on('error', handler => { // Copy is not supported this.$message.warning(`${i18n.$t('The browser does not support automatic copying')}`) // Free memory clipboard.destroy() }) }, /** * getResourceId */ marjarId (name) { this.store.dispatch('dag/getResourceId', { type: 'FILE', fullName: '/' + name }).then(res => { this.mainJar = res.id }).catch(e => { this.$message.error(e.msg || '') }) }, /** * return localParams */ _onLocalParams (a) { this.localParams = a }, /** * return resourceList */ _onResourcesData (a) { this.resourceList = a }, /** * cache resourceList */ _onCacheResourcesData (a) { this.cacheResourceList = a }, dataProcess (backResource) { let isResourceId = [] let resourceIdArr = [] if (this.resourceList.length > 0) { this.resourceList.forEach(v => { this.mainJarList.forEach(v1 => { if (searchTree(v1, v)) { isResourceId.push(searchTree(v1, v)) } }) }) resourceIdArr = isResourceId.map(item => { return item.id }) Array.prototype.diff = function (a) { return this.filter(function (i) { return a.indexOf(i) < 0 }) } let diffSet = this.resourceList.diff(resourceIdArr) let optionsCmp = [] if (diffSet.length > 0) { diffSet.forEach(item => { backResource.forEach(item1 => { if (item === item1.id || item === item1.res) { optionsCmp.push(item1) } }) }) } let noResources = [{ id: -1, name: i18n.$t('Unauthorized or deleted resources'), fullName: '/' + i18n.$t('Unauthorized or deleted resources'), children: [] }] if (optionsCmp.length > 0) { this.allNoResources = optionsCmp optionsCmp = optionsCmp.map(item => { return { id: item.id, name: item.name, fullName: item.res } }) optionsCmp.forEach(item => { item.isNew = true }) noResources[0].children = optionsCmp this.mainJarList = this.mainJarList.concat(noResources) } } }, /** * verification */ _verification () { if (this.programType !== 'PYTHON' && !this.mainClass) { this.$message.warning(`${i18n.$t('Please enter main class')}`) return false } if (!this.mainJar) { this.$message.warning(`${i18n.$t('Please enter main jar package')}`) return false } if (!this.driverCores) { this.$message.warning(`${i18n.$t('Please enter Driver cores')}`) return false } if (!Number.isInteger(parseInt(this.driverCores))) { this.$message.warning(`${i18n.$t('Core number should be positive integer')}`) return false } if (!this.driverMemory) { this.$message.warning(`${i18n.$t('Please enter Driver memory')}`) return false } if (!Number.isInteger(parseInt(this.driverMemory))) { this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`) return false } if (!this.executorCores) { this.$message.warning(`${i18n.$t('Please enter Executor cores')}`) return false } if (!Number.isInteger(parseInt(this.executorCores))) { this.$message.warning(`${i18n.$t('Core number should be positive integer')}`) return false } if (!this.executorMemory) { this.$message.warning(`${i18n.$t('Please enter Executor memory')}`) return false } if (!Number.isInteger(parseInt(this.executorMemory))) { this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`) return false } if (!this.numExecutors) { this.$message.warning(`${i18n.$t('Please enter Executor number')}`) return false } if (!Number.isInteger(parseInt(this.numExecutors))) { this.$message.warning(`${i18n.$t('The Executor number should be a positive integer')}`) return false } // noRes if (this.noRes.length > 0) { this.$message.warning(`${i18n.$t('Please delete all non-existent resources')}`) return false } // localParams Subcomponent verification if (!this.$refs.refLocalParams._verifProp()) { return
false } // Process resourcelist let dataProcessing = _.map(this.resourceList, v => { return { id: v } }) // storage this.$emit('on-params', { mainClass: this.mainClass, mainJar: { id: this.mainJar }, deployMode: this.deployMode, resourceList: dataProcessing, localParams: this.localParams, driverCores: this.driverCores, driverMemory: this.driverMemory, numExecutors: this.numExecutors, executorMemory: this.executorMemory, executorCores: this.executorCores, appName: this.appName, mainArgs: this.mainArgs, others: this.others, programType: this.programType, sparkVersion: this.sparkVersion }) return true } }, watch: { // Listening type programType (type) { if (type === 'PYTHON') { this.mainClass = '' } }, // Watch the cacheParams cacheParams (val) { this.$emit('on-cache-params', val) }, resourceIdArr (arr) { let result = [] arr.forEach(item => { this.allNoResources.forEach(item1 => { if (item.id === item1.id) { // resultBool = true result.push(item1) } }) }) this.noRes = result } }, computed: { resourceIdArr () { let isResourceId = [] let resourceIdArr = [] if (this.resourceList.length > 0) { this.resourceList.forEach(v => { this.mainJarList.forEach(v1 => { if (searchTree(v1, v)) { isResourceId.push(searchTree(v1, v)) } }) }) resourceIdArr = isResourceId.map(item => { return { id: item.id, name: item.name, res: item.fullName } }) } return resourceIdArr }, cacheParams () { return { mainClass: this.mainClass, mainJar: { id: this.mainJar }, deployMode: this.deployMode, resourceList: this.resourceIdArr, localParams: this.localParams, driverCores: this.driverCores, driverMemory: this.driverMemory, numExecutors: this.numExecutors, executorMemory: this.executorMemory, executorCores: this.executorCores, appName: this.appName, mainArgs: this.mainArgs, others: this.others, programType: this.programType, sparkVersion: this.sparkVersion } } }, created () { let item = this.store.state.dag.resourcesListS let items = this.store.state.dag.resourcesListJar diGuiTree(item) diGuiTree(items) this.mainJarList = item this.mainJarLists = items let o = this.backfillItem // Non-null objects represent backfill if (!_.isEmpty(o)) { this.mainClass = o.params.mainClass || '' if (o.params.mainJar.res) { this.marjarId(o.params.mainJar.res) } else if (o.params.mainJar.res === '') { this.mainJar = '' } else { this.mainJar = o.params.mainJar.id || '' } this.deployMode = o.params.deployMode || '' this.driverCores = o.params.driverCores || 1 this.driverMemory = o.params.driverMemory || '512M' this.numExecutors = o.params.numExecutors || 2 this.executorMemory = o.params.executorMemory || '2G' this.executorCores = o.params.executorCores || 2 this.appName = o.params.appName || '' this.mainArgs = o.params.mainArgs || '' this.others = o.params.others this.programType = o.params.programType || 'SCALA' this.sparkVersion = o.params.sparkVersion || 'SPARK2' // backfill resourceList let backResource = o.params.resourceList || [] let resourceList = o.params.resourceList || [] if (resourceList.length) { _.map(resourceList, v => { if (!v.id) { this.store.dispatch('dag/getResourceId', { type: 'FILE', fullName: '/' + v.res }).then(res => { this.resourceList.push(res.id) this.dataProcess(backResource) }).catch(e => { this.resourceList.push(v.res) this.dataProcess(backResource) }) } else { this.resourceList.push(v.id) this.dataProcess(backResource) } }) this.cacheResourceList = resourceList } // backfill localParams let localParams = o.params.localParams || [] if (localParams.length) { this.localParams = localParams } } }, mounted () { }, 
components: { mLocalParams, mListBox, mList4Box, Treeselect } } </script>
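The `dataProcess` method in the component above computes the set of selected resource ids that could not be resolved in the resource tree by patching `Array.prototype.diff` on every call. A minimal standalone sketch of the same set-difference step, assuming plain arrays of ids (the helper name `diffResourceIds` is hypothetical, not part of the component):

// Hypothetical helper: ids present in `selected` but missing from
// `resolved`, computed without mutating Array.prototype.
const diffResourceIds = (selected, resolved) => {
  const resolvedSet = new Set(resolved)
  return selected.filter(id => !resolvedSet.has(id))
}

// e.g. diffResourceIds([1, 2, 3], [1, 3]) -> [2]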
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed to the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/definitionDetails.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="home-main index-model"> <m-dag v-if="!isLoading" :type="'definition'" :release-state="releaseState"></m-dag> <m-spin :is-spin="isLoading" ></m-spin> </div> </template> <script> import mDag from './_source/dag.vue' import mSpin from '@/module/components/spin/spin' import Affirm from './_source/jumpAffirm' import disabledState from '@/module/mixin/disabledState' import { mapActions, mapMutations } from 'vuex' export default { name: 'definition-details', data () { return { // loading isLoading: true, // state releaseState: '' } }, provide () { return { definitionDetails: this } }, mixins: [disabledState], props: {}, methods: { ...mapMutations('dag', ['resetParams', 'setIsDetails']), ...mapActions('dag', ['getProjectList', 'getResourcesList', 'getProcessDetails', 'getResourcesListJar']), ...mapActions('security', ['getTenantList', 'getWorkerGroupsAll', 'getAlarmGroupsAll']), /** * init */ init () { this.isLoading = true // Initialization parameters this.resetParams() // Promise Get node needs data Promise.all([ // Node details this.getProcessDetails(this.$route.params.code), // get project this.getProjectList(), // get resource this.getResourcesList(), // get jar this.getResourcesListJar(), // get worker group list this.getWorkerGroupsAll(), // get alarm group list this.getAlarmGroupsAll(), this.getTenantList() ]).then((data) => { let item = data[0] this.setIsDetails(item.processDefinition.releaseState === 'ONLINE') this.releaseState = item.processDefinition.releaseState this.isLoading = false // Whether to pop up the box? Affirm.init(this.$root) }).catch(() => { this.isLoading = false }) }, /** * Redraw (refresh operation) */ _reset () { this.getProcessDetails(this.$route.params.code).then(res => { let item = res this.setIsDetails(item.releaseState === 'ONLINE') this.releaseState = item.releaseState }) } }, watch: { // Listening for routing changes $route: { deep: true, handler () { this.init() } } }, created () { this.init() }, mounted () { }, components: { mDag, mSpin } } </script>
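Each DAG page in this dump follows the same `init` shape: reset the Vuex state, fire all fetches in parallel through `Promise.all`, then clear the spinner whether the batch resolved or failed. A hedged sketch of that pattern in isolation (the `fetch*` action names are stand-ins, not the real Vuex actions):

// Sketch of the parallel-load pattern, assuming promise-returning actions.
async function init (vm) {
  vm.isLoading = true
  try {
    // The first result carries the definition; the rest warm shared caches.
    const [detail] = await Promise.all([
      vm.fetchProcessDetails(),
      vm.fetchProjects(),
      vm.fetchResources()
    ])
    vm.releaseState = detail.processDefinition.releaseState
  } catch (e) {
    // the pages above swallow errors and simply stop the spinner
  } finally {
    vm.isLoading = false
  }
}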
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed to the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/index.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="home-main index-model"> <m-dag v-if="!isLoading"></m-dag> <m-spin :is-spin="isLoading"></m-spin> </div> </template> <script> import mDag from './_source/dag.vue' import { mapActions, mapMutations } from 'vuex' import mSpin from '@/module/components/spin/spin' import Affirm from './_source/jumpAffirm' import disabledState from '@/module/mixin/disabledState' export default { name: 'create-index', data () { return { // loading isLoading: true } }, // mixins mixins: [disabledState], props: {}, methods: { ...mapMutations('dag', ['resetParams']), ...mapActions('dag', ['getProjectList', 'getResourcesList', 'getResourcesListJar']), ...mapActions('security', ['getTenantList', 'getWorkerGroupsAll', 'getAlarmGroupsAll']), /** * init */ init () { this.isLoading = true // Initialization parameters this.resetParams() // Promise Get node needs data Promise.all([ // get project this.getProjectList(), // get resource this.getResourcesList(), // get jar this.getResourcesListJar(), // get worker group list this.getWorkerGroupsAll(), // get alarm group list this.getAlarmGroupsAll(), this.getTenantList() ]).then((data) => { this.isLoading = false // Whether to pop up the box? Affirm.init(this.$root) }).catch(() => { this.isLoading = false }) } }, watch: { $route: { deep: true, handler () { this.init() } } }, created () { this.init() }, mounted () { }, components: { mDag, mSpin } } </script>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed to the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="home-main index-model"> <m-variable></m-variable> <m-starting-param></m-starting-param> <m-dag v-if="!isLoading" :type="'instance'"></m-dag> <m-spin :is-spin="isLoading"></m-spin> </div> </template> <script> import mDag from './_source/dag.vue' import { mapActions, mapMutations } from 'vuex' import mSpin from '@/module/components/spin/spin' import mVariable from './_source/variable' import mStartingParam from './_source/startingParam' import Affirm from './_source/jumpAffirm' import disabledState from '@/module/mixin/disabledState' export default { name: 'instance-details', data () { return { // loading isLoading: true } }, mixins: [disabledState], props: {}, methods: { ...mapMutations('dag', ['setIsDetails', 'resetParams']), ...mapActions('dag', ['getProjectList', 'getResourcesList', 'getInstancedetail', 'getResourcesListJar']), ...mapActions('security', ['getTenantList', 'getWorkerGroupsAll', 'getAlarmGroupsAll']), /** * init */ init () { this.isLoading = true // Initialization parameters this.resetParams() // Promise Get node needs data Promise.all([ // Process instance details this.getInstancedetail(this.$route.params.id), // get project this.getProjectList(), // get resources this.getResourcesList(), // get jar this.getResourcesListJar(), // get worker group list this.getWorkerGroupsAll(), // get alarm group list this.getAlarmGroupsAll(), this.getTenantList() ]).then((data) => { let item = data[0] let flag = false if (item.state !== 'WAITING_THREAD' && item.state !== 'SUCCESS' && item.state !== 'PAUSE' && item.state !== 'FAILURE' && item.state !== 'STOP') { flag = true } else { flag = false } this.setIsDetails(flag) this.isLoading = false // Whether to pop up the box? Affirm.init(this.$root) }).catch(() => { this.isLoading = false }) }, /** * Redraw (refresh operation) */ _reset () { this.getInstancedetail(this.$route.params.id).then(res => { let item = res let flag = false if (item.state !== 'WAITING_THREAD' && item.state !== 'SUCCESS' && item.state !== 'PAUSE' && item.state !== 'FAILURE' && item.state !== 'STOP') { flag = true } else { flag = false } this.setIsDetails(flag) }) } }, watch: { $route: { deep: true, handler () { this.init() } } }, created () { this.init() }, mounted () { }, components: { mDag, mSpin, mVariable, mStartingParam } } </script>
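In the instance page above, both `init` and `_reset` derive the read-only flag from a chain of `!==` comparisons against finished instance states. The same test reads more directly as set membership; a minimal sketch (the `EDITABLE_STATES` name is an assumption, not DolphinScheduler API):

// Instance states in which the DAG may be edited; anything else
// (still running, submitting, ...) forces details/read-only mode.
const EDITABLE_STATES = new Set(['WAITING_THREAD', 'SUCCESS', 'PAUSE', 'FAILURE', 'STOP'])

// Mirrors the inline condition whose result is passed to setIsDetails.
const isDetailsOnly = (state) => !EDITABLE_STATES.has(state)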
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed to the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskDefinition/index.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="task-definition" v-if="!isLoading"> <m-list-construction :title="$t('Task Definition')"> <template slot="conditions"> <m-conditions @on-conditions="_onConditions" :taskTypeShow="true"> <template v-slot:button-group> <el-button size="mini" @click="createTask"> {{ $t("Create task") }} </el-button> </template> </m-conditions> </template> <template v-slot:content> <template v-if="tasksList.length || total > 0"> <m-list :tasksList="tasksList" @on-update="_onUpdate" @editTask="editTask" @viewTaskDetail="viewTaskDetail" ></m-list> <div class="page-box"> <el-pagination background @current-change="_page" @size-change="_pageSize" :page-size="searchParams.pageSize" :current-page.sync="searchParams.pageNo" :page-sizes="[10, 30, 50]" :total="total" layout="sizes, prev, pager, next, jumper" > </el-pagination> </div> </template> <template v-if="!tasksList.length"> <m-no-data></m-no-data> </template> <m-spin :is-spin="isLoading"></m-spin> </template> </m-list-construction> <el-drawer :visible.sync="taskDrawer" size="" :with-header="false" @close="closeTaskDrawer" > <!-- fix the bug that Element-ui(2.13.2) auto focus on the first input --> <div style="width: 0px; height: 0px; overflow: hidden"> <el-input type="text" /> </div> <m-form-model v-if="taskDrawer" :nodeData="nodeData" type="task-definition" @changeTaskType="changeTaskType" @close="closeTaskDrawer" @addTaskInfo="saveTask" :taskDefinition="editingTask" > </m-form-model> </el-drawer> </div> </template> <script> import mListConstruction from '@/module/components/listConstruction/listConstruction' import mConditions from '@/module/components/conditions/conditions' import mList from './_source/list' import mNoData from '@/module/components/noData/noData' import mSpin from '@/module/components/spin/spin' import { mapActions, mapMutations } from 'vuex' import listUrlParamHandle from '@/module/mixin/listUrlParamHandle' import mFormModel from '@/conf/home/pages/dag/_source/formModel/formModel.vue' /** * tasksType */ import { tasksType } from '@/conf/home/pages/dag/_source/config.js' const DEFAULT_NODE_DATA = { id: -1, taskType: 'SHELL', instanceId: -1 } export default { name: 'task-definition-index', data () { // tasksType const tasksTypeList = Object.keys(tasksType) return { total: null, tasksList: [], isLoading: true, searchParams: { pageSize: 10, pageNo: 1, searchVal: '', taskType: '', userId: '' }, // whether the task config drawer is visible taskDrawer: false, // nodeData nodeData: { ...DEFAULT_NODE_DATA }, // tasksType tasksTypeList, // editing task definition editingTask: null } }, mixins: [listUrlParamHandle], methods: { ...mapActions('dag', [ 'getTaskDefinitionsList', 'genTaskCodeList', 'saveTaskDefinition', 'updateTaskDefinition' ]), ...mapActions('dag', [ 
'getProcessList', 'getProjectList', 'getResourcesList', 'getResourcesListJar' ]), ...mapMutations('dag', ['resetParams', 'setIsDetails']), ...mapActions('security', [ 'getTenantList', 'getWorkerGroupsAll', 'getAlarmGroupsAll' ]), /** * Toggle task drawer */ showTaskDrawer () { this.taskDrawer = true }, closeTaskDrawer () { this.setIsDetails(false) this.taskDrawer = false }, saveTask ({ item }) { const isEditing = !!this.editingTask if (isEditing) { this.updateTaskDefinition(item) .then((res) => { this.$message.success(res.msg) this._onUpdate() this.closeTaskDrawer() }) .catch((e) => { this.$message.error(e.msg || '') }) } else { this.genTaskCodeList({ genNum: 1 }) .then((res) => { const [code] = res return code }) .then((code) => { return this.saveTaskDefinition({ taskDefinitionJson: [ { ...item, code } ] }) }) .then((res) => { this.$message.success(res.msg) this._onUpdate() this.closeTaskDrawer() }) .catch((e) => { this.$message.error(e.msg || '') }) } }, createTask () { this.editingTask = null this.nodeData.taskType = DEFAULT_NODE_DATA.taskType this.showTaskDrawer() }, editTask (task) { this.editingTask = task this.nodeData.id = task.code this.nodeData.taskType = task.taskType this.showTaskDrawer() }, viewTaskDetail (task) { this.setIsDetails(true) this.editTask(task) }, /** * pageNo */ _page (val) { this.searchParams.pageNo = val }, _pageSize (val) { this.searchParams.pageSize = val }, /** * conditions */ _onConditions (o) { this.searchParams.searchVal = o.searchVal this.searchParams.taskType = o.taskType this.searchParams.pageNo = 1 }, /** * get task definition list */ _getList (flag) { this.isLoading = !flag this.getTaskDefinitionsList(this.searchParams) .then((res) => { if (this.searchParams.pageNo > 1 && res.totalList.length === 0) { this.searchParams.pageNo = this.searchParams.pageNo - 1 } else { this.tasksList = [] this.tasksList = res.totalList this.total = res.total this.isLoading = false } }) .catch((e) => { this.isLoading = false }) }, /** * update task dataList */ _onUpdate () { this._debounceGET('false') }, /** * change form modal task type */ changeTaskType (value) { this.nodeData.taskType = value } }, created () { this.isLoading = true // Initialization parameters this.resetParams() // Promise Get node needs data Promise.all([ // get process definition this.getProcessList(), // get project this.getProjectList(), // get resource this.getResourcesList(), // get jar this.getResourcesListJar(), // get worker group list this.getWorkerGroupsAll(), // get alarm group list this.getAlarmGroupsAll(), this.getTenantList() ]) .then((data) => { this.isLoading = false }) .catch(() => { this.isLoading = false }) }, mounted () {}, components: { mListConstruction, mConditions, mList, mNoData, mSpin, mFormModel } } </script> <style lang="scss" scoped> .task-definition { .taskGroupBtn { width: 300px; } } </style>
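The create branch of `saveTask` above first reserves a server-generated task code and only then persists the definition. A compact sketch of that two-step chain with async/await, under the assumption that the same two Vuex actions are in scope (the wrapper name `createTaskDefinition` is hypothetical):

// Reserve one code, attach it to the form payload, then save.
async function createTaskDefinition (vm, item) {
  const [code] = await vm.genTaskCodeList({ genNum: 1 })
  return vm.saveTaskDefinition({
    taskDefinitionJson: [{ ...item, code }]
  })
}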
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed to the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import io from '@/module/io' // Avoid passing in illegal values when users directly call third-party interfaces const convertLocations = (locationStr) => { let locations = null if (!locationStr) return locations try { locations = JSON.parse(locationStr) } catch (error) {} return Array.isArray(locations) ? locations : null } export default { /** * Task status acquisition */ getTaskState ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}/tasks`, { processInstanceId: payload }, res => { state.taskInstances = res.data.taskList resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process definition status */ editProcessState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/${payload.code}/release`, { name: payload.name, releaseState: payload.releaseState }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get process definition versions pagination info */ getProcessDefinitionVersionsPage ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * switch process definition version */ switchProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete process definition version */ deleteProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process instance status */ editExecutorsState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/execute`, { processInstanceId: payload.processInstanceId, executeType: payload.executeType }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Verify that the DGA map name exists */ verifDAGName ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/verify-name`, { name: payload }, res => { state.name = payload resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get process definition DAG diagram details */ getProcessDetails ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload}`, { }, res => { // process 
definition code state.code = res.data.processDefinition.code // version state.version = res.data.processDefinition.version // name state.name = res.data.processDefinition.name // releaseState state.releaseState = res.data.processDefinition.releaseState // description state.description = res.data.processDefinition.description // taskRelationJson state.connects = res.data.processTaskRelationList // locations state.locations = convertLocations(res.data.processDefinition.locations) // global params state.globalParams = res.data.processDefinition.globalParamList // timeout state.timeout = res.data.processDefinition.timeout // executionType state.executionType = res.data.processDefinition.executionType // tenantCode state.tenantCode = res.data.processDefinition.tenantCode || 'default' // tasks info state.tasks = res.data.taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Batch copy process definitions to the target project */ copyProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-copy`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch move process definitions to the target project */ moveProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-move`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get all the items created by the logged in user */ getAllItems ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/created-and-authed', {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get the process instance DAG diagram details */ getInstancedetail ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}`, { }, res => { const { processDefinition, processTaskRelationList, taskDefinitionList } = res.data.dagData // code state.code = processDefinition.code // version state.version = processDefinition.version // name state.name = res.data.name // desc state.description = processDefinition.description // connects state.connects = processTaskRelationList // locations state.locations = convertLocations(processDefinition.locations) // global params state.globalParams = processDefinition.globalParamList // timeout state.timeout = processDefinition.timeout // executionType state.executionType = processDefinition.executionType // tenantCode state.tenantCode = res.data.tenantCode || 'default' // tasks info state.tasks = taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) // startup parameters state.startup = _.assign(state.startup, _.pick(res.data, ['commandType', 'failureStrategy', 'processInstancePriority', 'workerGroup', 'warningType', 'warningGroupId', 'receivers', 'receiversCc'])) state.startup.commandParam = JSON.parse(res.data.commandParam) resolve(res.data)
}).catch(res => { reject(res) }) }) }, /** * Create process definition */ saveDAGchart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, executionType: state.executionType, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Process definition update */ updateDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-definition/${payload}`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, executionType: state.executionType, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout, releaseState: state.releaseState }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Process instance update */ updateInstance ({ state }, instanceId) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-instances/${instanceId}`, { syncDefine: state.syncDefine, globalParams: JSON.stringify(state.globalParams), locations: JSON.stringify(state.locations), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, timeout: state.timeout }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Get a list of process definitions (sub-workflow usage is not paged) */ getProcessList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.processListS.length) { resolve() return } io.get(`projects/${state.projectCode}/process-definition/simple-list`, payload, res => { state.processListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions (list page usage with pagination) */ getProcessListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition`, payload, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of project */ getProjectList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.projectListS.length) { resolve() return } io.get('projects/created-and-authed', payload, res => { state.projectListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions by project code */ getProcessByProjectCode ({ state }, code) { return new Promise((resolve, reject) => { io.get(`projects/${code}/process-definition/all`, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get datasource */ getDatasourceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('datasources/list', { type: payload }, res => { resolve(res) }).catch(res => { reject(res) }) }) }, /** * get resources */ getResourcesList ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListS.length) { resolve() return } io.get('resources/list', { type: 'FILE' }, res => { state.resourcesListS = res.data resolve(res.data) }).catch(res => { 
reject(res) }) }) }, /** * get jar */ getResourcesListJar ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListJar.length) { resolve() return } io.get('resources/query-by-type', { type: 'FILE' }, res => { state.resourcesListJar = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get process instance */ getProcessInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances`, payload, res => { state.instanceListS = res.data.totalList resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get alarm list */ getNotifyGroupList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-groups/list', res => { state.notifyGroupListS = _.map(res.data, v => { return { id: v.id, code: v.groupName, disabled: false } }) resolve(_.cloneDeep(state.notifyGroupListS)) }).catch(res => { reject(res) }) }) }, /** * Process definition startup interface */ processStart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-process-instance`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * View log */ getLog ({ state }, payload) { return new Promise((resolve, reject) => { io.get('log/detail', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query the sub process instance id by the parent task id * @param taskId */ getSubProcessId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/query-sub-by-parent`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Called before the process definition starts */ getStartCheck ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-check`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Create timing */ createSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Preview timing */ previewSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/preview`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Timing list paging */ getScheduleList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing offline */ scheduleOffline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/offline`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing online */ scheduleOnline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/online`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Edit timing */ updateSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/schedules/${payload.id}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete process instance */ deleteInstance ({ state }, payload) { return new Promise((resolve, reject) => {
io.delete(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete process instance */ batchDeleteInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-instances/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete definition */ deleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete definition */ batchDeleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * export definition */ exportDefinition ({ state }, payload) { const downloadBlob = (data, fileNameS = 'json') => { if (!data) { return } const blob = new Blob([data]) const fileName = `${fileNameS}.json` if ('download' in document.createElement('a')) { // Not an IE browser const url = window.URL.createObjectURL(blob) const link = document.createElement('a') link.style.display = 'none' link.href = url link.setAttribute('download', fileName) document.body.appendChild(link) link.click() document.body.removeChild(link) // remove the element after the download completes window.URL.revokeObjectURL(url) // release the blob object } else { // IE 10+ window.navigator.msSaveBlob(blob, fileName) } } io.post(`projects/${state.projectCode}/process-definition/batch-export`, { codes: payload.codes }, res => { downloadBlob(res, payload.fileName) }, e => { }, { responseType: 'blob' }) }, /** * Process instance get variable */ getViewvariables ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-variables`, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get udfs function based on data source */ getUdfList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('resources/udf-func/list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task instance list */ getTaskInstanceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-instances`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Force fail/kill/need_fault_tolerance task success */ forceTaskSuccess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-instances/${payload.taskInstanceId}/force-success`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task record list */ getTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query history task record list */ getHistoryTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/history-list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * tree chart */ getViewTree ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/view-tree`, { limit: payload.limit }, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** *
gantt chart */ getViewGantt ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-gantt`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query task node list */ getProcessTasksList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, getTaskListDefIdAll ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/batch-query-tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * remove timing */ deleteTiming ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/schedules/${payload.scheduleId}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, getResourceId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`resources/${payload.id}`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, genTaskCodeList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition/gen-task-codes`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query Task Definitions List Paging */ getTaskDefinitionsList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Delete Task Definition by code */ deleteTaskDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/task-definition/${payload.code}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Save Task Definition */ saveTaskDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-definition`, { taskDefinitionJson: JSON.stringify(payload.taskDefinitionJson) }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, updateTaskDefinition ({ state }, taskDefinition) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/task-definition/${taskDefinition.code}`, { taskDefinitionJsonObj: JSON.stringify(taskDefinition) }, res => { resolve(res) }).catch(e => { reject(e) }) }) } }
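`exportDefinition` in the actions above drives the browser download through an in-memory anchor element. A trimmed sketch of that pattern on its own, assuming a modern (non-IE) browser; the `downloadJson` name is hypothetical:

// Wrap the response in a Blob, point a temporary <a> at an object URL,
// click it, then revoke the URL so the Blob can be garbage-collected.
function downloadJson (data, baseName) {
  const blob = new Blob([data])
  const url = window.URL.createObjectURL(blob)
  const link = document.createElement('a')
  link.style.display = 'none'
  link.href = url
  link.setAttribute('download', `${baseName}.json`)
  document.body.appendChild(link)
  link.click()
  document.body.removeChild(link)
  window.URL.revokeObjectURL(url)
}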
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed to the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': 'User Name', 'Please enter user name': 'Please enter user name', Password: 'Password', 'Please enter your password': 'Please enter your password', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22', Login: 'Login', Home: 'Home', 'Failed to create node to save': 'Failed to create node to save', 'Global parameters': 'Global parameters', 'Local parameters': 'Local parameters', 'Copy success': 'Copy success', 'The browser does not support automatic copying': 'The browser does not support automatic copying', 'Whether to save the DAG graph': 'Whether to save the DAG graph', 'Current node settings': 'Current node settings', 'View history': 'View history', 'View log': 'View log', 'Force success': 'Force success', 'Enter this child node': 'Enter this child node', 'Node name': 'Node name', 'Please enter name (required)': 'Please enter name (required)', 'Run flag': 'Run flag', Normal: 'Normal', 'Prohibition execution': 'Prohibition execution', 'Please enter description': 'Please enter description', 'Number of failed retries': 'Number of failed retries', Times: 'Times', 'Failed retry interval': 'Failed retry interval', Minute: 'Minute', 'Delay execution time': 'Delay execution time', 'Delay execution': 'Delay execution', 'Forced success': 'Forced success', Cancel: 'Cancel', 'Confirm add': 'Confirm add', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', 'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process', 'Name already exists': 'Name already exists', 'Download Log': 'Download Log', 'Refresh Log': 'Refresh Log', 'Enter full screen': 'Enter full screen', 'Cancel full screen': 'Cancel full screen', Close: 'Close', 'Update log success': 'Update log success', 'No more logs': 'No more logs', 'No log': 'No log', 'Loading Log...': 'Loading Log...', 'Set the DAG diagram name': 'Set the DAG diagram name', 'Please enter description(optional)': 'Please enter description(optional)', 'Set global': 'Set global', 'Whether to go online the process definition': 'Whether to go online the process definition', 'Whether to update the process definition': 'Whether to update the process definition', Add: 'Add', 'DAG graph name cannot be empty': 'DAG graph name cannot be empty', 'Create Datasource': 'Create Datasource', 'Project Home': 'Workflow Monitor', 'Project Manage': 'Project', 'Create Project': 'Create Project', 'Cron Manage': 'Cron 
Manage', 'Copy Workflow': 'Copy Workflow', 'Tenant Manage': 'Tenant Manage', 'Create Tenant': 'Create Tenant', 'User Manage': 'User Manage', 'Create User': 'Create User', 'User Information': 'User Information', 'Edit Password': 'Edit Password', Success: 'Success', Failed: 'Failed', Delete: 'Delete', 'Please choose': 'Please choose', 'Please enter a positive integer': 'Please enter a positive integer', 'Program Type': 'Program Type', 'Main Class': 'Main Class', 'Main Jar Package': 'Main Jar Package', 'Please enter main jar package': 'Please enter main jar package', 'Please enter main class': 'Please enter main class', 'Main Arguments': 'Main Arguments', 'Please enter main arguments': 'Please enter main arguments', 'Option Parameters': 'Option Parameters', 'Please enter option parameters': 'Please enter option parameters', Resources: 'Resources', 'Custom Parameters': 'Custom Parameters', 'Custom template': 'Custom template', Datasource: 'Datasource', methods: 'methods', 'Please enter the procedure method': 'Please enter the procedure script \n\ncall procedure:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\ncall function:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': 'example:{call <procedure-name>[(?,?, ...)]} or {?= call <procedure-name>[(?,?, ...)]}', Script: 'Script', 'Please enter script(required)': 'Please enter script(required)', 'Deploy Mode': 'Deploy Mode', 'Driver Cores': 'Driver Cores', 'Please enter Driver cores': 'Please enter Driver cores', 'Driver Memory': 'Driver Memory', 'Please enter Driver memory': 'Please enter Driver memory', 'Executor Number': 'Executor Number', 'Please enter Executor number': 'Please enter Executor number', 'The Executor number should be a positive integer': 'The Executor number should be a positive integer', 'Executor Memory': 'Executor Memory', 'Please enter Executor memory': 'Please enter Executor memory', 'Executor Cores': 'Executor Cores', 'Please enter Executor cores': 'Please enter Executor cores', 'Memory should be a positive integer': 'Memory should be a positive integer', 'Core number should be positive integer': 'Core number should be positive integer', 'Flink Version': 'Flink Version', 'JobManager Memory': 'JobManager Memory', 'Please enter JobManager memory': 'Please enter JobManager memory', 'TaskManager Memory': 'TaskManager Memory', 'Please enter TaskManager memory': 'Please enter TaskManager memory', 'Slot Number': 'Slot Number', 'Please enter Slot number': 'Please enter Slot number', Parallelism: 'Parallelism', 'Custom Parallelism': 'Configure parallelism', 'Please enter Parallelism': 'Please enter Parallelism', 'Parallelism tip': 'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' + 'set the complement task thread to a reasonable value to avoid too large impact on the server.', 'Parallelism number should be positive integer': 'Parallelism number should be positive integer', 'TaskManager Number': 'TaskManager Number', 'Please enter TaskManager number': 'Please enter TaskManager number', 'App Name': 'App Name', 'Please enter app name(optional)': 'Please enter app name(optional)', 'SQL Type': 'SQL Type', 'Send Email': 'Send Email', 'Log display': 'Log display', 'rows of result': 'rows of result', Title: 'Title', 'Please enter the title of email': 'Please enter the title of email', Table: 'Table', TableMode: 'Table', Attachment: 'Attachment', 'SQL Parameter': 'SQL Parameter', 'SQL Statement': 'SQL Statement', 'UDF Function': 'UDF Function', 
'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)', 'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)', 'One form or attachment must be selected': 'One form or attachment must be selected', 'Mail subject required': 'Mail subject required', 'Child Node': 'Child Node', 'Please select a sub-Process': 'Please select a sub-Process', Edit: 'Edit', 'Switch To This Version': 'Switch To This Version', 'Datasource Name': 'Datasource Name', 'Please enter datasource name': 'Please enter datasource name', IP: 'IP', 'Please enter IP': 'Please enter IP', Port: 'Port', 'Please enter port': 'Please enter port', 'Database Name': 'Database Name', 'Please enter database name': 'Please enter database name', 'Oracle Connect Type': 'ServiceName or SID', 'Oracle Service Name': 'ServiceName', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc connect parameters', 'Test Connect': 'Test Connect', 'Please enter resource name': 'Please enter resource name', 'Please enter resource folder name': 'Please enter resource folder name', 'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement', 'Please enter IP/hostname': 'Please enter IP/hostname', 'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format', '#': '#', 'Datasource Type': 'Datasource Type', 'Datasource Parameter': 'Datasource Parameter', 'Create Time': 'Create Time', 'Update Time': 'Update Time', Operation: 'Operation', 'Current Version': 'Current Version', 'Click to view': 'Click to view', 'Delete?': 'Delete?', 'Switch Version Successfully': 'Switch Version Successfully', 'Confirm Switch To This Version?': 'Confirm Switch To This Version?', Confirm: 'Confirm', 'Task status statistics': 'Task Status Statistics', Number: 'Number', State: 'State', 'Dry-run flag': 'Dry-run flag', 'Process Status Statistics': 'Process Status Statistics', 'Process Definition Statistics': 'Process Definition Statistics', 'Project Name': 'Project Name', 'Please enter name': 'Please enter name', 'Owned Users': 'Owned Users', 'Process Pid': 'Process Pid', 'Zk registration directory': 'Zk registration directory', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': 'Last heartbeat time', 'Edit Tenant': 'Edit Tenant', 'OS Tenant Code': 'OS Tenant Code', 'Tenant Name': 'Tenant Name', Queue: 'Yarn Queue', 'Please select a queue': 'default is tenant association queue', 'Please enter the os tenant code in English': 'Please enter the os tenant code in English', 'Please enter os tenant code in English': 'Please enter os tenant code in English', 'Please enter os tenant code': 'Please enter os tenant code', 'Please enter tenant Name': 'Please enter tenant Name', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. 
Only letters or a combination of letters and numbers are allowed', 'Edit User': 'Edit User', Tenant: 'Tenant', Email: 'Email', Phone: 'Phone', 'User Type': 'User Type', 'Please enter phone number': 'Please enter phone number', 'Please enter email': 'Please enter email', 'Please enter the correct email format': 'Please enter the correct email format', 'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format', Project: 'Project', Authorize: 'Authorize', 'File resources': 'File resources', 'UDF resources': 'UDF resources', 'UDF resources directory': 'UDF resources directory', 'Please select UDF resources directory': 'Please select UDF resources directory', 'Alarm group': 'Alarm group', 'Alarm group required': 'Alarm group required', 'Edit alarm group': 'Edit alarm group', 'Create alarm group': 'Create alarm group', 'Create Alarm Instance': 'Create Alarm Instance', 'Edit Alarm Instance': 'Edit Alarm Instance', 'Group Name': 'Group Name', 'Alarm instance name': 'Alarm instance name', 'Alarm plugin name': 'Alarm plugin name', 'Select plugin': 'Select plugin', 'Select Alarm plugin': 'Please select an Alarm plugin', 'Please enter group name': 'Please enter group name', 'Instance parameter exception': 'Instance parameter exception', 'Group Type': 'Group Type', 'Alarm plugin instance': 'Alarm plugin instance', 'Select Alarm plugin instance': 'Please select an Alarm plugin instance', Remarks: 'Remarks', SMS: 'SMS', 'Managing Users': 'Managing Users', Permission: 'Permission', Administrator: 'Administrator', 'Confirm Password': 'Confirm Password', 'Please enter confirm password': 'Please enter confirm password', 'Password cannot be in Chinese': 'Password cannot be in Chinese', 'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password', 'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese', 'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password', 'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password', 'Please select the datasource': 'Please select the datasource', 'Please select resources': 'Please select resources', Query: 'Query', 'Non Query': 'Non Query', 'prop(required)': 'prop(required)', 'value(optional)': 'value(optional)', 'value(required)': 'value(required)', 'prop is empty': 'prop is empty', 'value is empty': 'value is empty', 'prop is repeat': 'prop is repeat', 'Start Time': 'Start Time', 'End Time': 'End Time', crontab: 'crontab', 'Failure Strategy': 'Failure Strategy', online: 'online', offline: 'offline', 'Task Status': 'Task Status', 'Process Instance': 'Process Instance', 'Task Instance': 'Task Instance', 'Select date range': 'Select date range', startDate: 'startDate', endDate: 'endDate', Date: 'Date', Waiting: 'Waiting', Execution: 'Execution', Finish: 'Finish', 'Create File': 'Create File', 'Create folder': 'Create folder', 'File Name': 'File Name', 'Folder Name': 'Folder Name', 'File Format': 'File Format', 'Folder Format': 'Folder Format', 'File Content': 'File Content', 'Upload File Size': 'Upload File size cannot exceed 1g', Create: 'Create', 'Please enter the resource content': 'Please enter the resource content', 'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines', 'File Details': 'File Details', 'Download Details': 'Download Details', Return: 'Return', Save: 'Save', 'File Manage': 
'File Manage', 'Upload Files': 'Upload Files', 'Create UDF Function': 'Create UDF Function', 'Upload UDF Resources': 'Upload UDF Resources', 'Service-Master': 'Service-Master', 'Service-Worker': 'Service-Worker', 'Process Name': 'Process Name', Executor: 'Executor', 'Run Type': 'Run Type', 'Scheduling Time': 'Scheduling Time', 'Run Times': 'Run Times', host: 'host', 'fault-tolerant sign': 'fault-tolerant sign', Rerun: 'Rerun', 'Recovery Failed': 'Recovery Failed', Stop: 'Stop', Pause: 'Pause', 'Recovery Suspend': 'Recovery Suspend', Gantt: 'Gantt', 'Node Type': 'Node Type', 'Submit Time': 'Submit Time', Duration: 'Duration', 'Retry Count': 'Retry Count', 'Task Name': 'Task Name', 'Task Date': 'Task Date', 'Source Table': 'Source Table', 'Record Number': 'Record Number', 'Target Table': 'Target Table', 'Online viewing type is not supported': 'Online viewing type is not supported', Size: 'Size', Rename: 'Rename', Download: 'Download', Export: 'Export', 'Version Info': 'Version Info', Submit: 'Submit', 'Edit UDF Function': 'Edit UDF Function', type: 'type', 'UDF Function Name': 'UDF Function Name', FILE: 'FILE', UDF: 'UDF', 'File Subdirectory': 'File Subdirectory', 'Please enter a function name': 'Please enter a function name', 'Package Name': 'Package Name', 'Please enter a Package name': 'Please enter a Package name', Parameter: 'Parameter', 'Please enter a parameter': 'Please enter a parameter', 'UDF Resources': 'UDF Resources', 'Upload Resources': 'Upload Resources', Instructions: 'Instructions', 'Please enter a instructions': 'Please enter a instructions', 'Please enter a UDF function name': 'Please enter a UDF function name', 'Select UDF Resources': 'Select UDF Resources', 'Class Name': 'Class Name', 'Jar Package': 'Jar Package', 'Library Name': 'Library Name', 'UDF Resource Name': 'UDF Resource Name', 'File Size': 'File Size', Description: 'Description', 'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items', 'Select Line Connection': 'Select Line Connection', 'Delete selected lines or nodes': 'Delete selected lines or nodes', 'Full Screen': 'Full Screen', Unpublished: 'Unpublished', 'Start Process': 'Start Process', 'Execute from the current node': 'Execute from the current node', 'Recover tolerance fault process': 'Recover tolerance fault process', 'Resume the suspension process': 'Resume the suspension process', 'Execute from the failed nodes': 'Execute from the failed nodes', 'Complement Data': 'Complement Data', 'Scheduling execution': 'Scheduling execution', 'Recovery waiting thread': 'Recovery waiting thread', 'Submitted successfully': 'Submitted successfully', Executing: 'Executing', 'Ready to pause': 'Ready to pause', 'Ready to stop': 'Ready to stop', 'Need fault tolerance': 'Need fault tolerance', Kill: 'Kill', 'Waiting for thread': 'Waiting for thread', 'Waiting for dependence': 'Waiting for dependence', Start: 'Start', Copy: 'Copy', 'Copy name': 'Copy name', 'Copy path': 'Copy path', 'Please enter keyword': 'Please enter keyword', 'File Upload': 'File Upload', 'Drag the file into the current upload window': 'Drag the file into the current upload window', 'Drag area upload': 'Drag area upload', Upload: 'Upload', 'ReUpload File': 'ReUpload File', 'Please enter file name': 'Please enter file name', 'Please select the file to upload': 'Please select the file to upload', 'Resources manage': 'Resources', Security: 'Security', Logout: 'Logout', 'No data': 'No data', 'Uploading...': 'Uploading...', 'Loading...': 'Loading...', List: 'List', 'Unable to download without 
proper url': 'Unable to download without proper url', Process: 'Process', 'Process definition': 'Process definition', 'Task record': 'Task record', 'Warning group manage': 'Warning group manage', 'Warning instance manage': 'Warning instance manage', 'Servers manage': 'Servers manage', 'UDF manage': 'UDF manage', 'Resource manage': 'Resource manage', 'Function manage': 'Function manage', 'Edit password': 'Edit password', 'Ordinary users': 'Ordinary users', 'Create process': 'Create process', 'Import process': 'Import process', 'Timing state': 'Timing state', Timing: 'Timing', Timezone: 'Timezone', TreeView: 'TreeView', 'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat', 'Mailbox input is illegal': 'Mailbox input is illegal', 'Please set the parameters before starting': 'Please set the parameters before starting', Continue: 'Continue', End: 'End', 'Node execution': 'Node execution', 'Backward execution': 'Backward execution', 'Forward execution': 'Forward execution', 'Execute only the current node': 'Execute only the current node', 'Notification strategy': 'Notification strategy', 'Notification group': 'Notification group', 'Please select a notification group': 'Please select a notification group', 'Whether it is a complement process?': 'Whether it is a complement process?', 'Schedule date': 'Schedule date', 'Mode of execution': 'Mode of execution', 'Serial execution': 'Serial execution', 'Parallel execution': 'Parallel execution', 'Set parameters before timing': 'Set parameters before timing', 'Start and stop time': 'Start and stop time', 'Please select time': 'Please select time', 'Please enter crontab': 'Please enter crontab', none_1: 'none', success_1: 'success', failure_1: 'failure', All_1: 'All', Toolbar: 'Toolbar', 'View variables': 'View variables', 'Format DAG': 'Format DAG', 'Refresh DAG status': 'Refresh DAG status', Return_1: 'Return', 'Please enter format': 'Please enter format', 'connection parameter': 'connection parameter', 'Process definition details': 'Process definition details', 'Create process definition': 'Create process definition', 'Scheduled task list': 'Scheduled task list', 'Process instance details': 'Process instance details', 'Create Resource': 'Create Resource', 'User Center': 'User Center', AllStatus: 'All', None: 'None', Name: 'Name', 'Process priority': 'Process priority', 'Task priority': 'Task priority', 'Task timeout alarm': 'Task timeout alarm', 'Timeout strategy': 'Timeout strategy', 'Timeout alarm': 'Timeout alarm', 'Timeout failure': 'Timeout failure', 'Timeout period': 'Timeout period', 'Waiting Dependent complete': 'Waiting Dependent complete', 'Waiting Dependent start': 'Waiting Dependent start', 'Check interval': 'Check interval', 'Timeout must be longer than check interval': 'Timeout must be longer than check interval', 'Timeout strategy must be selected': 'Timeout strategy must be selected', 'Timeout must be a positive integer': 'Timeout must be a positive integer', 'Add dependency': 'Add dependency', 'Whether dry-run': 'Whether dry-run', and: 'and', or: 'or', month: 'month', week: 'week', day: 'day', hour: 'hour', Running: 'Running', 'Waiting for dependency to complete': 'Waiting for dependency to complete', Selected: 'Selected', CurrentHour: 'CurrentHour', Last1Hour: 'Last1Hour', Last2Hours: 'Last2Hours', Last3Hours: 'Last3Hours', Last24Hours: 'Last24Hours', today: 'today', Last1Days: 'Last1Days', Last2Days: 'Last2Days', Last3Days: 'Last3Days', Last7Days: 'Last7Days', 
ThisWeek: 'ThisWeek', LastWeek: 'LastWeek', LastMonday: 'LastMonday', LastTuesday: 'LastTuesday', LastWednesday: 'LastWednesday', LastThursday: 'LastThursday', LastFriday: 'LastFriday', LastSaturday: 'LastSaturday', LastSunday: 'LastSunday', ThisMonth: 'ThisMonth', LastMonth: 'LastMonth', LastMonthBegin: 'LastMonthBegin', LastMonthEnd: 'LastMonthEnd', 'Refresh status succeeded': 'Refresh status succeeded', 'Queue manage': 'Yarn Queue manage', 'Create queue': 'Create queue', 'Edit queue': 'Edit queue', 'Datasource manage': 'Datasource', 'History task record': 'History task record', 'Please go online': 'Please go online', 'Queue value': 'Queue value', 'Please enter queue value': 'Please enter queue value', 'Worker group manage': 'Worker group manage', 'Create worker group': 'Create worker group', 'Edit worker group': 'Edit worker group', 'Token manage': 'Token manage', 'Create token': 'Create token', 'Edit token': 'Edit token', Addresses: 'Addresses', 'Worker Addresses': 'Worker Addresses', 'Please select the worker addresses': 'Please select the worker addresses', 'Failure time': 'Failure time', 'Expiration time': 'Expiration time', User: 'User', 'Please enter token': 'Please enter token', 'Generate token': 'Generate token', Monitor: 'Monitor', Group: 'Group', 'Queue statistics': 'Queue statistics', 'Command status statistics': 'Command status statistics', 'Task kill': 'Task Kill', 'Task queue': 'Task queue', 'Error command count': 'Error command count', 'Normal command count': 'Normal command count', Manage: ' Manage', 'Number of connections': 'Number of connections', Sent: 'Sent', Received: 'Received', 'Min latency': 'Min latency', 'Avg latency': 'Avg latency', 'Max latency': 'Max latency', 'Node count': 'Node count', 'Query time': 'Query time', 'Node self-test status': 'Node self-test status', 'Health status': 'Health status', 'Max connections': 'Max connections', 'Threads connections': 'Threads connections', 'Max used connections': 'Max used connections', 'Threads running connections': 'Threads running connections', 'Worker group': 'Worker group', 'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0', 'Pre Statement': 'Pre Statement', 'Post Statement': 'Post Statement', 'Statement cannot be empty': 'Statement cannot be empty', 'Process Define Count': 'Work flow Define Count', 'Process Instance Running Count': 'Process Instance Running Count', 'command number of waiting for running': 'command number of waiting for running', 'failure command number': 'failure command number', 'tasks number of waiting running': 'tasks number of waiting running', 'task number of ready to kill': 'task number of ready to kill', 'Statistics manage': 'Statistics Manage', statistics: 'Statistics', 'select tenant': 'select tenant', 'Please enter Principal': 'Please enter Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path', 'The start time must not be the same as the end': 'The start time must not be the same as the end', 'Startup parameter': 'Startup parameter', 'Startup type': 'Startup type', 'warning of timeout': 'warning of timeout', 'Next 
five execution times': 'Next five execution times', 'Execute time': 'Execute time', 'Complement range': 'Complement range', 'Http Url': 'Http Url', 'Http Method': 'Http Method', 'Http Parameters': 'Http Parameters', 'Http Parameters Key': 'Http Parameters Key', 'Http Parameters Position': 'Http Parameters Position', 'Http Parameters Value': 'Http Parameters Value', 'Http Check Condition': 'Http Check Condition', 'Http Condition': 'Http Condition', 'Please Enter Http Url': 'Please Enter Http Url(required)', 'Please Enter Http Condition': 'Please Enter Http Condition', 'There is no data for this period of time': 'There is no data for this period of time', 'Worker addresses cannot be empty': 'Worker addresses cannot be empty', 'Please generate token': 'Please generate token', 'Please Select token': 'Please select the expiration time of token', 'Spark Version': 'Spark Version', TargetDataBase: 'target database', TargetTable: 'target table', TargetJobName: 'target job name', 'Please enter Pigeon job name': 'Please enter Pigeon job name', 'Please enter the table of target': 'Please enter the table of target', 'Please enter a Target Table(required)': 'Please enter a Target Table(required)', SpeedByte: 'speed(byte count)', SpeedRecord: 'speed(record count)', '0 means unlimited by byte': '0 means unlimited', '0 means unlimited by count': '0 means unlimited', 'Modify User': 'Modify User', 'Whether directory': 'Whether directory', Yes: 'Yes', No: 'No', 'Hadoop Custom Params': 'Hadoop Params', 'Sqoop Advanced Parameters': 'Sqoop Params', 'Sqoop Job Name': 'Job Name', 'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)', 'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)', 'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)', 'Please enter Target Dir(required)': 'Please enter Target Dir(required)', 'Please enter Export Dir(required)': 'Please enter Export Dir(required)', 'Please enter Hive Database(required)': 'Please enter Hive Database(required)', 'Please enter Hive Table(required)': 'Please enter Hive Table(required)', 'Please enter Hive Partition Keys': 'Please enter Hive Partition Key', 'Please enter Hive Partition Values': 'Please enter Partition Value', 'Please enter Replace Delimiter': 'Please enter Replace Delimiter', 'Please enter Fields Terminated': 'Please enter Fields Terminated', 'Please enter Lines Terminated': 'Please enter Lines Terminated', 'Please enter Concurrency': 'Please enter Concurrency', 'Please enter Update Key': 'Please enter Update Key', 'Please enter Job Name(required)': 'Please enter Job Name(required)', 'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)', Direct: 'Direct', Type: 'Type', ModelType: 'ModelType', ColumnType: 'ColumnType', Database: 'Database', Column: 'Column', 'Map Column Hive': 'Map Column Hive', 'Map Column Java': 'Map Column Java', 'Export Dir': 'Export Dir', 'Hive partition Keys': 'Hive partition Keys', 'Hive partition Values': 'Hive partition Values', FieldsTerminated: 'FieldsTerminated', LinesTerminated: 'LinesTerminated', IsUpdate: 'IsUpdate', UpdateKey: 'UpdateKey', UpdateMode: 'UpdateMode', 'Target Dir': 'Target Dir', DeleteTargetDir: 'DeleteTargetDir', FileType: 'FileType', CompressionCodec: 'CompressionCodec', CreateHiveTable: 'CreateHiveTable', DropDelimiter: 'DropDelimiter', OverWriteSrc: 'OverWriteSrc', ReplaceDelimiter: 'ReplaceDelimiter', Concurrency: 'Concurrency', Form: 'Form', OnlyUpdate: 'OnlyUpdate', AllowInsert: 'AllowInsert', 
'Data Source': 'Data Source', 'Data Target': 'Data Target', 'All Columns': 'All Columns', 'Some Columns': 'Some Columns', 'Branch flow': 'Branch flow', 'Custom Job': 'Custom Job', 'Custom Script': 'Custom Script', 'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow', 'Successful branch flow and failed branch flow are required': 'conditions node Successful and failed branch flow are required', 'No resources exist': 'No resources exist', 'Please delete all non-existing resources': 'Please delete all non-existing resources', 'Unauthorized or deleted resources': 'Unauthorized or deleted resources', 'Please delete all non-existent resources': 'Please delete all non-existent resources', Kinship: 'Workflow relationship', Reset: 'Reset', KinshipStateActive: 'Current selection', KinshipState1: 'Online', KinshipState0: 'Workflow is not online', KinshipState10: 'Scheduling is not online', 'Dag label display control': 'Dag label display control', Enable: 'Enable', Disable: 'Disable', 'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!', 'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading', 'User name length is between 3 and 39': 'User name length is between 3 and 39', 'Timeout Settings': 'Timeout Settings', 'Connect Timeout': 'Connect Timeout', 'Socket Timeout': 'Socket Timeout', 'Connect timeout be a positive integer': 'Connect timeout be a positive integer', 'Socket Timeout be a positive integer': 'Socket Timeout be a positive integer', ms: 'ms', 'Please Enter Url': 'Please Enter Url eg. 
127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': 'Please select the waterdrop resources', zkDirectory: 'zkDirectory', 'Directory detail': 'Directory detail', 'Connection name': 'Connection name', 'Current connection settings': 'Current connection settings', 'Please save the DAG before formatting': 'Please save the DAG before formatting', 'Batch copy': 'Batch copy', 'Related items': 'Related items', 'Project name is required': 'Project name is required', 'Batch move': 'Batch move', Version: 'Version', 'Pre tasks': 'Pre tasks', 'Running Memory': 'Running Memory', 'Max Memory': 'Max Memory', 'Min Memory': 'Min Memory', 'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate', Info: 'Info', 'Datasource userName': 'owner', 'Resource userName': 'owner', 'Environment manage': 'Environment manage', 'Create environment': 'Create environment', 'Edit environment': 'Edit environment', 'Environment value': 'Environment value', 'Environment Name': 'Environment Name', 'Environment Code': 'Environment Code', 'Environment Config': 'Environment Config', 'Environment Desc': 'Environment Desc', 'Environment Worker Group': 'Worker Groups', 'Please enter environment config': 'Please enter environment config', 'Please enter environment desc': 'Please enter environment desc', 'Please select worker groups': 'Please select worker groups', condition: 'condition', 'The condition content cannot be empty': 'The condition content cannot be empty', 'Reference from': 'Reference from', 'No more...': 'No more...', 'Task Definition': 'Task Definition', 'Create task': 'Create task', 'Task Type': 'Task Type', 'Process execute type': 'Process execute type', parallel: 'parallel', 'Serial wait': 'Serial wait', 'Serial discard': 'Serial discard', 'Serial priority': 'Serial priority', 'Recover serial wait': 'Recover serial wait', IsEnableProxy: 'Enable Proxy', WebHook: 'WebHook', Keyword: 'Keyword', Proxy: 'Proxy', receivers: 'Receivers', receiverCcs: 'ReceiverCcs', transportProtocol: 'Transport Protocol', serverHost: 'SMTP Host', serverPort: 'SMTP Port', sender: 'Sender', enableSmtpAuth: 'SMTP Auth', starttlsEnable: 'SMTP STARTTLS Enable', sslEnable: 'SMTP SSL Enable', smtpSslTrust: 'SMTP SSL Trust', url: 'URL', requestType: 'Request Type', headerParams: 'Headers', bodyParams: 'Body', contentField: 'Content Field', path: 'Script Path', userParams: 'User Params', corpId: 'CorpId', secret: 'Secret', userSendMsg: 'UserSendMsg', agentId: 'AgentId', users: 'Users', Username: 'Username', showType: 'Show Type', 'Please select a task type (required)': 'Please select a task type (required)', layoutType: 'Layout Type', gridLayout: 'Grid', dagreLayout: 'Dagre', rows: 'Rows', cols: 'Cols', processOnline: 'Online', searchNode: 'Search Node', dagScale: 'Scale', workflowName: 'Workflow Name', scheduleStartTime: 'Schedule Start Time', scheduleEndTime: 'Schedule End Time', crontabExpression: 'Crontab', workflowPublishStatus: 'Workflow Publish Status', schedulePublishStatus: 'Schedule Publish Status' }
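The locale file above is a flat key-to-string map; the UI resolves entries through a `$t` helper (the Vue component later in this section imports it from `@/module/i18n`). A minimal sketch of that lookup pattern, with a simplified `$t` standing in for the project's actual i18n module:

```javascript
// Illustrative only: the real module lives under dolphinscheduler-ui/src/js/module/i18n.
// en_US.js and zh_CN.js each default-export a flat object, so both files must
// define the same set of keys for every string the UI displays.
const en_US = { 'Create Project': 'Create Project' }
const zh_CN = { 'Create Project': '创建项目' }

const locales = { en_US, zh_CN }
let current = 'en_US'

function $t (key) {
  // fall back to the raw key when the active locale is missing a translation
  const table = locales[current] || {}
  return table[key] !== undefined ? table[key] : key
}

current = 'zh_CN'
console.log($t('Create Project')) // prints: 创建项目
```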
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,176
[Feature][Workflow] main package
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description The main jar package was renamed the main program package. ![image](https://user-images.githubusercontent.com/19239641/144711497-9b9ff397-9284-4862-9f16-0ea59dd80930.png) ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7176
https://github.com/apache/dolphinscheduler/pull/7177
27ea43d5dfda2556fa5fb02179e7c906408007ad
2fa3157e6ec7654d593fe3f71a84b70575bb5702
"2021-12-04T13:37:48Z"
java
"2021-12-04T13:54:59Z"
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': '用户名', 'Please enter user name': '请输入用户名', Password: '密码', 'Please enter your password': '请输入密码', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间', Login: '登录', Home: '首页', 'Failed to create node to save': '未创建节点保存失败', 'Global parameters': '全局参数', 'Local parameters': '局部参数', 'Copy success': '复制成功', 'The browser does not support automatic copying': '该浏览器不支持自动复制', 'Whether to save the DAG graph': '是否保存DAG图', 'Current node settings': '当前节点设置', 'View history': '查看历史', 'View log': '查看日志', 'Force success': '强制成功', 'Enter this child node': '进入该子节点', 'Node name': '节点名称', 'Please enter name (required)': '请输入名称(必填)', 'Run flag': '运行标志', Normal: '正常', 'Prohibition execution': '禁止执行', 'Please enter description': '请输入描述', 'Number of failed retries': '失败重试次数', Times: '次', 'Failed retry interval': '失败重试间隔', Minute: '分', 'Delay execution time': '延时执行时间', 'Delay execution': '延时执行', 'Forced success': '强制成功', Cancel: '取消', 'Confirm add': '确认添加', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流', 'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流', 'Name already exists': '名称已存在请重新输入', 'Download Log': '下载日志', 'Refresh Log': '刷新日志', 'Enter full screen': '进入全屏', 'Cancel full screen': '取消全屏', Close: '关闭', 'Update log success': '更新日志成功', 'No more logs': '暂无更多日志', 'No log': '暂无日志', 'Loading Log...': '正在努力请求日志中...', 'Set the DAG diagram name': '设置DAG图名称', 'Please enter description(optional)': '请输入描述(选填)', 'Set global': '设置全局', 'Whether to go online the process definition': '是否上线流程定义', 'Whether to update the process definition': '是否更新流程定义', Add: '添加', 'DAG graph name cannot be empty': 'DAG图名称不能为空', 'Create Datasource': '创建数据源', 'Project Home': '工作流监控', 'Project Manage': '项目管理', 'Create Project': '创建项目', 'Cron Manage': '定时管理', 'Copy Workflow': '复制工作流', 'Tenant Manage': '租户管理', 'Create Tenant': '创建租户', 'User Manage': '用户管理', 'Create User': '创建用户', 'User Information': '用户信息', 'Edit Password': '密码修改', Success: '成功', Failed: '失败', Delete: '删除', 'Please choose': '请选择', 'Please enter a positive integer': '请输入正整数', 'Program Type': '程序类型', 'Main Class': '主函数的Class', 'Main Jar Package': '主Jar包', 'Please enter main jar package': '请选择主Jar包', 'Please enter main class': '请填写主函数的Class', 'Main Arguments': '主程序参数', 'Please enter main arguments': '请输入主程序参数', 'Option Parameters': '选项参数', 'Please enter option parameters': '请输入选项参数', Resources: '资源', 'Custom Parameters': '自定义参数', 'Custom template': '自定义模版', Datasource: '数据源', methods: '方法', 'Please enter the procedure method': '请输入存储脚本 \n\n调用存储过程:{call <procedure-name>[(<arg1>,<arg2>, 
...)]}\n\n调用存储函数:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': '示例:{call <procedure-name>[(?,?, ...)]} 或 {?= call <procedure-name>[(?,?, ...)]}', Script: '脚本', 'Please enter script(required)': '请输入脚本(必填)', 'Deploy Mode': '部署方式', 'Driver Cores': 'Driver核心数', 'Please enter Driver cores': '请输入Driver核心数', 'Driver Memory': 'Driver内存数', 'Please enter Driver memory': '请输入Driver内存数', 'Executor Number': 'Executor数量', 'Please enter Executor number': '请输入Executor数量', 'The Executor number should be a positive integer': 'Executor数量为正整数', 'Executor Memory': 'Executor内存数', 'Please enter Executor memory': '请输入Executor内存数', 'Executor Cores': 'Executor核心数', 'Please enter Executor cores': '请输入Executor核心数', 'Memory should be a positive integer': '内存数为数字', 'Core number should be positive integer': '核心数为正整数', 'Flink Version': 'Flink版本', 'JobManager Memory': 'JobManager内存数', 'Please enter JobManager memory': '请输入JobManager内存数', 'TaskManager Memory': 'TaskManager内存数', 'Please enter TaskManager memory': '请输入TaskManager内存数', 'Slot Number': 'Slot数量', 'Please enter Slot number': '请输入Slot数量', Parallelism: '并行度', 'Custom Parallelism': '自定义并行度', 'Please enter Parallelism': '请输入并行度', 'Parallelism number should be positive integer': '并行度必须为正整数', 'Parallelism tip': '如果存在大量任务需要补数时,可以利用自定义并行度将补数的任务线程设置成合理的数值,避免对服务器造成过大的影响', 'TaskManager Number': 'TaskManager数量', 'Please enter TaskManager number': '请输入TaskManager数量', 'App Name': '任务名称', 'Please enter app name(optional)': '请输入任务名称(选填)', 'SQL Type': 'sql类型', 'Send Email': '发送邮件', 'Log display': '日志显示', 'rows of result': '行查询结果', Title: '主题', 'Please enter the title of email': '请输入邮件主题', Table: '表名', TableMode: '表格', Attachment: '附件', 'SQL Parameter': 'sql参数', 'SQL Statement': 'sql语句', 'UDF Function': 'UDF函数', 'Please enter a SQL Statement(required)': '请输入sql语句(必填)', 'Please enter a JSON Statement(required)': '请输入json语句(必填)', 'One form or attachment must be selected': '表格、附件必须勾选一个', 'Mail subject required': '邮件主题必填', 'Child Node': '子节点', 'Please select a sub-Process': '请选择子工作流', Edit: '编辑', 'Switch To This Version': '切换到该版本', 'Datasource Name': '数据源名称', 'Please enter datasource name': '请输入数据源名称', IP: 'IP主机名', 'Please enter IP': '请输入IP主机名', Port: '端口', 'Please enter port': '请输入端口', 'Database Name': '数据库名', 'Please enter database name': '请输入数据库名', 'Oracle Connect Type': '服务名或SID', 'Oracle Service Name': '服务名', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc连接参数', 'Test Connect': '测试连接', 'Please enter resource name': '请输入数据源名称', 'Please enter resource folder name': '请输入资源文件夹名称', 'Please enter a non-query SQL statement': '请输入非查询sql语句', 'Please enter IP/hostname': '请输入IP/主机名', 'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式', '#': '编号', 'Datasource Type': '数据源类型', 'Datasource Parameter': '数据源参数', 'Create Time': '创建时间', 'Update Time': '更新时间', Operation: '操作', 'Current Version': '当前版本', 'Click to view': '点击查看', 'Delete?': '确定删除吗?', 'Switch Version Successfully': '切换版本成功', 'Confirm Switch To This Version?': '确定切换到该版本吗?', Confirm: '确定', 'Task status statistics': '任务状态统计', Number: '数量', State: '状态', 'Dry-run flag': '空跑标识', 'Process Status Statistics': '流程状态统计', 'Process Definition Statistics': '流程定义统计', 'Project Name': '项目名称', 'Please enter name': '请输入名称', 'Owned Users': '所属用户', 'Process Pid': '进程Pid', 'Zk registration directory': 'zk注册目录', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': '最后心跳时间', 'Edit Tenant': '编辑租户', 'OS Tenant Code': '操作系统租户', 'Tenant Name': '租户名称', Queue: 
'队列', 'Please select a queue': '默认为租户关联队列', 'Please enter the os tenant code in English': '请输入操作系统租户只允许英文', 'Please enter os tenant code in English': '请输入英文操作系统租户', 'Please enter os tenant code': '请输入操作系统租户', 'Please enter tenant Name': '请输入租户名称', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合', 'Edit User': '编辑用户', Tenant: '租户', Email: '邮件', Phone: '手机', 'User Type': '用户类型', 'Please enter phone number': '请输入手机', 'Please enter email': '请输入邮箱', 'Please enter the correct email format': '请输入正确的邮箱格式', 'Please enter the correct mobile phone format': '请输入正确的手机格式', Project: '项目', Authorize: '授权', 'File resources': '文件资源', 'UDF resources': 'UDF资源', 'UDF resources directory': 'UDF资源目录', 'Please select UDF resources directory': '请选择UDF资源目录', 'Alarm group': '告警组', 'Alarm group required': '告警组必填', 'Edit alarm group': '编辑告警组', 'Create alarm group': '创建告警组', 'Create Alarm Instance': '创建告警实例', 'Edit Alarm Instance': '编辑告警实例', 'Group Name': '组名称', 'Alarm instance name': '告警实例名称', 'Alarm plugin name': '告警插件名称', 'Select plugin': '选择插件', 'Select Alarm plugin': '请选择告警插件', 'Please enter group name': '请输入组名称', 'Instance parameter exception': '实例参数异常', 'Group Type': '组类型', 'Alarm plugin instance': '告警插件实例', 'Select Alarm plugin instance': '请选择告警插件实例', Remarks: '备注', SMS: '短信', 'Managing Users': '管理用户', Permission: '权限', Administrator: '管理员', 'Confirm Password': '确认密码', 'Please enter confirm password': '请输入确认密码', 'Password cannot be in Chinese': '密码不能为中文', 'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码', 'Confirmation password cannot be in Chinese': '确认密码不能为中文', 'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码', 'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认', 'Please select the datasource': '请选择数据源', 'Please select resources': '请选择资源', Query: '查询', 'Non Query': '非查询', 'prop(required)': 'prop(必填)', 'value(optional)': 'value(选填)', 'value(required)': 'value(必填)', 'prop is empty': '自定义参数prop不能为空', 'value is empty': 'value不能为空', 'prop is repeat': 'prop中有重复', 'Start Time': '开始时间', 'End Time': '结束时间', crontab: 'crontab', 'Failure Strategy': '失败策略', online: '上线', offline: '下线', 'Task Status': '任务状态', 'Process Instance': '工作流实例', 'Task Instance': '任务实例', 'Select date range': '选择日期区间', startDate: '开始日期', endDate: '结束日期', Date: '日期', Waiting: '等待', Execution: '执行中', Finish: '完成', 'Create File': '创建文件', 'Create folder': '创建文件夹', 'File Name': '文件名称', 'Folder Name': '文件夹名称', 'File Format': '文件格式', 'Folder Format': '文件夹格式', 'File Content': '文件内容', 'Upload File Size': '文件大小不能超过1G', Create: '创建', 'Please enter the resource content': '请输入资源内容', 'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行', 'File Details': '文件详情', 'Download Details': '下载详情', Return: '返回', Save: '保存', 'File Manage': '文件管理', 'Upload Files': '上传文件', 'Create UDF Function': '创建UDF函数', 'Upload UDF Resources': '上传UDF资源', 'Service-Master': '服务管理-Master', 'Service-Worker': '服务管理-Worker', 'Process Name': '工作流名称', Executor: '执行用户', 'Run Type': '运行类型', 'Scheduling Time': '调度时间', 'Run Times': '运行次数', host: 'host', 'fault-tolerant sign': '容错标识', Rerun: '重跑', 'Recovery Failed': '恢复失败', Stop: '停止', Pause: '暂停', 'Recovery Suspend': '恢复运行', Gantt: '甘特图', 'Node Type': '节点类型', 'Submit Time': '提交时间', Duration: '运行时长', 'Retry Count': '重试次数', 'Task Name': '任务名称', 'Task Date': '任务日期', 'Source Table': '源表', 'Record Number': '记录数', 'Target Table': '目标表', 'Online viewing type is not supported': '不支持在线查看类型', Size: '大小', 
Rename: '重命名', Download: '下载', Export: '导出', 'Version Info': '版本信息', Submit: '提交', 'Edit UDF Function': '编辑UDF函数', type: '类型', 'UDF Function Name': 'UDF函数名称', FILE: '文件', UDF: 'UDF', 'File Subdirectory': '文件子目录', 'Please enter a function name': '请输入函数名', 'Package Name': '包名类名', 'Please enter a Package name': '请输入包名类名', Parameter: '参数', 'Please enter a parameter': '请输入参数', 'UDF Resources': 'UDF资源', 'Upload Resources': '上传资源', Instructions: '使用说明', 'Please enter a instructions': '请输入使用说明', 'Please enter a UDF function name': '请输入UDF函数名称', 'Select UDF Resources': '请选择UDF资源', 'Class Name': '类名', 'Jar Package': 'jar包', 'Library Name': '库名', 'UDF Resource Name': 'UDF资源名称', 'File Size': '文件大小', Description: '描述', 'Drag Nodes and Selected Items': '拖动节点和选中项', 'Select Line Connection': '选择线条连接', 'Delete selected lines or nodes': '删除选中的线或节点', 'Full Screen': '全屏', Unpublished: '未发布', 'Start Process': '启动工作流', 'Execute from the current node': '从当前节点开始执行', 'Recover tolerance fault process': '恢复被容错的工作流', 'Resume the suspension process': '恢复运行流程', 'Execute from the failed nodes': '从失败节点开始执行', 'Complement Data': '补数', 'Scheduling execution': '调度执行', 'Recovery waiting thread': '恢复等待线程', 'Submitted successfully': '提交成功', Executing: '正在执行', 'Ready to pause': '准备暂停', 'Ready to stop': '准备停止', 'Need fault tolerance': '需要容错', Kill: 'Kill', 'Waiting for thread': '等待线程', 'Waiting for dependence': '等待依赖', Start: '运行', Copy: '复制节点', 'Copy name': '复制名称', 'Copy path': '复制路径', 'Please enter keyword': '请输入关键词', 'File Upload': '文件上传', 'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!', 'Drag area upload': '拖动区域上传', Upload: '上传', 'ReUpload File': '重新上传文件', 'Please enter file name': '请输入文件名', 'Please select the file to upload': '请选择要上传的文件', 'Resources manage': '资源中心', Security: '安全中心', Logout: '退出', 'No data': '查询无数据', 'Uploading...': '文件上传中', 'Loading...': '正在努力加载中...', List: '列表', 'Unable to download without proper url': '无下载url无法下载', Process: '工作流', 'Process definition': '工作流定义', 'Task record': '任务记录', 'Warning group manage': '告警组管理', 'Warning instance manage': '告警实例管理', 'Servers manage': '服务管理', 'UDF manage': 'UDF管理', 'Resource manage': '资源管理', 'Function manage': '函数管理', 'Edit password': '修改密码', 'Ordinary users': '普通用户', 'Create process': '创建工作流', 'Import process': '导入工作流', 'Timing state': '定时状态', Timing: '定时', Timezone: '时区', TreeView: '树形图', 'Mailbox already exists! 
Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复', 'Mailbox input is illegal': '邮箱输入不合法', 'Please set the parameters before starting': '启动前请先设置参数', Continue: '继续', End: '结束', 'Node execution': '节点执行', 'Backward execution': '向后执行', 'Forward execution': '向前执行', 'Execute only the current node': '仅执行当前节点', 'Notification strategy': '通知策略', 'Notification group': '通知组', 'Please select a notification group': '请选择通知组', 'Whether it is a complement process?': '是否补数', 'Schedule date': '调度日期', 'Mode of execution': '执行方式', 'Serial execution': '串行执行', 'Parallel execution': '并行执行', 'Set parameters before timing': '定时前请先设置参数', 'Start and stop time': '起止时间', 'Please select time': '请选择时间', 'Please enter crontab': '请输入crontab', none_1: '都不发', success_1: '成功发', failure_1: '失败发', All_1: '成功或失败都发', Toolbar: '工具栏', 'View variables': '查看变量', 'Format DAG': '格式化DAG', 'Refresh DAG status': '刷新DAG状态', Return_1: '返回上一节点', 'Please enter format': '请输入格式为', 'connection parameter': '连接参数', 'Process definition details': '流程定义详情', 'Create process definition': '创建流程定义', 'Scheduled task list': '定时任务列表', 'Process instance details': '流程实例详情', 'Create Resource': '创建资源', 'User Center': '用户中心', AllStatus: '全部状态', None: '无', Name: '名称', 'Process priority': '流程优先级', 'Task priority': '任务优先级', 'Task timeout alarm': '任务超时告警', 'Timeout strategy': '超时策略', 'Timeout alarm': '超时告警', 'Timeout failure': '超时失败', 'Timeout period': '超时时长', 'Waiting Dependent complete': '等待依赖完成', 'Waiting Dependent start': '等待依赖启动', 'Check interval': '检查间隔', 'Timeout must be longer than check interval': '超时时间必须比检查间隔长', 'Timeout strategy must be selected': '超时策略必须选一个', 'Timeout must be a positive integer': '超时时长必须为正整数', 'Add dependency': '添加依赖', 'Whether dry-run': '是否空跑', and: '且', or: '或', month: '月', week: '周', day: '日', hour: '时', Running: '正在运行', 'Waiting for dependency to complete': '等待依赖完成', Selected: '已选', CurrentHour: '当前小时', Last1Hour: '前1小时', Last2Hours: '前2小时', Last3Hours: '前3小时', Last24Hours: '前24小时', today: '今天', Last1Days: '昨天', Last2Days: '前两天', Last3Days: '前三天', Last7Days: '前七天', ThisWeek: '本周', LastWeek: '上周', LastMonday: '上周一', LastTuesday: '上周二', LastWednesday: '上周三', LastThursday: '上周四', LastFriday: '上周五', LastSaturday: '上周六', LastSunday: '上周日', ThisMonth: '本月', LastMonth: '上月', LastMonthBegin: '上月初', LastMonthEnd: '上月末', 'Refresh status succeeded': '刷新状态成功', 'Queue manage': 'Yarn 队列管理', 'Create queue': '创建队列', 'Edit queue': '编辑队列', 'Datasource manage': '数据源中心', 'History task record': '历史任务记录', 'Please go online': '不要忘记上线', 'Queue value': '队列值', 'Please enter queue value': '请输入队列值', 'Worker group manage': 'Worker分组管理', 'Create worker group': '创建Worker分组', 'Edit worker group': '编辑Worker分组', 'Token manage': '令牌管理', 'Create token': '创建令牌', 'Edit token': '编辑令牌', Addresses: '地址', 'Worker Addresses': 'Worker地址', 'Please select the worker addresses': '请选择Worker地址', 'Failure time': '失效时间', 'Expiration time': '失效时间', User: '用户', 'Please enter token': '请输入令牌', 'Generate token': '生成令牌', Monitor: '监控中心', Group: '分组', 'Queue statistics': '队列统计', 'Command status statistics': '命令状态统计', 'Task kill': '等待kill任务', 'Task queue': '等待执行任务', 'Error command count': '错误指令数', 'Normal command count': '正确指令数', Manage: '管理', 'Number of connections': '连接数', Sent: '发送量', Received: '接收量', 'Min latency': '最低延时', 'Avg latency': '平均延时', 'Max latency': '最大延时', 'Node count': '节点数', 'Query time': '当前查询时间', 'Node self-test status': '节点自检状态', 'Health status': '健康状态', 'Max connections': '最大连接数', 'Threads connections': '当前连接数', 'Max used connections': '同时使用连接最大数', 'Threads 
running connections': '数据库当前活跃连接数', 'Worker group': 'Worker分组', 'Please enter a positive integer greater than 0': '请输入大于 0 的正整数', 'Pre Statement': '前置sql', 'Post Statement': '后置sql', 'Statement cannot be empty': '语句不能为空', 'Process Define Count': '工作流定义数', 'Process Instance Running Count': '正在运行的流程数', 'command number of waiting for running': '待执行的命令数', 'failure command number': '执行失败的命令数', 'tasks number of waiting running': '待运行任务数', 'task number of ready to kill': '待杀死任务数', 'Statistics manage': '统计管理', statistics: '统计', 'select tenant': '选择租户', 'Please enter Principal': '请输入Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path', 'The start time must not be the same as the end': '开始时间和结束时间不能相同', 'Startup parameter': '启动参数', 'Startup type': '启动类型', 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', 'Complement range': '补数范围', 'Http Url': '请求地址', 'Http Method': '请求类型', 'Http Parameters': '请求参数', 'Http Parameters Key': '参数名', 'Http Parameters Position': '参数位置', 'Http Parameters Value': '参数值', 'Http Check Condition': '校验条件', 'Http Condition': '校验内容', 'Please Enter Http Url': '请填写请求地址(必填)', 'Please Enter Http Condition': '请填写校验内容', 'There is no data for this period of time': '该时间段无数据', 'Worker addresses cannot be empty': 'Worker地址不能为空', 'Please generate token': '请生成Token', 'Please Select token': '请选择Token失效时间', 'Spark Version': 'Spark版本', TargetDataBase: '目标库', TargetTable: '目标表', TargetJobName: '目标任务名', 'Please enter Pigeon job name': '请输入Pigeon任务名', 'Please enter the table of target': '请输入目标表名', 'Please enter a Target Table(required)': '请输入目标表(必填)', SpeedByte: '限流(字节数)', SpeedRecord: '限流(记录数)', '0 means unlimited by byte': 'KB,0代表不限制', '0 means unlimited by count': '0代表不限制', 'Modify User': '修改用户', 'Whether directory': '是否文件夹', Yes: '是', No: '否', 'Hadoop Custom Params': 'Hadoop参数', 'Sqoop Advanced Parameters': 'Sqoop参数', 'Sqoop Job Name': '任务名称', 'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)', 'Please enter Mysql Table(required)': '请输入Mysql表名(必填)', 'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开', 'Please enter Target Dir(required)': '请输入目标路径(必填)', 'Please enter Export Dir(required)': '请输入数据源路径(必填)', 'Please enter Hive Database(required)': '请输入Hive数据库(必填)', 'Please enter Hive Table(required)': '请输入Hive表名(必填)', 'Please enter Hive Partition Keys': '请输入分区键', 'Please enter Hive Partition Values': '请输入分区值', 'Please enter Replace Delimiter': '请输入替换分隔符', 'Please enter Fields Terminated': '请输入列分隔符', 'Please enter Lines Terminated': '请输入行分隔符', 'Please enter Concurrency': '请输入并发度', 'Please enter Update Key': '请输入更新列', 'Please enter Job Name(required)': '请输入任务名称(必填)', 'Please enter Custom Shell(required)': '请输入自定义脚本', Direct: '流向', Type: '类型', ModelType: '模式', ColumnType: '列类型', Database: '数据库', Column: '列', 'Map Column Hive': 'Hive类型映射', 'Map Column Java': 'Java类型映射', 'Export Dir': '数据源路径', 'Hive partition Keys': 'Hive 分区键', 'Hive partition Values': 'Hive 分区值', FieldsTerminated: '列分隔符', LinesTerminated: '行分隔符', IsUpdate: '是否更新', UpdateKey: '更新列', UpdateMode: '更新类型', 'Target Dir': '目标路径', DeleteTargetDir: '是否删除目录', FileType: '保存格式', CompressionCodec: '压缩类型', CreateHiveTable: '是否创建新表', DropDelimiter: '是否删除分隔符', OverWriteSrc: 
'是否覆盖数据源', ReplaceDelimiter: '替换分隔符', Concurrency: '并发度', Form: '表单', OnlyUpdate: '只更新', AllowInsert: '无更新便插入', 'Data Source': '数据来源', 'Data Target': '数据目的', 'All Columns': '全表导入', 'Some Columns': '选择列', 'Branch flow': '分支流转', 'Custom Job': '自定义任务', 'Custom Script': '自定义脚本', 'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点', 'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填', 'No resources exist': '不存在资源', 'Please delete all non-existing resources': '请删除所有不存在资源', 'Unauthorized or deleted resources': '未授权或已删除资源', 'Please delete all non-existent resources': '请删除所有未授权或已删除资源', Kinship: '工作流关系', Reset: '重置', KinshipStateActive: '当前选择', KinshipState1: '已上线', KinshipState0: '工作流未上线', KinshipState10: '调度未上线', 'Dag label display control': 'Dag节点名称显隐', Enable: '启用', Disable: '停用', 'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!', 'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存', 'User name length is between 3 and 39': '用户名长度在3~39之间', 'Timeout Settings': '超时设置', 'Connect Timeout': '连接超时', 'Socket Timeout': 'Socket超时', 'Connect timeout be a positive integer': '连接超时必须为数字', 'Socket Timeout be a positive integer': 'Socket超时必须为数字', ms: '毫秒', 'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': '请选择waterdrop配置文件', zkDirectory: 'zk注册目录', 'Directory detail': '查看目录详情', 'Connection name': '连线名', 'Current connection settings': '当前连线设置', 'Please save the DAG before formatting': '格式化前请先保存DAG', 'Batch copy': '批量复制', 'Related items': '关联项目', 'Project name is required': '项目名称必填', 'Batch move': '批量移动', Version: '版本', 'Pre tasks': '前置任务', 'Running Memory': '运行内存', 'Max Memory': '最大内存', 'Min Memory': '最小内存', 'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建', Info: '提示', 'Datasource userName': '所属用户', 'Resource userName': '所属用户', 'Environment manage': '环境管理', 'Create environment': '创建环境', 'Edit environment': '编辑', 'Environment value': 'Environment value', 'Environment Name': '环境名称', 'Environment Code': '环境编码', 'Environment Config': '环境配置', 'Environment Desc': '详细描述', 'Environment Worker Group': 'Worker组', 'Please enter environment config': '请输入环境配置信息', 'Please enter environment desc': '请输入详细描述', 'Please select worker groups': '请选择Worker分组', condition: '条件', 'The condition content cannot be empty': '条件内容不能为空', 'Reference from': '使用已有任务', 'No more...': '没有更多了...', 'Task Definition': '任务定义', 'Create task': '创建任务', 'Task Type': '任务类型', 'Process execute type': '执行策略', parallel: '并行', 'Serial wait': '串行等待', 'Serial discard': '串行抛弃', 'Serial priority': '串行优先', 'Recover serial wait': '串行恢复', IsEnableProxy: '启用代理', WebHook: 'Web钩子', Keyword: '密钥', Proxy: '代理', receivers: '收件人', receiverCcs: '抄送人', transportProtocol: '邮件协议', serverHost: 'SMTP服务器', serverPort: 'SMTP端口', sender: '发件人', enableSmtpAuth: '请求认证', starttlsEnable: 'STARTTLS连接', sslEnable: 'SSL连接', smtpSslTrust: 'SSL证书信任', url: 'URL', requestType: '请求方式', headerParams: '请求头', bodyParams: '请求体', contentField: '内容字段', path: '脚本路径', userParams: '自定义参数', corpId: '企业ID', secret: '密钥', teamSendMsg: '群发信息', userSendMsg: '群员信息', agentId: '应用ID', users: '群员', Username: '用户名', showType: '内容展示类型', 'Please select a task type (required)': '请选择任务类型(必选)', layoutType: '布局类型', gridLayout: '网格布局', dagreLayout: '层次布局', rows: '行数', cols: '列数', processOnline: '已上线', searchNode: '搜索节点', dagScale: '缩放', 
workflowName: '工作流名称', scheduleStartTime: '定时开始时间', scheduleEndTime: '定时结束时间', crontabExpression: 'Crontab', workflowPublishStatus: '工作流上线状态', schedulePublishStatus: '定时状态' }
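Taken together with issue #7176 above, the two en_US.js snapshots in this section bracket the requested rename: the first still carries the 'Main Jar Package' keys, while the snapshot in the final record already uses 'Main Package'. The zh_CN.js snapshot here predates the change ('主Jar包'). A minimal sketch of the key rename, drawn from those snapshots (the post-fix Chinese value is not shown in this document, so it is omitted):

```javascript
// i18n keys affected by the rename requested in issue #7176 (see PR #7177).
// Lookups are by exact string, so any component that previously called
// $t('Main Jar Package') presumably has to switch to the new key as well.
const beforeFix = {
  'Main Jar Package': 'Main Jar Package',
  'Please enter main jar package': 'Please enter main jar package'
}
const afterFix = {
  'Main Package': 'Main Package',
  'Please enter main package': 'Please enter main package'
}
```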
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,183
[Bug] [Warning instance manage] When selecting `slack`, an error message will appear, and the corresponding form will not appear.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When selecting `slack`, an error message appears and the corresponding form does not appear. <img width="1649" alt="38dcd65fbb30c4b9f4a49abb1167530" src="https://user-images.githubusercontent.com/19239641/144731706-91114a7e-a9ad-4fcf-bfd4-a76f86f47f83.png"> ### What you expected to happen `i18n` does not support lower camel case (lowerCamelCase) keys. ### How to reproduce Choose `slack`. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7183
https://github.com/apache/dolphinscheduler/pull/7184
2fa3157e6ec7654d593fe3f71a84b70575bb5702
eb07a2b1c953596dd0c2a420f8bd31334f6dfd3e
"2021-12-05T03:03:05Z"
java
"2021-12-05T03:23:50Z"
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/warningInstance/_source/createWarningInstance.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <m-popover ref="popover" :ok-text="item ? $t('Edit') : $t('Submit')" @ok="_ok" @close="close"> <template slot="content"> <div class="create-warning-model"> <m-list-box-f> <template slot="name"><strong>*</strong>{{$t('Alarm instance name')}}</template> <template slot="content"> <el-input type="input" v-model="instanceName" maxlength="60" size="small" :placeholder="$t('Please enter group name')"> </el-input> </template> </m-list-box-f> <m-list-box-f> <template slot="name"><strong>*</strong>{{$t('Select plugin')}}</template> <template slot="content"> <el-select v-model="pluginDefineId" size="small" style="width: 100%" @change="changePlugin" disabled="true" v-if="item.id"> <el-option v-for="items in pluginInstance" :key="items.id" :value="items.id" :label="items.pluginName"> </el-option> </el-select> <el-select v-model="pluginDefineId" size="small" style="width: 100%" @change="changePlugin" v-else> <el-option v-for="items in pluginInstance" :key="items.id" :value="items.id" :label="items.pluginName"> </el-option> </el-select> </template> </m-list-box-f> <div class="alertForm"> <template> <form-create v-model="$f" :rule="rule" :option="{submitBtn:false}" size="mini"></form-create> </template> </div> </div> </template> </m-popover> </template> <script> import i18n from '@/module/i18n' import store from '@/conf/home/store' import mPopover from '@/module/components/popup/popover' import mListBoxF from '@/module/components/listBoxF/listBoxF' export default { name: 'create-warning', data () { return { store, instanceName: '', pluginDefineId: null, $f: {}, rule: [] } }, props: { item: Object, pluginInstance: Array }, methods: { _ok () { if (this._verification()) { // The name is not verified if (this.item && this.item.instanceName === this.instanceName) { this._submit() return } // Verify username this.store.dispatch('security/verifyName', { type: 'alarmInstance', instanceName: this.instanceName }).then(res => { this._submit() }).catch(e => { this.$message.error(e.msg || '') }) } }, _verification () { // group name if (!this.instanceName.replace(/\s*/g, '')) { this.$message.warning(`${i18n.$t('Please enter group name')}`) return false } if (!this.pluginDefineId) { this.$message.warning(`${i18n.$t('Select Alarm plugin')}`) return false } return true }, // Select plugin changePlugin () { this.store.dispatch('security/getUiPluginById', { pluginId: this.pluginDefineId }).then(res => { this.rule = JSON.parse(res.pluginParams) this.rule.forEach(item => { if (item.title.indexOf('$t') !== -1) { item.title = this.$t(item.field) } // fix null pointer exception if (!item.props) { item.props = {} } }) }).catch(e => { this.$message.error(e.msg || '') }) }, _submit () { this.$f.validate((valid) => { if (valid) { this.$f.rule.forEach(item 
=> { item.title = item.name }) let param = { instanceName: this.instanceName, pluginDefineId: this.pluginDefineId, pluginInstanceParams: JSON.stringify(this.$f.rule) } if (this.item) { param.alertPluginInstanceId = this.item.id param.pluginDefineId = null } this.$refs.popover.spinnerLoading = true this.store.dispatch(`security/${this.item ? 'updateAlertPluginInstance' : 'createAlertPluginInstance'}`, param).then(res => { this.$refs.popover.spinnerLoading = false this.$emit('onUpdate') this.$message.success(res.msg) }).catch(e => { this.$message.error(e.msg || '') this.$refs.popover.spinnerLoading = false }) } else { this.$message.warning(`${i18n.$t('Instance parameter exception')}`) this.$refs.popover.spinnerLoading = false } }) }, close () { this.$emit('close') } }, watch: {}, created () { if (this.item) { this.instanceName = this.item.instanceName this.pluginDefineId = this.item.pluginDefineId this.rule = JSON.parse(this.item.pluginInstanceParams).map(item => { if (item.title.indexOf('$t') !== -1) { item.title = this.$t(item.field) } item.props = item.props || {} return item }) } }, components: { mPopover, mListBoxF } } </script> <style lang="scss" rel="stylesheet/scss"> .alertForm { label { span { font-weight: 10!important; } } .el-row { width: 520px; } .el-form-item__label { width: 144px!important; color: #606266!important; } .el-form-item__content { margin-left: 144px!important; width: calc(100% - 162px); } } </style>
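In `changePlugin()` and `created()` above, the component rewrites each form-create rule title via `this.$t(item.field)` whenever the raw title contains a `$t` marker, and backfills a missing `props` object. This is the path issue #7183 exercises: the alert-plugin field names are lower-camel-case keys (such as `serverHost` and `requestType`, both present in the locale snapshots in this section) and must resolve through i18n, or the dynamically built form breaks. A simplified sketch of that flow outside Vue, with a plain `$t` standing in for the component's `this.$t`:

```javascript
// Sketch of the title-localization step in createWarningInstance.vue, not the
// component itself; `rules` mimics the parsed pluginParams from the backend.
function localizeRules (rules, $t) {
  return rules.map(item => {
    // plugin params ship titles like "$t('serverHost')"; when the marker is
    // present, the rule's field name is used as the i18n key instead
    if (item.title.indexOf('$t') !== -1) {
      item.title = $t(item.field)
    }
    // guard against null props ("fix null pointer exception" in the component)
    item.props = item.props || {}
    return item
  })
}

const rules = [{ title: "$t('serverHost')", field: 'serverHost', props: null }]
console.log(localizeRules(rules, key => ({ serverHost: 'SMTP Host' }[key] || key)))
// -> [ { title: 'SMTP Host', field: 'serverHost', props: {} } ]
```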
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,183
[Bug] [Warning instance manage] When selecting `slack`, an error message will appear, and the corresponding form will not appear.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When selecting `slack`, an error message appears and the corresponding form does not appear. <img width="1649" alt="38dcd65fbb30c4b9f4a49abb1167530" src="https://user-images.githubusercontent.com/19239641/144731706-91114a7e-a9ad-4fcf-bfd4-a76f86f47f83.png"> ### What you expected to happen `i18n` does not support lower camel case (lowerCamelCase) keys. ### How to reproduce Choose `slack`. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7183
https://github.com/apache/dolphinscheduler/pull/7184
2fa3157e6ec7654d593fe3f71a84b70575bb5702
eb07a2b1c953596dd0c2a420f8bd31334f6dfd3e
"2021-12-05T03:03:05Z"
java
"2021-12-05T03:23:50Z"
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
export default {
  'User Name': 'User Name',
  'Please enter user name': 'Please enter user name',
  Password: 'Password',
  'Please enter your password': 'Please enter your password',
  'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22',
  Login: 'Login',
  Home: 'Home',
  'Failed to create node to save': 'Failed to create node to save',
  'Global parameters': 'Global parameters',
  'Local parameters': 'Local parameters',
  'Copy success': 'Copy success',
  'The browser does not support automatic copying': 'The browser does not support automatic copying',
  'Whether to save the DAG graph': 'Whether to save the DAG graph',
  'Current node settings': 'Current node settings',
  'View history': 'View history',
  'View log': 'View log',
  'Force success': 'Force success',
  'Enter this child node': 'Enter this child node',
  'Node name': 'Node name',
  'Please enter name (required)': 'Please enter name (required)',
  'Run flag': 'Run flag',
  Normal: 'Normal',
  'Prohibition execution': 'Prohibition execution',
  'Please enter description': 'Please enter description',
  'Number of failed retries': 'Number of failed retries',
  Times: 'Times',
  'Failed retry interval': 'Failed retry interval',
  Minute: 'Minute',
  'Delay execution time': 'Delay execution time',
  'Delay execution': 'Delay execution',
  'Forced success': 'Forced success',
  Cancel: 'Cancel',
  'Confirm add': 'Confirm add',
  'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process',
  'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process',
  'Name already exists': 'Name already exists',
  'Download Log': 'Download Log',
  'Refresh Log': 'Refresh Log',
  'Enter full screen': 'Enter full screen',
  'Cancel full screen': 'Cancel full screen',
  Close: 'Close',
  'Update log success': 'Update log success',
  'No more logs': 'No more logs',
  'No log': 'No log',
  'Loading Log...': 'Loading Log...',
  'Set the DAG diagram name': 'Set the DAG diagram name',
  'Please enter description(optional)': 'Please enter description(optional)',
  'Set global': 'Set global',
  'Whether to go online the process definition': 'Whether to go online the process definition',
  'Whether to update the process definition': 'Whether to update the process definition',
  Add: 'Add',
  'DAG graph name cannot be empty': 'DAG graph name cannot be empty',
  'Create Datasource': 'Create Datasource',
  'Project Home': 'Workflow Monitor',
  'Project Manage': 'Project',
  'Create Project': 'Create Project',
  'Cron Manage': 'Cron Manage',
  'Copy Workflow': 'Copy Workflow',
  'Tenant Manage': 'Tenant Manage',
  'Create Tenant': 'Create Tenant',
  'User Manage': 'User Manage',
  'Create User': 'Create User',
  'User Information': 'User Information',
  'Edit Password': 'Edit Password',
  Success: 'Success',
  Failed: 'Failed',
  Delete: 'Delete',
  'Please choose': 'Please choose',
  'Please enter a positive integer': 'Please enter a positive integer',
  'Program Type': 'Program Type',
  'Main Class': 'Main Class',
  'Main Package': 'Main Package',
  'Please enter main package': 'Please enter main package',
  'Please enter main class': 'Please enter main class',
  'Main Arguments': 'Main Arguments',
  'Please enter main arguments': 'Please enter main arguments',
  'Option Parameters': 'Option Parameters',
  'Please enter option parameters': 'Please enter option parameters',
  Resources: 'Resources',
  'Custom Parameters': 'Custom Parameters',
  'Custom template': 'Custom template',
  Datasource: 'Datasource',
  methods: 'methods',
  'Please enter the procedure method': 'Please enter the procedure script \n\ncall procedure:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\ncall function:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ',
  'The procedure method script example': 'example:{call <procedure-name>[(?,?, ...)]} or {?= call <procedure-name>[(?,?, ...)]}',
  Script: 'Script',
  'Please enter script(required)': 'Please enter script(required)',
  'Deploy Mode': 'Deploy Mode',
  'Driver Cores': 'Driver Cores',
  'Please enter Driver cores': 'Please enter Driver cores',
  'Driver Memory': 'Driver Memory',
  'Please enter Driver memory': 'Please enter Driver memory',
  'Executor Number': 'Executor Number',
  'Please enter Executor number': 'Please enter Executor number',
  'The Executor number should be a positive integer': 'The Executor number should be a positive integer',
  'Executor Memory': 'Executor Memory',
  'Please enter Executor memory': 'Please enter Executor memory',
  'Executor Cores': 'Executor Cores',
  'Please enter Executor cores': 'Please enter Executor cores',
  'Memory should be a positive integer': 'Memory should be a positive integer',
  'Core number should be positive integer': 'Core number should be positive integer',
  'Flink Version': 'Flink Version',
  'JobManager Memory': 'JobManager Memory',
  'Please enter JobManager memory': 'Please enter JobManager memory',
  'TaskManager Memory': 'TaskManager Memory',
  'Please enter TaskManager memory': 'Please enter TaskManager memory',
  'Slot Number': 'Slot Number',
  'Please enter Slot number': 'Please enter Slot number',
  Parallelism: 'Parallelism',
  'Custom Parallelism': 'Configure parallelism',
  'Please enter Parallelism': 'Please enter Parallelism',
  'Parallelism tip': 'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' +
    'set the complement task thread to a reasonable value to avoid too large impact on the server.',
  'Parallelism number should be positive integer': 'Parallelism number should be positive integer',
  'TaskManager Number': 'TaskManager Number',
  'Please enter TaskManager number': 'Please enter TaskManager number',
  'App Name': 'App Name',
  'Please enter app name(optional)': 'Please enter app name(optional)',
  'SQL Type': 'SQL Type',
  'Send Email': 'Send Email',
  'Log display': 'Log display',
  'rows of result': 'rows of result',
  Title: 'Title',
  'Please enter the title of email': 'Please enter the title of email',
  Table: 'Table',
  TableMode: 'Table',
  Attachment: 'Attachment',
  'SQL Parameter': 'SQL Parameter',
  'SQL Statement': 'SQL Statement',
  'UDF Function': 'UDF Function',
  'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)',
  'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)',
  'One form or attachment must be selected': 'One form or attachment must be selected',
  'Mail subject required': 'Mail subject required',
  'Child Node': 'Child Node',
  'Please select a sub-Process': 'Please select a sub-Process',
  Edit: 'Edit',
  'Switch To This Version': 'Switch To This Version',
  'Datasource Name': 'Datasource Name',
  'Please enter datasource name': 'Please enter datasource name',
  IP: 'IP',
  'Please enter IP': 'Please enter IP',
  Port: 'Port',
  'Please enter port': 'Please enter port',
  'Database Name': 'Database Name',
  'Please enter database name': 'Please enter database name',
  'Oracle Connect Type': 'ServiceName or SID',
  'Oracle Service Name': 'ServiceName',
  'Oracle SID': 'SID',
  'jdbc connect parameters': 'jdbc connect parameters',
  'Test Connect': 'Test Connect',
  'Please enter resource name': 'Please enter resource name',
  'Please enter resource folder name': 'Please enter resource folder name',
  'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement',
  'Please enter IP/hostname': 'Please enter IP/hostname',
  'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format',
  '#': '#',
  'Datasource Type': 'Datasource Type',
  'Datasource Parameter': 'Datasource Parameter',
  'Create Time': 'Create Time',
  'Update Time': 'Update Time',
  Operation: 'Operation',
  'Current Version': 'Current Version',
  'Click to view': 'Click to view',
  'Delete?': 'Delete?',
  'Switch Version Successfully': 'Switch Version Successfully',
  'Confirm Switch To This Version?': 'Confirm Switch To This Version?',
  Confirm: 'Confirm',
  'Task status statistics': 'Task Status Statistics',
  Number: 'Number',
  State: 'State',
  'Dry-run flag': 'Dry-run flag',
  'Process Status Statistics': 'Process Status Statistics',
  'Process Definition Statistics': 'Process Definition Statistics',
  'Project Name': 'Project Name',
  'Please enter name': 'Please enter name',
  'Owned Users': 'Owned Users',
  'Process Pid': 'Process Pid',
  'Zk registration directory': 'Zk registration directory',
  cpuUsage: 'cpuUsage',
  memoryUsage: 'memoryUsage',
  'Last heartbeat time': 'Last heartbeat time',
  'Edit Tenant': 'Edit Tenant',
  'OS Tenant Code': 'OS Tenant Code',
  'Tenant Name': 'Tenant Name',
  Queue: 'Yarn Queue',
  'Please select a queue': 'default is tenant association queue',
  'Please enter the os tenant code in English': 'Please enter the os tenant code in English',
  'Please enter os tenant code in English': 'Please enter os tenant code in English',
  'Please enter os tenant code': 'Please enter os tenant code',
  'Please enter tenant Name': 'Please enter tenant Name',
  'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. Only letters or a combination of letters and numbers are allowed',
  'Edit User': 'Edit User',
  Tenant: 'Tenant',
  Email: 'Email',
  Phone: 'Phone',
  'User Type': 'User Type',
  'Please enter phone number': 'Please enter phone number',
  'Please enter email': 'Please enter email',
  'Please enter the correct email format': 'Please enter the correct email format',
  'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format',
  Project: 'Project',
  Authorize: 'Authorize',
  'File resources': 'File resources',
  'UDF resources': 'UDF resources',
  'UDF resources directory': 'UDF resources directory',
  'Please select UDF resources directory': 'Please select UDF resources directory',
  'Alarm group': 'Alarm group',
  'Alarm group required': 'Alarm group required',
  'Edit alarm group': 'Edit alarm group',
  'Create alarm group': 'Create alarm group',
  'Create Alarm Instance': 'Create Alarm Instance',
  'Edit Alarm Instance': 'Edit Alarm Instance',
  'Group Name': 'Group Name',
  'Alarm instance name': 'Alarm instance name',
  'Alarm plugin name': 'Alarm plugin name',
  'Select plugin': 'Select plugin',
  'Select Alarm plugin': 'Please select an Alarm plugin',
  'Please enter group name': 'Please enter group name',
  'Instance parameter exception': 'Instance parameter exception',
  'Group Type': 'Group Type',
  'Alarm plugin instance': 'Alarm plugin instance',
  'Select Alarm plugin instance': 'Please select an Alarm plugin instance',
  Remarks: 'Remarks',
  SMS: 'SMS',
  'Managing Users': 'Managing Users',
  Permission: 'Permission',
  Administrator: 'Administrator',
  'Confirm Password': 'Confirm Password',
  'Please enter confirm password': 'Please enter confirm password',
  'Password cannot be in Chinese': 'Password cannot be in Chinese',
  'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password',
  'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese',
  'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password',
  'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password',
  'Please select the datasource': 'Please select the datasource',
  'Please select resources': 'Please select resources',
  Query: 'Query',
  'Non Query': 'Non Query',
  'prop(required)': 'prop(required)',
  'value(optional)': 'value(optional)',
  'value(required)': 'value(required)',
  'prop is empty': 'prop is empty',
  'value is empty': 'value is empty',
  'prop is repeat': 'prop is repeat',
  'Start Time': 'Start Time',
  'End Time': 'End Time',
  crontab: 'crontab',
  'Failure Strategy': 'Failure Strategy',
  online: 'online',
  offline: 'offline',
  'Task Status': 'Task Status',
  'Process Instance': 'Process Instance',
  'Task Instance': 'Task Instance',
  'Select date range': 'Select date range',
  startDate: 'startDate',
  endDate: 'endDate',
  Date: 'Date',
  Waiting: 'Waiting',
  Execution: 'Execution',
  Finish: 'Finish',
  'Create File': 'Create File',
  'Create folder': 'Create folder',
  'File Name': 'File Name',
  'Folder Name': 'Folder Name',
  'File Format': 'File Format',
  'Folder Format': 'Folder Format',
  'File Content': 'File Content',
  'Upload File Size': 'Upload File size cannot exceed 1g',
  Create: 'Create',
  'Please enter the resource content': 'Please enter the resource content',
  'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines',
  'File Details': 'File Details',
  'Download Details': 'Download Details',
  Return: 'Return',
  Save: 'Save',
  'File Manage': 'File Manage',
  'Upload Files': 'Upload Files',
  'Create UDF Function': 'Create UDF Function',
  'Upload UDF Resources': 'Upload UDF Resources',
  'Service-Master': 'Service-Master',
  'Service-Worker': 'Service-Worker',
  'Process Name': 'Process Name',
  Executor: 'Executor',
  'Run Type': 'Run Type',
  'Scheduling Time': 'Scheduling Time',
  'Run Times': 'Run Times',
  host: 'host',
  'fault-tolerant sign': 'fault-tolerant sign',
  Rerun: 'Rerun',
  'Recovery Failed': 'Recovery Failed',
  Stop: 'Stop',
  Pause: 'Pause',
  'Recovery Suspend': 'Recovery Suspend',
  Gantt: 'Gantt',
  'Node Type': 'Node Type',
  'Submit Time': 'Submit Time',
  Duration: 'Duration',
  'Retry Count': 'Retry Count',
  'Task Name': 'Task Name',
  'Task Date': 'Task Date',
  'Source Table': 'Source Table',
  'Record Number': 'Record Number',
  'Target Table': 'Target Table',
  'Online viewing type is not supported': 'Online viewing type is not supported',
  Size: 'Size',
  Rename: 'Rename',
  Download: 'Download',
  Export: 'Export',
  'Version Info': 'Version Info',
  Submit: 'Submit',
  'Edit UDF Function': 'Edit UDF Function',
  type: 'type',
  'UDF Function Name': 'UDF Function Name',
  FILE: 'FILE',
  UDF: 'UDF',
  'File Subdirectory': 'File Subdirectory',
  'Please enter a function name': 'Please enter a function name',
  'Package Name': 'Package Name',
  'Please enter a Package name': 'Please enter a Package name',
  Parameter: 'Parameter',
  'Please enter a parameter': 'Please enter a parameter',
  'UDF Resources': 'UDF Resources',
  'Upload Resources': 'Upload Resources',
  Instructions: 'Instructions',
  'Please enter a instructions': 'Please enter a instructions',
  'Please enter a UDF function name': 'Please enter a UDF function name',
  'Select UDF Resources': 'Select UDF Resources',
  'Class Name': 'Class Name',
  'Jar Package': 'Jar Package',
  'Library Name': 'Library Name',
  'UDF Resource Name': 'UDF Resource Name',
  'File Size': 'File Size',
  Description: 'Description',
  'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items',
  'Select Line Connection': 'Select Line Connection',
  'Delete selected lines or nodes': 'Delete selected lines or nodes',
  'Full Screen': 'Full Screen',
  Unpublished: 'Unpublished',
  'Start Process': 'Start Process',
  'Execute from the current node': 'Execute from the current node',
  'Recover tolerance fault process': 'Recover tolerance fault process',
  'Resume the suspension process': 'Resume the suspension process',
  'Execute from the failed nodes': 'Execute from the failed nodes',
  'Complement Data': 'Complement Data',
  'Scheduling execution': 'Scheduling execution',
  'Recovery waiting thread': 'Recovery waiting thread',
  'Submitted successfully': 'Submitted successfully',
  Executing: 'Executing',
  'Ready to pause': 'Ready to pause',
  'Ready to stop': 'Ready to stop',
  'Need fault tolerance': 'Need fault tolerance',
  Kill: 'Kill',
  'Waiting for thread': 'Waiting for thread',
  'Waiting for dependence': 'Waiting for dependence',
  Start: 'Start',
  Copy: 'Copy',
  'Copy name': 'Copy name',
  'Copy path': 'Copy path',
  'Please enter keyword': 'Please enter keyword',
  'File Upload': 'File Upload',
  'Drag the file into the current upload window': 'Drag the file into the current upload window',
  'Drag area upload': 'Drag area upload',
  Upload: 'Upload',
  'ReUpload File': 'ReUpload File',
  'Please enter file name': 'Please enter file name',
  'Please select the file to upload': 'Please select the file to upload',
  'Resources manage': 'Resources',
  Security: 'Security',
  Logout: 'Logout',
  'No data': 'No data',
  'Uploading...': 'Uploading...',
  'Loading...': 'Loading...',
  List: 'List',
  'Unable to download without proper url': 'Unable to download without proper url',
  Process: 'Process',
  'Process definition': 'Process definition',
  'Task record': 'Task record',
  'Warning group manage': 'Warning group manage',
  'Warning instance manage': 'Warning instance manage',
  'Servers manage': 'Servers manage',
  'UDF manage': 'UDF manage',
  'Resource manage': 'Resource manage',
  'Function manage': 'Function manage',
  'Edit password': 'Edit password',
  'Ordinary users': 'Ordinary users',
  'Create process': 'Create process',
  'Import process': 'Import process',
  'Timing state': 'Timing state',
  Timing: 'Timing',
  Timezone: 'Timezone',
  TreeView: 'TreeView',
  'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat',
  'Mailbox input is illegal': 'Mailbox input is illegal',
  'Please set the parameters before starting': 'Please set the parameters before starting',
  Continue: 'Continue',
  End: 'End',
  'Node execution': 'Node execution',
  'Backward execution': 'Backward execution',
  'Forward execution': 'Forward execution',
  'Execute only the current node': 'Execute only the current node',
  'Notification strategy': 'Notification strategy',
  'Notification group': 'Notification group',
  'Please select a notification group': 'Please select a notification group',
  'Whether it is a complement process?': 'Whether it is a complement process?',
  'Schedule date': 'Schedule date',
  'Mode of execution': 'Mode of execution',
  'Serial execution': 'Serial execution',
  'Parallel execution': 'Parallel execution',
  'Set parameters before timing': 'Set parameters before timing',
  'Start and stop time': 'Start and stop time',
  'Please select time': 'Please select time',
  'Please enter crontab': 'Please enter crontab',
  none_1: 'none',
  success_1: 'success',
  failure_1: 'failure',
  All_1: 'All',
  Toolbar: 'Toolbar',
  'View variables': 'View variables',
  'Format DAG': 'Format DAG',
  'Refresh DAG status': 'Refresh DAG status',
  Return_1: 'Return',
  'Please enter format': 'Please enter format',
  'connection parameter': 'connection parameter',
  'Process definition details': 'Process definition details',
  'Create process definition': 'Create process definition',
  'Scheduled task list': 'Scheduled task list',
  'Process instance details': 'Process instance details',
  'Create Resource': 'Create Resource',
  'User Center': 'User Center',
  AllStatus: 'All',
  None: 'None',
  Name: 'Name',
  'Process priority': 'Process priority',
  'Task priority': 'Task priority',
  'Task timeout alarm': 'Task timeout alarm',
  'Timeout strategy': 'Timeout strategy',
  'Timeout alarm': 'Timeout alarm',
  'Timeout failure': 'Timeout failure',
  'Timeout period': 'Timeout period',
  'Waiting Dependent complete': 'Waiting Dependent complete',
  'Waiting Dependent start': 'Waiting Dependent start',
  'Check interval': 'Check interval',
  'Timeout must be longer than check interval': 'Timeout must be longer than check interval',
  'Timeout strategy must be selected': 'Timeout strategy must be selected',
  'Timeout must be a positive integer': 'Timeout must be a positive integer',
  'Add dependency': 'Add dependency',
  'Whether dry-run': 'Whether dry-run',
  and: 'and',
  or: 'or',
  month: 'month',
  week: 'week',
  day: 'day',
  hour: 'hour',
  Running: 'Running',
  'Waiting for dependency to complete': 'Waiting for dependency to complete',
  Selected: 'Selected',
  CurrentHour: 'CurrentHour',
  Last1Hour: 'Last1Hour',
  Last2Hours: 'Last2Hours',
  Last3Hours: 'Last3Hours',
  Last24Hours: 'Last24Hours',
  today: 'today',
  Last1Days: 'Last1Days',
  Last2Days: 'Last2Days',
  Last3Days: 'Last3Days',
  Last7Days: 'Last7Days',
  ThisWeek: 'ThisWeek',
  LastWeek: 'LastWeek',
  LastMonday: 'LastMonday',
  LastTuesday: 'LastTuesday',
  LastWednesday: 'LastWednesday',
  LastThursday: 'LastThursday',
  LastFriday: 'LastFriday',
  LastSaturday: 'LastSaturday',
  LastSunday: 'LastSunday',
  ThisMonth: 'ThisMonth',
  LastMonth: 'LastMonth',
  LastMonthBegin: 'LastMonthBegin',
  LastMonthEnd: 'LastMonthEnd',
  'Refresh status succeeded': 'Refresh status succeeded',
  'Queue manage': 'Yarn Queue manage',
  'Create queue': 'Create queue',
  'Edit queue': 'Edit queue',
  'Datasource manage': 'Datasource',
  'History task record': 'History task record',
  'Please go online': 'Please go online',
  'Queue value': 'Queue value',
  'Please enter queue value': 'Please enter queue value',
  'Worker group manage': 'Worker group manage',
  'Create worker group': 'Create worker group',
  'Edit worker group': 'Edit worker group',
  'Token manage': 'Token manage',
  'Create token': 'Create token',
  'Edit token': 'Edit token',
  Addresses: 'Addresses',
  'Worker Addresses': 'Worker Addresses',
  'Please select the worker addresses': 'Please select the worker addresses',
  'Failure time': 'Failure time',
  'Expiration time': 'Expiration time',
  User: 'User',
  'Please enter token': 'Please enter token',
  'Generate token': 'Generate token',
  Monitor: 'Monitor',
  Group: 'Group',
  'Queue statistics': 'Queue statistics',
  'Command status statistics': 'Command status statistics',
  'Task kill': 'Task Kill',
  'Task queue': 'Task queue',
  'Error command count': 'Error command count',
  'Normal command count': 'Normal command count',
  Manage: ' Manage',
  'Number of connections': 'Number of connections',
  Sent: 'Sent',
  Received: 'Received',
  'Min latency': 'Min latency',
  'Avg latency': 'Avg latency',
  'Max latency': 'Max latency',
  'Node count': 'Node count',
  'Query time': 'Query time',
  'Node self-test status': 'Node self-test status',
  'Health status': 'Health status',
  'Max connections': 'Max connections',
  'Threads connections': 'Threads connections',
  'Max used connections': 'Max used connections',
  'Threads running connections': 'Threads running connections',
  'Worker group': 'Worker group',
  'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0',
  'Pre Statement': 'Pre Statement',
  'Post Statement': 'Post Statement',
  'Statement cannot be empty': 'Statement cannot be empty',
  'Process Define Count': 'Work flow Define Count',
  'Process Instance Running Count': 'Process Instance Running Count',
  'command number of waiting for running': 'command number of waiting for running',
  'failure command number': 'failure command number',
  'tasks number of waiting running': 'tasks number of waiting running',
  'task number of ready to kill': 'task number of ready to kill',
  'Statistics manage': 'Statistics Manage',
  statistics: 'Statistics',
  'select tenant': 'select tenant',
  'Please enter Principal': 'Please enter Principal',
  'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf',
  'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username',
  'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path',
  'The start time must not be the same as the end': 'The start time must not be the same as the end',
  'Startup parameter': 'Startup parameter',
  'Startup type': 'Startup type',
  'warning of timeout': 'warning of timeout',
  'Next five execution times': 'Next five execution times',
  'Execute time': 'Execute time',
  'Complement range': 'Complement range',
  'Http Url': 'Http Url',
  'Http Method': 'Http Method',
  'Http Parameters': 'Http Parameters',
  'Http Parameters Key': 'Http Parameters Key',
  'Http Parameters Position': 'Http Parameters Position',
  'Http Parameters Value': 'Http Parameters Value',
  'Http Check Condition': 'Http Check Condition',
  'Http Condition': 'Http Condition',
  'Please Enter Http Url': 'Please Enter Http Url(required)',
  'Please Enter Http Condition': 'Please Enter Http Condition',
  'There is no data for this period of time': 'There is no data for this period of time',
  'Worker addresses cannot be empty': 'Worker addresses cannot be empty',
  'Please generate token': 'Please generate token',
  'Please Select token': 'Please select the expiration time of token',
  'Spark Version': 'Spark Version',
  TargetDataBase: 'target database',
  TargetTable: 'target table',
  TargetJobName: 'target job name',
  'Please enter Pigeon job name': 'Please enter Pigeon job name',
  'Please enter the table of target': 'Please enter the table of target',
  'Please enter a Target Table(required)': 'Please enter a Target Table(required)',
  SpeedByte: 'speed(byte count)',
  SpeedRecord: 'speed(record count)',
  '0 means unlimited by byte': '0 means unlimited',
  '0 means unlimited by count': '0 means unlimited',
  'Modify User': 'Modify User',
  'Whether directory': 'Whether directory',
  Yes: 'Yes',
  No: 'No',
  'Hadoop Custom Params': 'Hadoop Params',
  'Sqoop Advanced Parameters': 'Sqoop Params',
  'Sqoop Job Name': 'Job Name',
  'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)',
  'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)',
  'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)',
  'Please enter Target Dir(required)': 'Please enter Target Dir(required)',
  'Please enter Export Dir(required)': 'Please enter Export Dir(required)',
  'Please enter Hive Database(required)': 'Please enter Hive Database(required)',
  'Please enter Hive Table(required)': 'Please enter Hive Table(required)',
  'Please enter Hive Partition Keys': 'Please enter Hive Partition Key',
  'Please enter Hive Partition Values': 'Please enter Partition Value',
  'Please enter Replace Delimiter': 'Please enter Replace Delimiter',
  'Please enter Fields Terminated': 'Please enter Fields Terminated',
  'Please enter Lines Terminated': 'Please enter Lines Terminated',
  'Please enter Concurrency': 'Please enter Concurrency',
  'Please enter Update Key': 'Please enter Update Key',
  'Please enter Job Name(required)': 'Please enter Job Name(required)',
  'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)',
  Direct: 'Direct',
  Type: 'Type',
  ModelType: 'ModelType',
  ColumnType: 'ColumnType',
  Database: 'Database',
  Column: 'Column',
  'Map Column Hive': 'Map Column Hive',
  'Map Column Java': 'Map Column Java',
  'Export Dir': 'Export Dir',
  'Hive partition Keys': 'Hive partition Keys',
  'Hive partition Values': 'Hive partition Values',
  FieldsTerminated: 'FieldsTerminated',
  LinesTerminated: 'LinesTerminated',
  IsUpdate: 'IsUpdate',
  UpdateKey: 'UpdateKey',
  UpdateMode: 'UpdateMode',
  'Target Dir': 'Target Dir',
  DeleteTargetDir: 'DeleteTargetDir',
  FileType: 'FileType',
  CompressionCodec: 'CompressionCodec',
  CreateHiveTable: 'CreateHiveTable',
  DropDelimiter: 'DropDelimiter',
  OverWriteSrc: 'OverWriteSrc',
  ReplaceDelimiter: 'ReplaceDelimiter',
  Concurrency: 'Concurrency',
  Form: 'Form',
  OnlyUpdate: 'OnlyUpdate',
  AllowInsert: 'AllowInsert',
  'Data Source': 'Data Source',
  'Data Target': 'Data Target',
  'All Columns': 'All Columns',
  'Some Columns': 'Some Columns',
  'Branch flow': 'Branch flow',
  'Custom Job': 'Custom Job',
  'Custom Script': 'Custom Script',
  'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow',
  'Successful branch flow and failed branch flow are required': 'conditions node Successful and failed branch flow are required',
  'No resources exist': 'No resources exist',
  'Please delete all non-existing resources': 'Please delete all non-existing resources',
  'Unauthorized or deleted resources': 'Unauthorized or deleted resources',
  'Please delete all non-existent resources': 'Please delete all non-existent resources',
  Kinship: 'Workflow relationship',
  Reset: 'Reset',
  KinshipStateActive: 'Current selection',
  KinshipState1: 'Online',
  KinshipState0: 'Workflow is not online',
  KinshipState10: 'Scheduling is not online',
  'Dag label display control': 'Dag label display control',
  Enable: 'Enable',
  Disable: 'Disable',
  'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!',
  'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading',
  'User name length is between 3 and 39': 'User name length is between 3 and 39',
  'Timeout Settings': 'Timeout Settings',
  'Connect Timeout': 'Connect Timeout',
  'Socket Timeout': 'Socket Timeout',
  'Connect timeout be a positive integer': 'Connect timeout be a positive integer',
  'Socket Timeout be a positive integer': 'Socket Timeout be a positive integer',
  ms: 'ms',
  'Please Enter Url': 'Please Enter Url eg. 127.0.0.1:7077',
  Master: 'Master',
  'Please select the waterdrop resources': 'Please select the waterdrop resources',
  zkDirectory: 'zkDirectory',
  'Directory detail': 'Directory detail',
  'Connection name': 'Connection name',
  'Current connection settings': 'Current connection settings',
  'Please save the DAG before formatting': 'Please save the DAG before formatting',
  'Batch copy': 'Batch copy',
  'Related items': 'Related items',
  'Project name is required': 'Project name is required',
  'Batch move': 'Batch move',
  Version: 'Version',
  'Pre tasks': 'Pre tasks',
  'Running Memory': 'Running Memory',
  'Max Memory': 'Max Memory',
  'Min Memory': 'Min Memory',
  'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate',
  Info: 'Info',
  'Datasource userName': 'owner',
  'Resource userName': 'owner',
  'Environment manage': 'Environment manage',
  'Create environment': 'Create environment',
  'Edit environment': 'Edit environment',
  'Environment value': 'Environment value',
  'Environment Name': 'Environment Name',
  'Environment Code': 'Environment Code',
  'Environment Config': 'Environment Config',
  'Environment Desc': 'Environment Desc',
  'Environment Worker Group': 'Worker Groups',
  'Please enter environment config': 'Please enter environment config',
  'Please enter environment desc': 'Please enter environment desc',
  'Please select worker groups': 'Please select worker groups',
  condition: 'condition',
  'The condition content cannot be empty': 'The condition content cannot be empty',
  'Reference from': 'Reference from',
  'No more...': 'No more...',
  'Task Definition': 'Task Definition',
  'Create task': 'Create task',
  'Task Type': 'Task Type',
  'Process execute type': 'Process execute type',
  parallel: 'parallel',
  'Serial wait': 'Serial wait',
  'Serial discard': 'Serial discard',
  'Serial priority': 'Serial priority',
  'Recover serial wait': 'Recover serial wait',
  IsEnableProxy: 'Enable Proxy',
  WebHook: 'WebHook',
  Keyword: 'Keyword',
  Proxy: 'Proxy',
  receivers: 'Receivers',
  receiverCcs: 'ReceiverCcs',
  transportProtocol: 'Transport Protocol',
  serverHost: 'SMTP Host',
  serverPort: 'SMTP Port',
  sender: 'Sender',
  enableSmtpAuth: 'SMTP Auth',
  starttlsEnable: 'SMTP STARTTLS Enable',
  sslEnable: 'SMTP SSL Enable',
  smtpSslTrust: 'SMTP SSL Trust',
  url: 'URL',
  requestType: 'Request Type',
  headerParams: 'Headers',
  bodyParams: 'Body',
  contentField: 'Content Field',
  path: 'Script Path',
  userParams: 'User Params',
  corpId: 'CorpId',
  secret: 'Secret',
  userSendMsg: 'UserSendMsg',
  agentId: 'AgentId',
  users: 'Users',
  Username: 'Username',
  showType: 'Show Type',
  'Please select a task type (required)': 'Please select a task type (required)',
  layoutType: 'Layout Type',
  gridLayout: 'Grid',
  dagreLayout: 'Dagre',
  rows: 'Rows',
  cols: 'Cols',
  processOnline: 'Online',
  searchNode: 'Search Node',
  dagScale: 'Scale',
  workflowName: 'Workflow Name',
  scheduleStartTime: 'Schedule Start Time',
  scheduleEndTime: 'Schedule End Time',
  crontabExpression: 'Crontab',
  workflowPublishStatus: 'Workflow Publish Status',
  schedulePublishStatus: 'Schedule Publish Status'
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,183
[Bug] [Warning instance manage] When selecting `slack`, an error message will appear, and the corresponding form will not appear.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

When selecting `slack`, an error message appears and the corresponding form is not rendered.

<img width="1649" alt="38dcd65fbb30c4b9f4a49abb1167530" src="https://user-images.githubusercontent.com/19239641/144731706-91114a7e-a9ad-4fcf-bfd4-a76f86f47f83.png">

### What you expected to happen

`i18n` does not support lower camelCase ("little camel") keys.

### How to reproduce

Choose `slack`.

### Anything else

_No response_

### Version

dev

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
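To make the failure mode concrete, here is a hedged, self-contained Java sketch of a locale lookup table. The real UI uses the JavaScript locale maps included in this record, not Java; `LocaleLookupDemo`, `t`, and the `slackWebHookUrl` key are hypothetical names used only for illustration. A form field named in lower camelCase resolves only if that exact key was added to the locale file; otherwise the translate call errors out instead of rendering a label.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration: a locale table keyed the way the UI form fields are named.
public class LocaleLookupDemo {
    private static final Map<String, String> EN_US = new HashMap<>();

    static {
        // Quoted, space-separated keys have always worked...
        EN_US.put("Alarm instance name", "Alarm instance name");
        // ...but plugin form fields arrive as lower-camel-case identifiers,
        // so those identifiers must exist as keys in the locale map too.
        EN_US.put("webHook", "WebHook");
    }

    // Mimics a translate call that fails on a missing key, which is how the
    // broken form manifests in the UI.
    static String t(String key) {
        String value = EN_US.get(key);
        if (value == null) {
            throw new IllegalArgumentException("missing i18n key: " + key);
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(t("webHook"));   // resolves once the key exists
        System.out.println(t("slackWebHookUrl")); // throws: key never added to the locale files
    }
}
```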
https://github.com/apache/dolphinscheduler/issues/7183
https://github.com/apache/dolphinscheduler/pull/7184
2fa3157e6ec7654d593fe3f71a84b70575bb5702
eb07a2b1c953596dd0c2a420f8bd31334f6dfd3e
"2021-12-05T03:03:05Z"
java
"2021-12-05T03:23:50Z"
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
export default {
  'User Name': '用户名',
  'Please enter user name': '请输入用户名',
  Password: '密码',
  'Please enter your password': '请输入密码',
  'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间',
  Login: '登录',
  Home: '首页',
  'Failed to create node to save': '未创建节点保存失败',
  'Global parameters': '全局参数',
  'Local parameters': '局部参数',
  'Copy success': '复制成功',
  'The browser does not support automatic copying': '该浏览器不支持自动复制',
  'Whether to save the DAG graph': '是否保存DAG图',
  'Current node settings': '当前节点设置',
  'View history': '查看历史',
  'View log': '查看日志',
  'Force success': '强制成功',
  'Enter this child node': '进入该子节点',
  'Node name': '节点名称',
  'Please enter name (required)': '请输入名称(必填)',
  'Run flag': '运行标志',
  Normal: '正常',
  'Prohibition execution': '禁止执行',
  'Please enter description': '请输入描述',
  'Number of failed retries': '失败重试次数',
  Times: '次',
  'Failed retry interval': '失败重试间隔',
  Minute: '分',
  'Delay execution time': '延时执行时间',
  'Delay execution': '延时执行',
  'Forced success': '强制成功',
  Cancel: '取消',
  'Confirm add': '确认添加',
  'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流',
  'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流',
  'Name already exists': '名称已存在请重新输入',
  'Download Log': '下载日志',
  'Refresh Log': '刷新日志',
  'Enter full screen': '进入全屏',
  'Cancel full screen': '取消全屏',
  Close: '关闭',
  'Update log success': '更新日志成功',
  'No more logs': '暂无更多日志',
  'No log': '暂无日志',
  'Loading Log...': '正在努力请求日志中...',
  'Set the DAG diagram name': '设置DAG图名称',
  'Please enter description(optional)': '请输入描述(选填)',
  'Set global': '设置全局',
  'Whether to go online the process definition': '是否上线流程定义',
  'Whether to update the process definition': '是否更新流程定义',
  Add: '添加',
  'DAG graph name cannot be empty': 'DAG图名称不能为空',
  'Create Datasource': '创建数据源',
  'Project Home': '工作流监控',
  'Project Manage': '项目管理',
  'Create Project': '创建项目',
  'Cron Manage': '定时管理',
  'Copy Workflow': '复制工作流',
  'Tenant Manage': '租户管理',
  'Create Tenant': '创建租户',
  'User Manage': '用户管理',
  'Create User': '创建用户',
  'User Information': '用户信息',
  'Edit Password': '密码修改',
  Success: '成功',
  Failed: '失败',
  Delete: '删除',
  'Please choose': '请选择',
  'Please enter a positive integer': '请输入正整数',
  'Program Type': '程序类型',
  'Main Class': '主函数的Class',
  'Main Package': '主程序包',
  'Please enter main package': '请选择主程序包',
  'Please enter main class': '请填写主函数的Class',
  'Main Arguments': '主程序参数',
  'Please enter main arguments': '请输入主程序参数',
  'Option Parameters': '选项参数',
  'Please enter option parameters': '请输入选项参数',
  Resources: '资源',
  'Custom Parameters': '自定义参数',
  'Custom template': '自定义模版',
  Datasource: '数据源',
  methods: '方法',
  'Please enter the procedure method': '请输入存储脚本 \n\n调用存储过程:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\n调用存储函数:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ',
  'The procedure method script example': '示例:{call <procedure-name>[(?,?, ...)]} 或 {?= call <procedure-name>[(?,?, ...)]}',
  Script: '脚本',
  'Please enter script(required)': '请输入脚本(必填)',
  'Deploy Mode': '部署方式',
  'Driver Cores': 'Driver核心数',
  'Please enter Driver cores': '请输入Driver核心数',
  'Driver Memory': 'Driver内存数',
  'Please enter Driver memory': '请输入Driver内存数',
  'Executor Number': 'Executor数量',
  'Please enter Executor number': '请输入Executor数量',
  'The Executor number should be a positive integer': 'Executor数量为正整数',
  'Executor Memory': 'Executor内存数',
  'Please enter Executor memory': '请输入Executor内存数',
  'Executor Cores': 'Executor核心数',
  'Please enter Executor cores': '请输入Executor核心数',
  'Memory should be a positive integer': '内存数为数字',
  'Core number should be positive integer': '核心数为正整数',
  'Flink Version': 'Flink版本',
  'JobManager Memory': 'JobManager内存数',
  'Please enter JobManager memory': '请输入JobManager内存数',
  'TaskManager Memory': 'TaskManager内存数',
  'Please enter TaskManager memory': '请输入TaskManager内存数',
  'Slot Number': 'Slot数量',
  'Please enter Slot number': '请输入Slot数量',
  Parallelism: '并行度',
  'Custom Parallelism': '自定义并行度',
  'Please enter Parallelism': '请输入并行度',
  'Parallelism number should be positive integer': '并行度必须为正整数',
  'Parallelism tip': '如果存在大量任务需要补数时,可以利用自定义并行度将补数的任务线程设置成合理的数值,避免对服务器造成过大的影响',
  'TaskManager Number': 'TaskManager数量',
  'Please enter TaskManager number': '请输入TaskManager数量',
  'App Name': '任务名称',
  'Please enter app name(optional)': '请输入任务名称(选填)',
  'SQL Type': 'sql类型',
  'Send Email': '发送邮件',
  'Log display': '日志显示',
  'rows of result': '行查询结果',
  Title: '主题',
  'Please enter the title of email': '请输入邮件主题',
  Table: '表名',
  TableMode: '表格',
  Attachment: '附件',
  'SQL Parameter': 'sql参数',
  'SQL Statement': 'sql语句',
  'UDF Function': 'UDF函数',
  'Please enter a SQL Statement(required)': '请输入sql语句(必填)',
  'Please enter a JSON Statement(required)': '请输入json语句(必填)',
  'One form or attachment must be selected': '表格、附件必须勾选一个',
  'Mail subject required': '邮件主题必填',
  'Child Node': '子节点',
  'Please select a sub-Process': '请选择子工作流',
  Edit: '编辑',
  'Switch To This Version': '切换到该版本',
  'Datasource Name': '数据源名称',
  'Please enter datasource name': '请输入数据源名称',
  IP: 'IP主机名',
  'Please enter IP': '请输入IP主机名',
  Port: '端口',
  'Please enter port': '请输入端口',
  'Database Name': '数据库名',
  'Please enter database name': '请输入数据库名',
  'Oracle Connect Type': '服务名或SID',
  'Oracle Service Name': '服务名',
  'Oracle SID': 'SID',
  'jdbc connect parameters': 'jdbc连接参数',
  'Test Connect': '测试连接',
  'Please enter resource name': '请输入数据源名称',
  'Please enter resource folder name': '请输入资源文件夹名称',
  'Please enter a non-query SQL statement': '请输入非查询sql语句',
  'Please enter IP/hostname': '请输入IP/主机名',
  'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式',
  '#': '编号',
  'Datasource Type': '数据源类型',
  'Datasource Parameter': '数据源参数',
  'Create Time': '创建时间',
  'Update Time': '更新时间',
  Operation: '操作',
  'Current Version': '当前版本',
  'Click to view': '点击查看',
  'Delete?': '确定删除吗?',
  'Switch Version Successfully': '切换版本成功',
  'Confirm Switch To This Version?': '确定切换到该版本吗?',
  Confirm: '确定',
  'Task status statistics': '任务状态统计',
  Number: '数量',
  State: '状态',
  'Dry-run flag': '空跑标识',
  'Process Status Statistics': '流程状态统计',
  'Process Definition Statistics': '流程定义统计',
  'Project Name': '项目名称',
  'Please enter name': '请输入名称',
  'Owned Users': '所属用户',
  'Process Pid': '进程Pid',
  'Zk registration directory': 'zk注册目录',
  cpuUsage: 'cpuUsage',
  memoryUsage: 'memoryUsage',
  'Last heartbeat time': '最后心跳时间',
  'Edit Tenant': '编辑租户',
  'OS Tenant Code': '操作系统租户',
  'Tenant Name': '租户名称',
  Queue: '队列',
  'Please select a queue': '默认为租户关联队列',
  'Please enter the os tenant code in English': '请输入操作系统租户只允许英文',
  'Please enter os tenant code in English': '请输入英文操作系统租户',
  'Please enter os tenant code': '请输入操作系统租户',
  'Please enter tenant Name': '请输入租户名称',
  'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合',
  'Edit User': '编辑用户',
  Tenant: '租户',
  Email: '邮件',
  Phone: '手机',
  'User Type': '用户类型',
  'Please enter phone number': '请输入手机',
  'Please enter email': '请输入邮箱',
  'Please enter the correct email format': '请输入正确的邮箱格式',
  'Please enter the correct mobile phone format': '请输入正确的手机格式',
  Project: '项目',
  Authorize: '授权',
  'File resources': '文件资源',
  'UDF resources': 'UDF资源',
  'UDF resources directory': 'UDF资源目录',
  'Please select UDF resources directory': '请选择UDF资源目录',
  'Alarm group': '告警组',
  'Alarm group required': '告警组必填',
  'Edit alarm group': '编辑告警组',
  'Create alarm group': '创建告警组',
  'Create Alarm Instance': '创建告警实例',
  'Edit Alarm Instance': '编辑告警实例',
  'Group Name': '组名称',
  'Alarm instance name': '告警实例名称',
  'Alarm plugin name': '告警插件名称',
  'Select plugin': '选择插件',
  'Select Alarm plugin': '请选择告警插件',
  'Please enter group name': '请输入组名称',
  'Instance parameter exception': '实例参数异常',
  'Group Type': '组类型',
  'Alarm plugin instance': '告警插件实例',
  'Select Alarm plugin instance': '请选择告警插件实例',
  Remarks: '备注',
  SMS: '短信',
  'Managing Users': '管理用户',
  Permission: '权限',
  Administrator: '管理员',
  'Confirm Password': '确认密码',
  'Please enter confirm password': '请输入确认密码',
  'Password cannot be in Chinese': '密码不能为中文',
  'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码',
  'Confirmation password cannot be in Chinese': '确认密码不能为中文',
  'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码',
  'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认',
  'Please select the datasource': '请选择数据源',
  'Please select resources': '请选择资源',
  Query: '查询',
  'Non Query': '非查询',
  'prop(required)': 'prop(必填)',
  'value(optional)': 'value(选填)',
  'value(required)': 'value(必填)',
  'prop is empty': '自定义参数prop不能为空',
  'value is empty': 'value不能为空',
  'prop is repeat': 'prop中有重复',
  'Start Time': '开始时间',
  'End Time': '结束时间',
  crontab: 'crontab',
  'Failure Strategy': '失败策略',
  online: '上线',
  offline: '下线',
  'Task Status': '任务状态',
  'Process Instance': '工作流实例',
  'Task Instance': '任务实例',
  'Select date range': '选择日期区间',
  startDate: '开始日期',
  endDate: '结束日期',
  Date: '日期',
  Waiting: '等待',
  Execution: '执行中',
  Finish: '完成',
  'Create File': '创建文件',
  'Create folder': '创建文件夹',
  'File Name': '文件名称',
  'Folder Name': '文件夹名称',
  'File Format': '文件格式',
  'Folder Format': '文件夹格式',
  'File Content': '文件内容',
  'Upload File Size': '文件大小不能超过1G',
  Create: '创建',
  'Please enter the resource content': '请输入资源内容',
  'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行',
  'File Details': '文件详情',
  'Download Details': '下载详情',
  Return: '返回',
  Save: '保存',
  'File Manage': '文件管理',
  'Upload Files': '上传文件',
  'Create UDF Function': '创建UDF函数',
  'Upload UDF Resources': '上传UDF资源',
  'Service-Master': '服务管理-Master',
  'Service-Worker': '服务管理-Worker',
  'Process Name': '工作流名称',
  Executor: '执行用户',
  'Run Type': '运行类型',
  'Scheduling Time': '调度时间',
  'Run Times': '运行次数',
  host: 'host',
  'fault-tolerant sign': '容错标识',
  Rerun: '重跑',
  'Recovery Failed': '恢复失败',
  Stop: '停止',
  Pause: '暂停',
  'Recovery Suspend': '恢复运行',
  Gantt: '甘特图',
  'Node Type': '节点类型',
  'Submit Time': '提交时间',
  Duration: '运行时长',
  'Retry Count': '重试次数',
  'Task Name': '任务名称',
  'Task Date': '任务日期',
  'Source Table': '源表',
  'Record Number': '记录数',
  'Target Table': '目标表',
  'Online viewing type is not supported': '不支持在线查看类型',
  Size: '大小',
  Rename: '重命名',
  Download: '下载',
  Export: '导出',
  'Version Info': '版本信息',
  Submit: '提交',
  'Edit UDF Function': '编辑UDF函数',
  type: '类型',
  'UDF Function Name': 'UDF函数名称',
  FILE: '文件',
  UDF: 'UDF',
  'File Subdirectory': '文件子目录',
  'Please enter a function name': '请输入函数名',
  'Package Name': '包名类名',
  'Please enter a Package name': '请输入包名类名',
  Parameter: '参数',
  'Please enter a parameter': '请输入参数',
  'UDF Resources': 'UDF资源',
  'Upload Resources': '上传资源',
  Instructions: '使用说明',
  'Please enter a instructions': '请输入使用说明',
  'Please enter a UDF function name': '请输入UDF函数名称',
  'Select UDF Resources': '请选择UDF资源',
  'Class Name': '类名',
  'Jar Package': 'jar包',
  'Library Name': '库名',
  'UDF Resource Name': 'UDF资源名称',
  'File Size': '文件大小',
  Description: '描述',
  'Drag Nodes and Selected Items': '拖动节点和选中项',
  'Select Line Connection': '选择线条连接',
  'Delete selected lines or nodes': '删除选中的线或节点',
  'Full Screen': '全屏',
  Unpublished: '未发布',
  'Start Process': '启动工作流',
  'Execute from the current node': '从当前节点开始执行',
  'Recover tolerance fault process': '恢复被容错的工作流',
  'Resume the suspension process': '恢复运行流程',
  'Execute from the failed nodes': '从失败节点开始执行',
  'Complement Data': '补数',
  'Scheduling execution': '调度执行',
  'Recovery waiting thread': '恢复等待线程',
  'Submitted successfully': '提交成功',
  Executing: '正在执行',
  'Ready to pause': '准备暂停',
  'Ready to stop': '准备停止',
  'Need fault tolerance': '需要容错',
  Kill: 'Kill',
  'Waiting for thread': '等待线程',
  'Waiting for dependence': '等待依赖',
  Start: '运行',
  Copy: '复制节点',
  'Copy name': '复制名称',
  'Copy path': '复制路径',
  'Please enter keyword': '请输入关键词',
  'File Upload': '文件上传',
  'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!',
  'Drag area upload': '拖动区域上传',
  Upload: '上传',
  'ReUpload File': '重新上传文件',
  'Please enter file name': '请输入文件名',
  'Please select the file to upload': '请选择要上传的文件',
  'Resources manage': '资源中心',
  Security: '安全中心',
  Logout: '退出',
  'No data': '查询无数据',
  'Uploading...': '文件上传中',
  'Loading...': '正在努力加载中...',
  List: '列表',
  'Unable to download without proper url': '无下载url无法下载',
  Process: '工作流',
  'Process definition': '工作流定义',
  'Task record': '任务记录',
  'Warning group manage': '告警组管理',
  'Warning instance manage': '告警实例管理',
  'Servers manage': '服务管理',
  'UDF manage': 'UDF管理',
  'Resource manage': '资源管理',
  'Function manage': '函数管理',
  'Edit password': '修改密码',
  'Ordinary users': '普通用户',
  'Create process': '创建工作流',
  'Import process': '导入工作流',
  'Timing state': '定时状态',
  Timing: '定时',
  Timezone: '时区',
  TreeView: '树形图',
  'Mailbox already exists! Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复',
  'Mailbox input is illegal': '邮箱输入不合法',
  'Please set the parameters before starting': '启动前请先设置参数',
  Continue: '继续',
  End: '结束',
  'Node execution': '节点执行',
  'Backward execution': '向后执行',
  'Forward execution': '向前执行',
  'Execute only the current node': '仅执行当前节点',
  'Notification strategy': '通知策略',
  'Notification group': '通知组',
  'Please select a notification group': '请选择通知组',
  'Whether it is a complement process?': '是否补数',
  'Schedule date': '调度日期',
  'Mode of execution': '执行方式',
  'Serial execution': '串行执行',
  'Parallel execution': '并行执行',
  'Set parameters before timing': '定时前请先设置参数',
  'Start and stop time': '起止时间',
  'Please select time': '请选择时间',
  'Please enter crontab': '请输入crontab',
  none_1: '都不发',
  success_1: '成功发',
  failure_1: '失败发',
  All_1: '成功或失败都发',
  Toolbar: '工具栏',
  'View variables': '查看变量',
  'Format DAG': '格式化DAG',
  'Refresh DAG status': '刷新DAG状态',
  Return_1: '返回上一节点',
  'Please enter format': '请输入格式为',
  'connection parameter': '连接参数',
  'Process definition details': '流程定义详情',
  'Create process definition': '创建流程定义',
  'Scheduled task list': '定时任务列表',
  'Process instance details': '流程实例详情',
  'Create Resource': '创建资源',
  'User Center': '用户中心',
  AllStatus: '全部状态',
  None: '无',
  Name: '名称',
  'Process priority': '流程优先级',
  'Task priority': '任务优先级',
  'Task timeout alarm': '任务超时告警',
  'Timeout strategy': '超时策略',
  'Timeout alarm': '超时告警',
  'Timeout failure': '超时失败',
  'Timeout period': '超时时长',
  'Waiting Dependent complete': '等待依赖完成',
  'Waiting Dependent start': '等待依赖启动',
  'Check interval': '检查间隔',
  'Timeout must be longer than check interval': '超时时间必须比检查间隔长',
  'Timeout strategy must be selected': '超时策略必须选一个',
  'Timeout must be a positive integer': '超时时长必须为正整数',
  'Add dependency': '添加依赖',
  'Whether dry-run': '是否空跑',
  and: '且',
  or: '或',
  month: '月',
  week: '周',
  day: '日',
  hour: '时',
  Running: '正在运行',
  'Waiting for dependency to complete': '等待依赖完成',
  Selected: '已选',
  CurrentHour: '当前小时',
  Last1Hour: '前1小时',
  Last2Hours: '前2小时',
  Last3Hours: '前3小时',
  Last24Hours: '前24小时',
  today: '今天',
  Last1Days: '昨天',
  Last2Days: '前两天',
  Last3Days: '前三天',
  Last7Days: '前七天',
  ThisWeek: '本周',
  LastWeek: '上周',
  LastMonday: '上周一',
  LastTuesday: '上周二',
  LastWednesday: '上周三',
  LastThursday: '上周四',
  LastFriday: '上周五',
  LastSaturday: '上周六',
  LastSunday: '上周日',
  ThisMonth: '本月',
  LastMonth: '上月',
  LastMonthBegin: '上月初',
  LastMonthEnd: '上月末',
  'Refresh status succeeded': '刷新状态成功',
  'Queue manage': 'Yarn 队列管理',
  'Create queue': '创建队列',
  'Edit queue': '编辑队列',
  'Datasource manage': '数据源中心',
  'History task record': '历史任务记录',
  'Please go online': '不要忘记上线',
  'Queue value': '队列值',
  'Please enter queue value': '请输入队列值',
  'Worker group manage': 'Worker分组管理',
  'Create worker group': '创建Worker分组',
  'Edit worker group': '编辑Worker分组',
  'Token manage': '令牌管理',
  'Create token': '创建令牌',
  'Edit token': '编辑令牌',
  Addresses: '地址',
  'Worker Addresses': 'Worker地址',
  'Please select the worker addresses': '请选择Worker地址',
  'Failure time': '失效时间',
  'Expiration time': '失效时间',
  User: '用户',
  'Please enter token': '请输入令牌',
  'Generate token': '生成令牌',
  Monitor: '监控中心',
  Group: '分组',
  'Queue statistics': '队列统计',
  'Command status statistics': '命令状态统计',
  'Task kill': '等待kill任务',
  'Task queue': '等待执行任务',
  'Error command count': '错误指令数',
  'Normal command count': '正确指令数',
  Manage: '管理',
  'Number of connections': '连接数',
  Sent: '发送量',
  Received: '接收量',
  'Min latency': '最低延时',
  'Avg latency': '平均延时',
  'Max latency': '最大延时',
  'Node count': '节点数',
  'Query time': '当前查询时间',
  'Node self-test status': '节点自检状态',
  'Health status': '健康状态',
  'Max connections': '最大连接数',
  'Threads connections': '当前连接数',
  'Max used connections': '同时使用连接最大数',
  'Threads running connections': '数据库当前活跃连接数',
  'Worker group': 'Worker分组',
  'Please enter a positive integer greater than 0': '请输入大于 0 的正整数',
  'Pre Statement': '前置sql',
  'Post Statement': '后置sql',
  'Statement cannot be empty': '语句不能为空',
  'Process Define Count': '工作流定义数',
  'Process Instance Running Count': '正在运行的流程数',
  'command number of waiting for running': '待执行的命令数',
  'failure command number': '执行失败的命令数',
  'tasks number of waiting running': '待运行任务数',
  'task number of ready to kill': '待杀死任务数',
  'Statistics manage': '统计管理',
  statistics: '统计',
  'select tenant': '选择租户',
  'Please enter Principal': '请输入Principal',
  'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf',
  'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username',
  'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path',
  'The start time must not be the same as the end': '开始时间和结束时间不能相同',
  'Startup parameter': '启动参数',
  'Startup type': '启动类型',
  'warning of timeout': '超时告警',
  'Next five execution times': '接下来五次执行时间',
  'Execute time': '执行时间',
  'Complement range': '补数范围',
  'Http Url': '请求地址',
  'Http Method': '请求类型',
  'Http Parameters': '请求参数',
  'Http Parameters Key': '参数名',
  'Http Parameters Position': '参数位置',
  'Http Parameters Value': '参数值',
  'Http Check Condition': '校验条件',
  'Http Condition': '校验内容',
  'Please Enter Http Url': '请填写请求地址(必填)',
  'Please Enter Http Condition': '请填写校验内容',
  'There is no data for this period of time': '该时间段无数据',
  'Worker addresses cannot be empty': 'Worker地址不能为空',
  'Please generate token': '请生成Token',
  'Please Select token': '请选择Token失效时间',
  'Spark Version': 'Spark版本',
  TargetDataBase: '目标库',
  TargetTable: '目标表',
  TargetJobName: '目标任务名',
  'Please enter Pigeon job name': '请输入Pigeon任务名',
  'Please enter the table of target': '请输入目标表名',
  'Please enter a Target Table(required)': '请输入目标表(必填)',
  SpeedByte: '限流(字节数)',
  SpeedRecord: '限流(记录数)',
  '0 means unlimited by byte': 'KB,0代表不限制',
  '0 means unlimited by count': '0代表不限制',
  'Modify User': '修改用户',
  'Whether directory': '是否文件夹',
  Yes: '是',
  No: '否',
  'Hadoop Custom Params': 'Hadoop参数',
  'Sqoop Advanced Parameters': 'Sqoop参数',
  'Sqoop Job Name': '任务名称',
  'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)',
  'Please enter Mysql Table(required)': '请输入Mysql表名(必填)',
  'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开',
  'Please enter Target Dir(required)': '请输入目标路径(必填)',
  'Please enter Export Dir(required)': '请输入数据源路径(必填)',
  'Please enter Hive Database(required)': '请输入Hive数据库(必填)',
  'Please enter Hive Table(required)': '请输入Hive表名(必填)',
  'Please enter Hive Partition Keys': '请输入分区键',
  'Please enter Hive Partition Values': '请输入分区值',
  'Please enter Replace Delimiter': '请输入替换分隔符',
  'Please enter Fields Terminated': '请输入列分隔符',
  'Please enter Lines Terminated': '请输入行分隔符',
  'Please enter Concurrency': '请输入并发度',
  'Please enter Update Key': '请输入更新列',
  'Please enter Job Name(required)': '请输入任务名称(必填)',
  'Please enter Custom Shell(required)': '请输入自定义脚本',
  Direct: '流向',
  Type: '类型',
  ModelType: '模式',
  ColumnType: '列类型',
  Database: '数据库',
  Column: '列',
  'Map Column Hive': 'Hive类型映射',
  'Map Column Java': 'Java类型映射',
  'Export Dir': '数据源路径',
  'Hive partition Keys': 'Hive 分区键',
  'Hive partition Values': 'Hive 分区值',
  FieldsTerminated: '列分隔符',
  LinesTerminated: '行分隔符',
  IsUpdate: '是否更新',
  UpdateKey: '更新列',
  UpdateMode: '更新类型',
  'Target Dir': '目标路径',
  DeleteTargetDir: '是否删除目录',
  FileType: '保存格式',
  CompressionCodec: '压缩类型',
  CreateHiveTable: '是否创建新表',
  DropDelimiter: '是否删除分隔符',
  OverWriteSrc: '是否覆盖数据源',
  ReplaceDelimiter: '替换分隔符',
  Concurrency: '并发度',
  Form: '表单',
  OnlyUpdate: '只更新',
  AllowInsert: '无更新便插入',
  'Data Source': '数据来源',
  'Data Target': '数据目的',
  'All Columns': '全表导入',
  'Some Columns': '选择列',
  'Branch flow': '分支流转',
  'Custom Job': '自定义任务',
  'Custom Script': '自定义脚本',
  'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点',
  'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填',
  'No resources exist': '不存在资源',
  'Please delete all non-existing resources': '请删除所有不存在资源',
  'Unauthorized or deleted resources': '未授权或已删除资源',
  'Please delete all non-existent resources': '请删除所有未授权或已删除资源',
  Kinship: '工作流关系',
  Reset: '重置',
  KinshipStateActive: '当前选择',
  KinshipState1: '已上线',
  KinshipState0: '工作流未上线',
  KinshipState10: '调度未上线',
  'Dag label display control': 'Dag节点名称显隐',
  Enable: '启用',
  Disable: '停用',
  'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!',
  'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存',
  'User name length is between 3 and 39': '用户名长度在3~39之间',
  'Timeout Settings': '超时设置',
  'Connect Timeout': '连接超时',
  'Socket Timeout': 'Socket超时',
  'Connect timeout be a positive integer': '连接超时必须为数字',
  'Socket Timeout be a positive integer': 'Socket超时必须为数字',
  ms: '毫秒',
  'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077',
  Master: 'Master',
  'Please select the waterdrop resources': '请选择waterdrop配置文件',
  zkDirectory: 'zk注册目录',
  'Directory detail': '查看目录详情',
  'Connection name': '连线名',
  'Current connection settings': '当前连线设置',
  'Please save the DAG before formatting': '格式化前请先保存DAG',
  'Batch copy': '批量复制',
  'Related items': '关联项目',
  'Project name is required': '项目名称必填',
  'Batch move': '批量移动',
  Version: '版本',
  'Pre tasks': '前置任务',
  'Running Memory': '运行内存',
  'Max Memory': '最大内存',
  'Min Memory': '最小内存',
  'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建',
  Info: '提示',
  'Datasource userName': '所属用户',
  'Resource userName': '所属用户',
  'Environment manage': '环境管理',
  'Create environment': '创建环境',
  'Edit environment': '编辑',
  'Environment value': 'Environment value',
  'Environment Name': '环境名称',
  'Environment Code': '环境编码',
  'Environment Config': '环境配置',
  'Environment Desc': '详细描述',
  'Environment Worker Group': 'Worker组',
  'Please enter environment config': '请输入环境配置信息',
  'Please enter environment desc': '请输入详细描述',
  'Please select worker groups': '请选择Worker分组',
  condition: '条件',
  'The condition content cannot be empty': '条件内容不能为空',
  'Reference from': '使用已有任务',
  'No more...': '没有更多了...',
  'Task Definition': '任务定义',
  'Create task': '创建任务',
  'Task Type': '任务类型',
  'Process execute type': '执行策略',
  parallel: '并行',
  'Serial wait': '串行等待',
  'Serial discard': '串行抛弃',
  'Serial priority': '串行优先',
  'Recover serial wait': '串行恢复',
  IsEnableProxy: '启用代理',
  WebHook: 'Web钩子',
  Keyword: '密钥',
  Proxy: '代理',
  receivers: '收件人',
  receiverCcs: '抄送人',
  transportProtocol: '邮件协议',
  serverHost: 'SMTP服务器',
  serverPort: 'SMTP端口',
  sender: '发件人',
  enableSmtpAuth: '请求认证',
  starttlsEnable: 'STARTTLS连接',
  sslEnable: 'SSL连接',
  smtpSslTrust: 'SSL证书信任',
  url: 'URL',
  requestType: '请求方式',
  headerParams: '请求头',
  bodyParams: '请求体',
  contentField: '内容字段',
  path: '脚本路径',
  userParams: '自定义参数',
  corpId: '企业ID',
  secret: '密钥',
  teamSendMsg: '群发信息',
  userSendMsg: '群员信息',
  agentId: '应用ID',
  users: '群员',
  Username: '用户名',
  showType: '内容展示类型',
  'Please select a task type (required)': '请选择任务类型(必选)',
  layoutType: '布局类型',
  gridLayout: '网格布局',
  dagreLayout: '层次布局',
  rows: '行数',
  cols: '列数',
  processOnline: '已上线',
  searchNode: '搜索节点',
  dagScale: '缩放',
  workflowName: '工作流名称',
  scheduleStartTime: '定时开始时间',
  scheduleEndTime: '定时结束时间',
  crontabExpression: 'Crontab',
  workflowPublishStatus: '工作流上线状态',
  schedulePublishStatus: '定时状态'
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,093
[Bug] [Dependent] stuck in running
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

![](https://files.catbox.moe/3y3efn.png)
![](https://files.catbox.moe/h12kd7.png)

The dependent task is stuck in the starting state: there is no log output and the pid is 0. It seems the dependent task is never launched.

### What you expected to happen

The task runs successfully.

### How to reproduce

![](https://files.catbox.moe/hqc4hs.png)

### Anything else

_No response_

### Version

2.0.0

### Are you willing to submit PR?

- [x] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7093
https://github.com/apache/dolphinscheduler/pull/7116
eb07a2b1c953596dd0c2a420f8bd31334f6dfd3e
8d39bf14b1b189b43d27acfa2a3ae9ddd4eec5c6
"2021-12-01T03:05:16Z"
java
"2021-12-05T05:52:48Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/StateWheelExecuteThread.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.runner;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.master.cache.ProcessInstanceExecCacheManager;

import org.apache.hadoop.util.ThreadUtil;

import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * 1. timeout check wheel
 * 2. dependent task check wheel
 */
public class StateWheelExecuteThread extends Thread {

    private static final Logger logger = LoggerFactory.getLogger(StateWheelExecuteThread.class);

    ConcurrentHashMap<Integer, ProcessInstance> processInstanceCheckList;
    ConcurrentHashMap<Integer, TaskInstance> taskInstanceCheckList;
    private ProcessInstanceExecCacheManager processInstanceExecCacheManager;

    private int stateCheckIntervalSecs;

    public StateWheelExecuteThread(ConcurrentHashMap<Integer, ProcessInstance> processInstances,
                                   ConcurrentHashMap<Integer, TaskInstance> taskInstances,
                                   ProcessInstanceExecCacheManager processInstanceExecCacheManager,
                                   int stateCheckIntervalSecs) {
        this.processInstanceCheckList = processInstances;
        this.taskInstanceCheckList = taskInstances;
        this.processInstanceExecCacheManager = processInstanceExecCacheManager;
        this.stateCheckIntervalSecs = stateCheckIntervalSecs;
    }

    @Override
    public void run() {
        logger.info("state wheel thread start");
        while (Stopper.isRunning()) {
            try {
                checkProcess();
                checkTask();
            } catch (Exception e) {
                logger.error("state wheel thread check error:", e);
            }
            ThreadUtil.sleepAtLeastIgnoreInterrupts(stateCheckIntervalSecs);
        }
    }

    public boolean addProcess(ProcessInstance processInstance) {
        this.processInstanceCheckList.put(processInstance.getId(), processInstance);
        return true;
    }

    public boolean addTask(TaskInstance taskInstance) {
        this.taskInstanceCheckList.put(taskInstance.getId(), taskInstance);
        return true;
    }

    private void checkTask() {
        if (taskInstanceCheckList.isEmpty()) {
            return;
        }

        for (TaskInstance taskInstance : this.taskInstanceCheckList.values()) {
            if (TimeoutFlag.OPEN == taskInstance.getTaskDefine().getTimeoutFlag()) {
                long timeRemain = DateUtils.getRemainTime(taskInstance.getStartTime(),
                        taskInstance.getTaskDefine().getTimeout() * Constants.SEC_2_MINUTES_TIME_UNIT);
                if (0 <= timeRemain && processTimeout(taskInstance)) {
                    taskInstanceCheckList.remove(taskInstance.getId());
                    return;
                }
            }
            if (taskInstance.taskCanRetry() && taskInstance.retryTaskIntervalOverTime()) {
                processDependCheck(taskInstance);
                taskInstanceCheckList.remove(taskInstance.getId());
            }
            if (taskInstance.isSubProcess() || taskInstance.isDependTask()) {
                processDependCheck(taskInstance);
            }
        }
    }

    private void checkProcess() {
        if (processInstanceCheckList.isEmpty()) {
            return;
        }
        for (ProcessInstance processInstance : this.processInstanceCheckList.values()) {
            long timeRemain = DateUtils.getRemainTime(processInstance.getStartTime(),
                    processInstance.getTimeout() * Constants.SEC_2_MINUTES_TIME_UNIT);
            if (0 <= timeRemain && processTimeout(processInstance)) {
                processInstanceCheckList.remove(processInstance.getId());
            }
        }
    }

    private void putEvent(StateEvent stateEvent) {
        if (!processInstanceExecCacheManager.contains(stateEvent.getProcessInstanceId())) {
            return;
        }
        WorkflowExecuteThread workflowExecuteThread = this.processInstanceExecCacheManager.getByProcessInstanceId(stateEvent.getProcessInstanceId());
        workflowExecuteThread.addStateEvent(stateEvent);
    }

    private boolean processDependCheck(TaskInstance taskInstance) {
        StateEvent stateEvent = new StateEvent();
        stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
        stateEvent.setProcessInstanceId(taskInstance.getProcessInstanceId());
        stateEvent.setTaskInstanceId(taskInstance.getId());
        stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
        putEvent(stateEvent);
        return true;
    }

    private boolean processTimeout(TaskInstance taskInstance) {
        StateEvent stateEvent = new StateEvent();
        stateEvent.setType(StateEventType.TASK_TIMEOUT);
        stateEvent.setProcessInstanceId(taskInstance.getProcessInstanceId());
        stateEvent.setTaskInstanceId(taskInstance.getId());
        putEvent(stateEvent);
        return true;
    }

    private boolean processTimeout(ProcessInstance processInstance) {
        StateEvent stateEvent = new StateEvent();
        stateEvent.setType(StateEventType.PROCESS_TIMEOUT);
        stateEvent.setProcessInstanceId(processInstance.getId());
        putEvent(stateEvent);
        return true;
    }
}
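The two check methods above drive everything from a periodic scan of a concurrent map: entries are added as instances start, a timeout event fires when the deadline passes, and the entry is removed so the event does not re-fire. A minimal, self-contained sketch of that pattern (all names here are invented for the example; they are not DolphinScheduler APIs):

```java
import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Simplified "state wheel": a daemon thread periodically scans a concurrent
// map of watched items and fires a timeout event for entries whose deadline
// has passed.
public class TimeoutWheelDemo {

    record Watched(int id, Instant start, Duration timeout) {}

    public static void main(String[] args) throws InterruptedException {
        Map<Integer, Watched> checkList = new ConcurrentHashMap<>();
        checkList.put(1, new Watched(1, Instant.now(), Duration.ofSeconds(1)));

        Thread wheel = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                for (Watched w : checkList.values()) {
                    if (Instant.now().isAfter(w.start().plus(w.timeout()))) {
                        System.out.println("timeout event for id " + w.id());
                        checkList.remove(w.id()); // stop re-firing, like the code above
                    }
                }
                try {
                    Thread.sleep(500); // analogue of stateCheckIntervalSecs
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        wheel.setDaemon(true);
        wheel.start();
        Thread.sleep(2_000);
    }
}
```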
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,551
[Bug][Module Name] job state always in final state:RUNNING_EXECUTION when execution engine is tez
``` yarn application -status application_1621995850095_6901 21/05/27 07:06:04 INFO client.RMProxy: Connecting to ResourceManager at ip-192-168-24-254.us-west-2.compute.internal/192.168.24.254:8032 21/05/27 07:06:04 INFO client.AHSProxy: Connecting to Application History server at ip-192-168-24-254.us-west-2.compute.internal/192.168.24.254:10200 21/05/27 07:06:04 INFO conf.Configuration: resource-types.xml not found 21/05/27 07:06:04 INFO resource.ResourceUtils: Unable to find 'resource-types.xml'. 21/05/27 07:06:04 INFO resource.ResourceUtils: Adding resource type - name = memory-mb, units = Mi, type = COUNTABLE 21/05/27 07:06:04 INFO resource.ResourceUtils: Adding resource type - name = vcores, units = , type = COUNTABLE Application Report : Application-Id : application_1621995850095_6901 Application-Name : HIVE-f07905b9-25b6-4e3f-93a4-652ed349b9bd Application-Type : TEZ User : hadoop Queue : default Application Priority : 0 Start-Time : 1622098349177 Finish-Time : 1622098762224 Progress : 100% State : FINISHED Final-State : ENDED job final state is not SUCCEEDED but ENDED.Cause shell command always running. worker.log [INFO] 2021-05-27 07:05:41.030 - [taskAppId=TASK-10-264-3804]:[127] - FINALIZE_SESSION [INFO] 2021-05-27 07:05:41.809 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:42.811 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:43.812 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:44.814 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:45.816 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:46.818 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:47.820 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:48.821 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:49.823 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:50.825 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:51.826 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION ``` **Which version of Dolphin Scheduler:** -[1.3.6]
https://github.com/apache/dolphinscheduler/issues/5551
https://github.com/apache/dolphinscheduler/pull/6289
8d39bf14b1b189b43d27acfa2a3ae9ddd4eec5c6
2dc09c627fe1d32c63e9a7551390540581916d7d
"2021-05-27T07:22:10Z"
java
"2021-12-05T06:03:29Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common;

import org.apache.dolphinscheduler.common.enums.ExecutionStatus;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.SystemUtils;

import java.util.regex.Pattern;

/**
 * Constants
 */
public final class Constants {

    private Constants() {
        throw new UnsupportedOperationException("Construct Constants");
    }

    /**
     * common properties path
     */
    public static final String COMMON_PROPERTIES_PATH = "/common.properties";

    /**
     * alert properties
     */
    public static final String ALERT_RPC_PORT = "alert.rpc.port";

    /**
     * registry properties
     */
    public static final String REGISTRY_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
    public static final String REGISTRY_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
    public static final String REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers";
    public static final String REGISTRY_DOLPHINSCHEDULER_NODE = "/nodes";
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters";
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers";
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";

    /**
     * fs.defaultFS
     */
    public static final String FS_DEFAULTFS = "fs.defaultFS";

    /**
     * fs s3a endpoint
     */
    public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";

    /**
     * fs s3a access key
     */
    public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";

    /**
     * fs s3a secret key
     */
    public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";

    /**
     * hadoop configuration
     */
    public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";

    public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";

    /**
     * yarn.resourcemanager.ha.rm.ids
     */
    public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";

    /**
     * yarn.application.status.address
     */
    public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";

    /**
     * yarn.job.history.status.address
     */
    public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";

    /**
     * hdfs configuration
     * hdfs.root.user
     */
    public static final String HDFS_ROOT_USER = "hdfs.root.user";

    /**
     * hdfs/s3 configuration
     * resource.upload.path
     */
    public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";

    /**
     * data basedir path
     */
    public static final String DATA_BASEDIR_PATH = "data.basedir.path";

    /**
     * dolphinscheduler.env.path
     */
    public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";

    /**
     * environment properties default path
     */
    public static final String ENV_PATH = "env/dolphinscheduler_env.sh";

    /**
     * resource.view.suffixs
     */
    public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";

    public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js";

    /**
     * development.state
     */
    public static final String DEVELOPMENT_STATE = "development.state";

    /**
     * sudo enable
     */
    public static final String SUDO_ENABLE = "sudo.enable";

    /**
     * string true
     */
    public static final String STRING_TRUE = "true";

    /**
     * resource storage type
     */
    public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";

    /**
     * comma ,
     */
    public static final String COMMA = ",";

    /**
     * COLON :
     */
    public static final String COLON = ":";

    /**
     * SINGLE_SLASH /
     */
    public static final String SINGLE_SLASH = "/";

    /**
     * DOUBLE_SLASH //
     */
    public static final String DOUBLE_SLASH = "//";

    /**
     * EQUAL SIGN
     */
    public static final String EQUAL_SIGN = "=";

    /**
     * date format of yyyy-MM-dd HH:mm:ss
     */
    public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";

    /**
     * date format of yyyyMMdd
     */
    public static final String YYYYMMDD = "yyyyMMdd";

    /**
     * date format of yyyyMMddHHmmss
     */
    public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";

    /**
     * date format of yyyyMMddHHmmssSSS
     */
    public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS";

    /**
     * http connect time out
     */
    public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;

    /**
     * http connect request time out
     */
    public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;

    /**
     * httpclient soceket time out
     */
    public static final int SOCKET_TIMEOUT = 60 * 1000;

    /**
     * http header
     */
    public static final String HTTP_HEADER_UNKNOWN = "unKnown";

    /**
     * http X-Forwarded-For
     */
    public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";

    /**
     * http X-Real-IP
     */
    public static final String HTTP_X_REAL_IP = "X-Real-IP";

    /**
     * UTF-8
     */
    public static final String UTF_8 = "UTF-8";

    /**
     * user name regex
     */
    public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");

    /**
     * email regex
     */
    public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");

    /**
     * read permission
     */
    public static final int READ_PERMISSION = 2;

    /**
     * write permission
     */
    public static final int WRITE_PERMISSION = 2 * 2;

    /**
     * execute permission
     */
    public static final int EXECUTE_PERMISSION = 1;

    /**
     * default admin permission
     */
    public static final int DEFAULT_ADMIN_PERMISSION = 7;

    /**
     * all permissions
     */
    public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;

    /**
     * max task timeout
     */
    public static final int MAX_TASK_TIMEOUT = 24 * 3600;

    /**
     * worker host weight
     */
    public static final int DEFAULT_WORKER_HOST_WEIGHT = 100;

    /**
     * time unit secong to minutes
     */
    public static final int SEC_2_MINUTES_TIME_UNIT = 60;

    /***
     *
     * rpc port
     */
    public static final String RPC_PORT = "rpc.port";

    /**
     * forbid running task
     */
    public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";

    /**
     * normal running task
     */
    public static final String FLOWNODE_RUN_FLAG_NORMAL = "NORMAL";

    public static final String COMMON_TASK_TYPE = "common";

    public static final String DEFAULT = "default";
    public static final String PASSWORD = "password";
    public static final String XXXXXX = "******";
    public static final String NULL = "NULL";

    public static final String THREAD_NAME_MASTER_SERVER = "Master-Server";
    public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server";

    /**
     * command parameter keys
     */
    public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";

    public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";

    public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId";

    public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId";

    public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0";

    public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";

    public static final String CMD_PARAM_SUB_PROCESS_DEFINE_CODE = "processDefinitionCode";

    public static final String CMD_PARAM_START_NODES = "StartNodeList";

    public static final String CMD_PARAM_START_PARAMS = "StartParams";

    public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams";

    /**
     * complement data start date
     */
    public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";

    /**
     * complement data end date
     */
    public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";

    /**
     * complement date default cron string
     */
    public static final String DEFAULT_CRON_STRING = "0 0 0 * * ? *";

    public static final int SLEEP_TIME_MILLIS = 1000;

    /**
     * one second mils
     */
    public static final int SECOND_TIME_MILLIS = 1000;

    /**
     * master task instance cache-database refresh interval
     */
    public static final int CACHE_REFRESH_TIME_MILLIS = 20 * 1000;

    /**
     * heartbeat for zk info length
     */
    public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 13;

    /**
     * jar
     */
    public static final String JAR = "jar";

    /**
     * hadoop
     */
    public static final String HADOOP = "hadoop";

    /**
     * -D <property>=<value>
     */
    public static final String D = "-D";

    /**
     * exit code success
     */
    public static final int EXIT_CODE_SUCCESS = 0;

    /**
     * exit code failure
     */
    public static final int EXIT_CODE_FAILURE = -1;

    /**
     * process or task definition failure
     */
    public static final int DEFINITION_FAILURE = -1;

    /**
     * process or task definition first version
     */
    public static final int VERSION_FIRST = 1;

    /**
     * date format of yyyyMMdd
     */
    public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";

    /**
     * date format of yyyyMMddHHmmss
     */
    public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";

    /**
     * system date(yyyyMMddHHmmss)
     */
    public static final String PARAMETER_DATETIME = "system.datetime";

    /**
     * system date(yyyymmdd) today
     */
    public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";

    /**
     * system date(yyyymmdd) yesterday
     */
    public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";

    /**
     * ACCEPTED
     */
    public static final String ACCEPTED = "ACCEPTED";

    /**
     * SUCCEEDED
     */
    public static final String SUCCEEDED = "SUCCEEDED";

    /**
     * NEW
     */
    public static final String NEW = "NEW";

    /**
     * NEW_SAVING
     */
    public static final String NEW_SAVING = "NEW_SAVING";

    /**
     * SUBMITTED
     */
    public static final String SUBMITTED = "SUBMITTED";

    /**
     * FAILED
     */
    public static final String FAILED = "FAILED";

    /**
     * KILLED
     */
    public static final String KILLED = "KILLED";

    /**
     * RUNNING
     */
    public static final String RUNNING = "RUNNING";

    /**
     * underline "_"
     */
    public static final String UNDERLINE = "_";

    /**
     * quartz job prifix
     */
    public static final String QUARTZ_JOB_PRIFIX = "job";

    /**
     * quartz job group prifix
     */
    public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";

    /**
     * projectId
     */
    public static final String PROJECT_ID = "projectId";

    /**
     * processId
     */
    public static final String SCHEDULE_ID = "scheduleId";

    /**
     * schedule
     */
    public static final String SCHEDULE = "schedule";

    /**
     * application regex
     */
    public static final String APPLICATION_REGEX = "application_\\d+_\\d+";

    public static final String PID = SystemUtils.IS_OS_WINDOWS ? "handle" : "pid";

    /**
     * month_begin
     */
    public static final String MONTH_BEGIN = "month_begin";

    /**
     * add_months
     */
    public static final String ADD_MONTHS = "add_months";

    /**
     * month_end
     */
    public static final String MONTH_END = "month_end";

    /**
     * week_begin
     */
    public static final String WEEK_BEGIN = "week_begin";

    /**
     * week_end
     */
    public static final String WEEK_END = "week_end";

    /**
     * timestamp
     */
    public static final String TIMESTAMP = "timestamp";

    public static final char SUBTRACT_CHAR = '-';
    public static final char ADD_CHAR = '+';
    public static final char MULTIPLY_CHAR = '*';
    public static final char DIVISION_CHAR = '/';
    public static final char LEFT_BRACE_CHAR = '(';
    public static final char RIGHT_BRACE_CHAR = ')';
    public static final String ADD_STRING = "+";
    public static final String STAR = "*";
    public static final String DIVISION_STRING = "/";
    public static final String LEFT_BRACE_STRING = "(";
    public static final char P = 'P';
    public static final char N = 'N';
    public static final String SUBTRACT_STRING = "-";
    public static final String GLOBAL_PARAMS = "globalParams";
    public static final String LOCAL_PARAMS = "localParams";
    public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId";
    public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
    public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance";
    public static final String CONDITION_RESULT = "conditionResult";
    public static final String SWITCH_RESULT = "switchResult";
    public static final String WAIT_START_TIMEOUT = "waitStartTimeout";
    public static final String DEPENDENCE = "dependence";
    public static final String TASK_LIST = "taskList";
    public static final String QUEUE = "queue";
    public static final String QUEUE_NAME = "queueName";
    public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0;
    public static final int LOG_QUERY_LIMIT = 4096;

    /**
     * master/worker server use for zk
     */
    public static final String MASTER_TYPE = "master";
    public static final String WORKER_TYPE = "worker";
    public static final String DELETE_OP = "delete";
    public static final String ADD_OP = "add";
    public static final String ALIAS = "alias";
    public static final String CONTENT = "content";
    public static final String DEPENDENT_SPLIT = ":||";
    public static final long DEPENDENT_ALL_TASK_CODE = 0;

    /**
     * preview schedule execute count
     */
    public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;

    /**
     * kerberos
     */
    public static final String KERBEROS = "kerberos";

    /**
     * kerberos expire time
     */
    public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";

    /**
     * java.security.krb5.conf
     */
    public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";

    /**
     * java.security.krb5.conf.path
     */
    public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";

    /**
     * hadoop.security.authentication
     */
    public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";

    /**
     * hadoop.security.authentication
     */
    public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";

    /**
     * com.amazonaws.services.s3.enableV4
     */
    public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";

    /**
     * loginUserFromKeytab user
     */
    public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";

    /**
     * loginUserFromKeytab path
     */
    public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";

    /**
     * task log info format
     */
    public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";

    public static final int[] NOT_TERMINATED_STATES = new int[] {
        ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
        ExecutionStatus.RUNNING_EXECUTION.ordinal(),
        ExecutionStatus.DELAY_EXECUTION.ordinal(),
        ExecutionStatus.READY_PAUSE.ordinal(),
        ExecutionStatus.READY_STOP.ordinal(),
        ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
        ExecutionStatus.WAITING_THREAD.ordinal(),
        ExecutionStatus.WAITING_DEPEND.ordinal()
    };

    public static final int[] RUNNING_PROCESS_STATE = new int[] {
        ExecutionStatus.RUNNING_EXECUTION.ordinal(),
        ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
        ExecutionStatus.SERIAL_WAIT.ordinal()
    };

    /**
     * status
     */
    public static final String STATUS = "status";

    /**
     * message
     */
    public static final String MSG = "msg";

    /**
     * data total
     */
    public static final String COUNT = "count";

    /**
     * page size
     */
    public static final String PAGE_SIZE = "pageSize";

    /**
     * current page no
     */
    public static final String PAGE_NUMBER = "pageNo";

    /**
     *
     */
    public static final String DATA_LIST = "data";

    public static final String TOTAL_LIST = "totalList";

    public static final String CURRENT_PAGE = "currentPage";

    public static final String TOTAL_PAGE = "totalPage";

    public static final String TOTAL = "total";

    /**
     * workflow
     */
    public static final String WORKFLOW_LIST = "workFlowList";

    public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList";

    /**
     * session user
     */
    public static final String SESSION_USER = "session.user";

    public static final String SESSION_ID = "sessionId";

    /**
     * locale
     */
    public static final String LOCALE_LANGUAGE = "language";

    /**
     * database type
     */
    public static final String MYSQL = "MYSQL";
    public static final String HIVE = "HIVE";

    public static final String ADDRESS = "address";
    public static final String DATABASE = "database";
    public static final String OTHER = "other";

    /**
     * session timeout
     */
    public static final int SESSION_TIME_OUT = 7200;
    public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
    public static final String UDF = "UDF";
    public static final String CLASS = "class";

    /**
     * dataSource sensitive param
     */
    public static final String DATASOURCE_PASSWORD_REGEX = "(?<=((?i)password((\\\\\":\\\\\")|(=')))).*?(?=((\\\\\")|(')))";

    /**
     * default worker group
     */
    public static final String DEFAULT_WORKER_GROUP = "default";

    /**
     * authorize writable perm
     */
    public static final int AUTHORIZE_WRITABLE_PERM = 7;

    /**
     * authorize readable perm
     */
    public static final int AUTHORIZE_READABLE_PERM = 4;

    public static final int NORMAL_NODE_STATUS = 0;
    public static final int ABNORMAL_NODE_STATUS = 1;
    public static final int BUSY_NODE_STATUE = 2;

    public static final String START_TIME = "start time";
    public static final String END_TIME = "end time";
    public static final String START_END_DATE = "startDate,endDate";

    /**
     * system line separator
     */
    public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");

    /**
     * datasource encryption salt
     */
    public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
    public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
    public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";

    /**
     * network interface preferred
     */
    public static final String DOLPHIN_SCHEDULER_NETWORK_INTERFACE_PREFERRED = "dolphin.scheduler.network.interface.preferred";

    /**
     * network IP gets priority, default inner outer
     */
    public static final String DOLPHIN_SCHEDULER_NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";

    /**
     * exec shell scripts
     */
    public static final String SH = "sh";

    /**
     * pstree, get pud and sub pid
     */
    public static final String PSTREE = "pstree";

    public static final Boolean KUBERNETES_MODE = !StringUtils.isEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && !StringUtils.isEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));

    /**
     * dry run flag
     */
    public static final int DRY_RUN_FLAG_NO = 0;
    public static final int DRY_RUN_FLAG_YES = 1;
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,551
[Bug][Module Name] job state always in final state:RUNNING_EXECUTION when execution engine is tez
``` yarn application -status application_1621995850095_6901 21/05/27 07:06:04 INFO client.RMProxy: Connecting to ResourceManager at ip-192-168-24-254.us-west-2.compute.internal/192.168.24.254:8032 21/05/27 07:06:04 INFO client.AHSProxy: Connecting to Application History server at ip-192-168-24-254.us-west-2.compute.internal/192.168.24.254:10200 21/05/27 07:06:04 INFO conf.Configuration: resource-types.xml not found 21/05/27 07:06:04 INFO resource.ResourceUtils: Unable to find 'resource-types.xml'. 21/05/27 07:06:04 INFO resource.ResourceUtils: Adding resource type - name = memory-mb, units = Mi, type = COUNTABLE 21/05/27 07:06:04 INFO resource.ResourceUtils: Adding resource type - name = vcores, units = , type = COUNTABLE Application Report : Application-Id : application_1621995850095_6901 Application-Name : HIVE-f07905b9-25b6-4e3f-93a4-652ed349b9bd Application-Type : TEZ User : hadoop Queue : default Application Priority : 0 Start-Time : 1622098349177 Finish-Time : 1622098762224 Progress : 100% State : FINISHED Final-State : ENDED job final state is not SUCCEEDED but ENDED.Cause shell command always running. worker.log [INFO] 2021-05-27 07:05:41.030 - [taskAppId=TASK-10-264-3804]:[127] - FINALIZE_SESSION [INFO] 2021-05-27 07:05:41.809 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:42.811 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:43.812 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:44.814 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:45.816 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:46.818 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:47.820 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:48.821 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:49.823 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:50.825 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION [INFO] 2021-05-27 07:05:51.826 - [taskAppId=TASK-16-259-3724]:[404] - appId:application_1621995850095_6901, final state:RUNNING_EXECUTION ``` **Which version of Dolphin Scheduler:** -[1.3.6]
https://github.com/apache/dolphinscheduler/issues/5551
https://github.com/apache/dolphinscheduler/pull/6289
8d39bf14b1b189b43d27acfa2a3ae9ddd4eec5c6
2dc09c627fe1d32c63e9a7551390540581916d7d
"2021-05-27T07:22:10Z"
java
"2021-12-05T06:03:29Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.dolphinscheduler.common.exception.BaseException;
import org.apache.dolphinscheduler.spi.enums.ResourceType;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;

import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

/**
 * hadoop utils
 * single instance
 */
public class HadoopUtils implements Closeable {

    private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);

    private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);

    public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
    public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
    public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
    public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
    public static final int HADOOP_RESOURCE_MANAGER_HTTP_ADDRESS_PORT_VALUE = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);

    private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";

    private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
            .newBuilder()
            .expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
            .build(new CacheLoader<String, HadoopUtils>() {
                @Override
                public HadoopUtils load(String key) throws Exception {
                    return new HadoopUtils();
                }
            });

    private static volatile boolean yarnEnabled = false;

    private Configuration configuration;
    private FileSystem fs;

    private HadoopUtils() {
        init();
        initHdfsPath();
    }

    public static HadoopUtils getInstance() {
        return cache.getUnchecked(HADOOP_UTILS_KEY);
    }

    /**
     * init dolphinscheduler root path in hdfs
     */
    private void initHdfsPath() {
        Path path = new Path(resourceUploadPath);
        try {
            if (!fs.exists(path)) {
                fs.mkdirs(path);
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * init hadoop configuration
     */
    private void init() {
        try {
            configuration = new HdfsConfiguration();

            String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
            ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);

            if (resUploadType == ResUploadType.HDFS) {
                if (CommonUtils.loadKerberosConf(configuration)) {
                    hdfsUser = "";
                }

                String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
                //first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
                // the default is the local file system
                if (defaultFS.startsWith("file")) {
                    String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULTFS);
                    if (StringUtils.isNotBlank(defaultFSProp)) {
                        Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
                        configuration.set(Constants.FS_DEFAULTFS, defaultFSProp);
                        fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
                    } else {
                        logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULTFS);
                        throw new RuntimeException(
                            String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULTFS)
                        );
                    }
                } else {
                    logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
                }

                if (StringUtils.isNotEmpty(hdfsUser)) {
                    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
                    ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> {
                        fs = FileSystem.get(configuration);
                        return true;
                    });
                } else {
                    logger.warn("hdfs.root.user is not set value!");
                    fs = FileSystem.get(configuration);
                }
            } else if (resUploadType == ResUploadType.S3) {
                System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
                configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
                configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
                configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
                configuration.set(Constants.FS_S3A_SECRET_KEY, PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
                fs = FileSystem.get(configuration);
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * @return Configuration
     */
    public Configuration getConfiguration() {
        return configuration;
    }

    /**
     * @return DefaultFS
     */
    public String getDefaultFS() {
        return getConfiguration().get(Constants.FS_DEFAULTFS);
    }

    /**
     * get application url
     *
     * @param applicationId application id
     * @return url of application
     */
    public String getApplicationUrl(String applicationId) throws Exception {
        /**
         * if rmHaIds contains xx, it signs not use resourcemanager
         * otherwise:
         * if rmHaIds is empty, single resourcemanager enabled
         * if rmHaIds not empty: resourcemanager HA enabled
         */

        yarnEnabled = true;
        String appUrl = StringUtils.isEmpty(rmHaIds) ? appAddress : getAppAddress(appAddress, rmHaIds);
        if (StringUtils.isBlank(appUrl)) {
            throw new BaseException("yarn application url generation failed");
        }
        if (logger.isDebugEnabled()) {
            logger.debug("yarn application url:{}, applicationId:{}", appUrl, applicationId);
        }
        return String.format(appUrl, HADOOP_RESOURCE_MANAGER_HTTP_ADDRESS_PORT_VALUE, applicationId);
    }

    public String getJobHistoryUrl(String applicationId) {
        //eg:application_1587475402360_712719 -> job_1587475402360_712719
        String jobId = applicationId.replace("application", "job");
        return String.format(jobHistoryAddress, jobId);
    }

    /**
     * cat file on hdfs
     *
     * @param hdfsFilePath hdfs file path
     * @return byte[] byte array
     * @throws IOException errors
     */
    public byte[] catFile(String hdfsFilePath) throws IOException {
        if (StringUtils.isBlank(hdfsFilePath)) {
            logger.error("hdfs file path:{} is blank", hdfsFilePath);
            return new byte[0];
        }

        try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) {
            return IOUtils.toByteArray(fsDataInputStream);
        }
    }

    /**
     * cat file on hdfs
     *
     * @param hdfsFilePath hdfs file path
     * @param skipLineNums skip line numbers
     * @param limit read how many lines
     * @return content of file
     * @throws IOException errors
     */
    public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
        if (StringUtils.isBlank(hdfsFilePath)) {
            logger.error("hdfs file path:{} is blank", hdfsFilePath);
            return Collections.emptyList();
        }

        try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
            BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
            Stream<String> stream = br.lines().skip(skipLineNums).limit(limit);
            return stream.collect(Collectors.toList());
        }
    }

    /**
     * make the given file and all non-existent parents into
     * directories. Has the semantics of Unix 'mkdir -p'.
     * Existence of the directory hierarchy is not an error.
     *
     * @param hdfsPath path to create
     * @return mkdir result
     * @throws IOException errors
     */
    public boolean mkdir(String hdfsPath) throws IOException {
        return fs.mkdirs(new Path(hdfsPath));
    }

    /**
     * copy files between FileSystems
     *
     * @param srcPath source hdfs path
     * @param dstPath destination hdfs path
     * @param deleteSource whether to delete the src
     * @param overwrite whether to overwrite an existing file
     * @return if success or not
     * @throws IOException errors
     */
    public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
        return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf());
    }

    /**
     * the src file is on the local disk. Add it to FS at
     * the given dst name.
     *
     * @param srcFile local file
     * @param dstHdfsPath destination hdfs path
     * @param deleteSource whether to delete the src
     * @param overwrite whether to overwrite an existing file
     * @return if success or not
     * @throws IOException errors
     */
    public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
        Path srcPath = new Path(srcFile);
        Path dstPath = new Path(dstHdfsPath);

        fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath);

        return true;
    }

    /**
     * copy hdfs file to local
     *
     * @param srcHdfsFilePath source hdfs file path
     * @param dstFile destination file
     * @param deleteSource delete source
     * @param overwrite overwrite
     * @return result of copy hdfs file to local
     * @throws IOException errors
     */
    public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
        Path srcPath = new Path(srcHdfsFilePath);
        File dstPath = new File(dstFile);

        if (dstPath.exists()) {
            if (dstPath.isFile()) {
                if (overwrite) {
                    Files.delete(dstPath.toPath());
                }
            } else {
                logger.error("destination file must be a file");
            }
        }

        if (!dstPath.getParentFile().exists()) {
            dstPath.getParentFile().mkdirs();
        }

        return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
    }

    /**
     * delete a file
     *
     * @param hdfsFilePath the path to delete.
     * @param recursive if path is a directory and set to
     * true, the directory is deleted else throws an exception. In
     * case of a file the recursive can be set to either true or false.
     * @return true if delete is successful else false.
     * @throws IOException errors
     */
    public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
        return fs.delete(new Path(hdfsFilePath), recursive);
    }

    /**
     * check if exists
     *
     * @param hdfsFilePath source file path
     * @return result of exists or not
     * @throws IOException errors
     */
    public boolean exists(String hdfsFilePath) throws IOException {
        return fs.exists(new Path(hdfsFilePath));
    }

    /**
     * Gets a list of files in the directory
     *
     * @param filePath file path
     * @return {@link FileStatus} file status
     * @throws Exception errors
     */
    public FileStatus[] listFileStatus(String filePath) throws Exception {
        try {
            return fs.listStatus(new Path(filePath));
        } catch (IOException e) {
            logger.error("Get file list exception", e);
            throw new Exception("Get file list exception", e);
        }
    }

    /**
     * Renames Path src to Path dst. Can take place on local fs
     * or remote DFS.
     *
     * @param src path to be renamed
     * @param dst new path after rename
     * @return true if rename is successful
     * @throws IOException on failure
     */
    public boolean rename(String src, String dst) throws IOException {
        return fs.rename(new Path(src), new Path(dst));
    }

    /**
     * hadoop resourcemanager enabled or not
     *
     * @return result
     */
    public boolean isYarnEnabled() {
        return yarnEnabled;
    }

    /**
     * get the state of an application
     *
     * @param applicationId application id
     * @return the return may be null or there may be other parse exceptions
     */
    public ExecutionStatus getApplicationStatus(String applicationId) throws Exception {
        if (StringUtils.isEmpty(applicationId)) {
            return null;
        }

        String result = Constants.FAILED;
        String applicationUrl = getApplicationUrl(applicationId);
        if (logger.isDebugEnabled()) {
            logger.debug("generate yarn application url, applicationUrl={}", applicationUrl);
        }

        String responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(applicationUrl) : HttpUtils.get(applicationUrl);
        if (responseContent != null) {
            ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
            if (!jsonObject.has("app")) {
                return ExecutionStatus.FAILURE;
            }
            result = jsonObject.path("app").path("finalStatus").asText();
        } else {
            //may be in job history
            String jobHistoryUrl = getJobHistoryUrl(applicationId);
            if (logger.isDebugEnabled()) {
                logger.debug("generate yarn job history application url, jobHistoryUrl={}", jobHistoryUrl);
            }
            responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(jobHistoryUrl) : HttpUtils.get(jobHistoryUrl);

            if (null != responseContent) {
                ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
                if (!jsonObject.has("job")) {
                    return ExecutionStatus.FAILURE;
                }
                result = jsonObject.path("job").path("state").asText();
            } else {
                return ExecutionStatus.FAILURE;
            }
        }

        switch (result) {
            case Constants.ACCEPTED:
                return ExecutionStatus.SUBMITTED_SUCCESS;
            case Constants.SUCCEEDED:
                return ExecutionStatus.SUCCESS;
            case Constants.NEW:
            case Constants.NEW_SAVING:
            case Constants.SUBMITTED:
            case Constants.FAILED:
                return ExecutionStatus.FAILURE;
            case Constants.KILLED:
                return ExecutionStatus.KILL;
            case Constants.RUNNING:
            default:
                return ExecutionStatus.RUNNING_EXECUTION;
        }
    }

    /**
     * get data hdfs path
     *
     * @return data hdfs path
     */
    public static String getHdfsDataBasePath() {
        if ("/".equals(resourceUploadPath)) {
            // if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
            return "";
        } else {
            return resourceUploadPath;
        }
    }

    /**
     * hdfs resource dir
     *
     * @param tenantCode tenant code
     * @param resourceType resource type
     * @return hdfs resource dir
     */
    public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
        String hdfsDir = "";
        if (resourceType.equals(ResourceType.FILE)) {
            hdfsDir = getHdfsResDir(tenantCode);
        } else if (resourceType.equals(ResourceType.UDF)) {
            hdfsDir = getHdfsUdfDir(tenantCode);
        }
        return hdfsDir;
    }

    /**
     * hdfs resource dir
     *
     * @param tenantCode tenant code
     * @return hdfs resource dir
     */
    public static String getHdfsResDir(String tenantCode) {
        return String.format("%s/resources", getHdfsTenantDir(tenantCode));
    }

    /**
     * hdfs user dir
     *
     * @param tenantCode tenant code
     * @param userId user id
     * @return hdfs resource dir
     */
    public static String getHdfsUserDir(String tenantCode, int userId) {
        return String.format("%s/home/%d", getHdfsTenantDir(tenantCode), userId);
    }

    /**
     * hdfs udf dir
     *
     * @param tenantCode tenant code
     * @return get udf dir on hdfs
     */
    public static String getHdfsUdfDir(String tenantCode) {
        return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
    }

    /**
     * get hdfs file name
     *
     * @param resourceType resource type
     * @param tenantCode tenant code
     * @param fileName file name
     * @return hdfs file name
     */
    public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
        if (fileName.startsWith("/")) {
            fileName = fileName.replaceFirst("/", "");
        }
        return String.format("%s/%s", getHdfsDir(resourceType, tenantCode), fileName);
    }

    /**
     * get absolute path and name for resource file on hdfs
     *
     * @param tenantCode tenant code
     * @param fileName file name
     * @return get absolute path and name for file on hdfs
     */
    public static String getHdfsResourceFileName(String tenantCode, String fileName) {
        if (fileName.startsWith("/")) {
            fileName = fileName.replaceFirst("/", "");
        }
        return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
    }

    /**
     * get absolute path and name for udf file on hdfs
     *
     * @param tenantCode tenant code
     * @param fileName file name
     * @return get absolute path and name for udf file on hdfs
     */
    public static String getHdfsUdfFileName(String tenantCode, String fileName) {
        if (fileName.startsWith("/")) {
            fileName = fileName.replaceFirst("/", "");
        }
        return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
    }

    /**
     * @param tenantCode tenant code
     * @return file directory of tenants on hdfs
     */
    public static String getHdfsTenantDir(String tenantCode) {
        return String.format("%s/%s", getHdfsDataBasePath(), tenantCode);
    }

    /**
     * getAppAddress
     *
     * @param appAddress app address
     * @param rmHa resource manager ha
     * @return app address
     */
    public static String getAppAddress(String appAddress, String rmHa) {

        //get active ResourceManager
        String activeRM = YarnHAAdminUtils.getActiveRMName(rmHa);

        if (StringUtils.isEmpty(activeRM)) {
            return null;
        }

        String[] split1 = appAddress.split(Constants.DOUBLE_SLASH);

        if (split1.length != 2) {
            return null;
        }

        String start = split1[0] + Constants.DOUBLE_SLASH;
        String[] split2 = split1[1].split(Constants.COLON);

        if (split2.length != 2) {
            return null;
        }

        String end = Constants.COLON + split2[1];

        return start + activeRM + end;
    }

    @Override
    public void close() throws IOException {
        if (fs != null) {
            try {
                fs.close();
            } catch (IOException e) {
                logger.error("Close HadoopUtils instance failed", e);
                throw new IOException("Close HadoopUtils instance failed", e);
            }
        }
    }

    /**
     * yarn ha admin utils
     */
    private static final class YarnHAAdminUtils extends RMAdminCLI {

        /**
         * get active resourcemanager
         */
        public static String getActiveRMName(String rmIds) {

            String[] rmIdArr = rmIds.split(Constants.COMMA);

            String yarnUrl = "http://%s:" + HADOOP_RESOURCE_MANAGER_HTTP_ADDRESS_PORT_VALUE + "/ws/v1/cluster/info";

            try {
                /**
                 * send http get request to rm
                 */
                for (String rmId : rmIdArr) {
                    String state = getRMState(String.format(yarnUrl, rmId));
                    if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
                        return rmId;
                    }
                }
            } catch (Exception e) {
                logger.error("yarn ha application url generation failed, message:{}", e.getMessage());
            }
            return null;
        }

        /**
         * get ResourceManager state
         */
        public static String getRMState(String url) {

            String retStr = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(url) : HttpUtils.get(url);

            if (StringUtils.isEmpty(retStr)) {
                return null;
            }
            //to json
            ObjectNode jsonObject = JSONUtils.parseObject(retStr);

            //get ResourceManager state
            if (!jsonObject.has("clusterInfo")) {
                return null;
            }
            return jsonObject.get("clusterInfo").path("haState").asText();
        }
    }
}
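Note how getApplicationStatus falls through to RUNNING_EXECUTION for any state it does not recognize: a caller that polls until a terminal status will therefore spin forever on an unmapped state such as tez's ENDED, which is exactly the repeating worker.log output quoted in the issue body. A self-contained illustration of that polling behavior with a stubbed status source (the names and the stub are invented; this is not the worker's real loop):

```java
import java.util.Set;
import java.util.concurrent.TimeUnit;

// Why an unmapped YARN state keeps a task "running": a polling loop like the
// worker's only exits once the reported status is terminal.
public class StatusPollSketch {

    static final Set<String> TERMINAL = Set.of("SUCCESS", "FAILURE", "KILL");

    // stand-in for HadoopUtils.getInstance().getApplicationStatus(appId)
    static String fetchStatus(String appId) {
        return "RUNNING_EXECUTION"; // what an unmapped state (e.g. ENDED) degrades to
    }

    public static void main(String[] args) throws InterruptedException {
        String appId = "application_1621995850095_6901";
        for (int i = 0; i < 5; i++) { // bounded here; the real loop polls until terminal
            String state = fetchStatus(appId);
            System.out.printf("appId:%s, final state:%s%n", appId, state);
            if (TERMINAL.contains(state)) {
                return;
            }
            TimeUnit.SECONDS.sleep(1);
        }
    }
}
```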
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/UsersController.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.apache.dolphinscheduler.api.enums.Status.AUTHORIZED_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.CREATE_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_USER_BY_ID_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GET_USER_INFO_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_DATASOURCE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_PROJECT_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_RESOURCE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GRANT_UDF_FUNCTION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_USER_LIST_PAGING_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UNAUTHORIZED_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_USER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.USER_LIST_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.VERIFY_USERNAME_ERROR;

import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.UsersService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import springfox.documentation.annotations.ApiIgnore;

/**
 * users controller
 */
@Api(tags = "USERS_TAG")
@RestController
@RequestMapping("/users")
public class UsersController extends BaseController {

    private static final Logger logger = LoggerFactory.getLogger(UsersController.class);

    @Autowired
    private UsersService usersService;

    /**
     * create user
     *
     * @param loginUser login user
     * @param userName user name
     * @param userPassword user password
     * @param email email
     * @param tenantId tenant id
     * @param phone phone
     * @param queue queue
     * @return create result code
     */
    @ApiOperation(value = "createUser", notes = "CREATE_USER_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String"),
        @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, type = "String"),
        @ApiImplicitParam(name = "tenantId", value = "TENANT_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "queue", value = "QUEUE", dataType = "String"),
        @ApiImplicitParam(name = "email", value = "EMAIL", required = true, dataType = "String"),
        @ApiImplicitParam(name = "phone", value = "PHONE", dataType = "String"),
        @ApiImplicitParam(name = "state", value = "STATE", dataType = "Int", example = "1")
    })
    @PostMapping(value = "/create")
    @ResponseStatus(HttpStatus.CREATED)
    @ApiException(CREATE_USER_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "userPassword"})
    public Result createUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam(value = "userName") String userName,
                             @RequestParam(value = "userPassword") String userPassword,
                             @RequestParam(value = "tenantId") int tenantId,
                             @RequestParam(value = "queue", required = false, defaultValue = "") String queue,
                             @RequestParam(value = "email") String email,
                             @RequestParam(value = "phone", required = false) String phone,
                             @RequestParam(value = "state", required = false) int state) throws Exception {
        Map<String, Object> result = usersService.createUser(loginUser, userName, userPassword, email, tenantId, phone, queue, state);
        return returnDataList(result);
    }

    /**
     * query user list paging
     *
     * @param loginUser login user
     * @param pageNo page number
     * @param searchVal search avlue
     * @param pageSize page size
     * @return user list page
     */
    @ApiOperation(value = "queryUserList", notes = "QUERY_USER_LIST_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"),
        @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10"),
        @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type = "String")
    })
    @GetMapping(value = "/list-paging")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(QUERY_USER_LIST_PAGING_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result queryUserList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam("pageNo") Integer pageNo,
                                @RequestParam("pageSize") Integer pageSize,
                                @RequestParam(value = "searchVal", required = false) String searchVal) {
        Result result = checkPageParams(pageNo, pageSize);
        if (!result.checkResult()) {
            return result;
        }
        searchVal = ParameterUtils.handleEscapes(searchVal);
        result = usersService.queryUserList(loginUser, searchVal, pageNo, pageSize);
        return result;
    }

    /**
     * update user
     *
     * @param loginUser login user
     * @param id user id
     * @param userName user name
     * @param userPassword user password
     * @param email email
     * @param tenantId tennat id
     * @param phone phone
     * @param queue queue
     * @return update result code
     */
    @ApiOperation(value = "updateUser", notes = "UPDATE_USER_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String"),
        @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, type = "String"),
        @ApiImplicitParam(name = "tenantId", value = "TENANT_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "queue", value = "QUEUE", dataType = "String"),
        @ApiImplicitParam(name = "email", value = "EMAIL", required = true, dataType = "String"),
        @ApiImplicitParam(name = "phone", value = "PHONE", dataType = "String"),
        @ApiImplicitParam(name = "state", value = "STATE", dataType = "Int", example = "1")
    })
    @PostMapping(value = "/update")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(UPDATE_USER_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "userPassword"})
    public Result updateUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam(value = "id") int id,
                             @RequestParam(value = "userName") String userName,
                             @RequestParam(value = "userPassword") String userPassword,
                             @RequestParam(value = "queue", required = false, defaultValue = "") String queue,
                             @RequestParam(value = "email") String email,
                             @RequestParam(value = "tenantId") int tenantId,
                             @RequestParam(value = "phone", required = false) String phone,
                             @RequestParam(value = "state", required = false) int state) throws Exception {
        Map<String, Object> result = usersService.updateUser(loginUser, id, userName, userPassword, email, tenantId, phone, queue, state);
        return returnDataList(result);
    }

    /**
     * delete user by id
     *
     * @param loginUser login user
     * @param id user id
     * @return delete result code
     */
    @ApiOperation(value = "delUserById", notes = "DELETE_USER_BY_ID_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "USER_ID", required = true, dataType = "Int", example = "100")
    })
    @PostMapping(value = "/delete")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(DELETE_USER_BY_ID_ERROR)
    @AccessLogAnnotation
    public Result delUserById(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                              @RequestParam(value = "id") int id) throws Exception {
        Map<String, Object> result = usersService.deleteUserById(loginUser, id);
        return returnDataList(result);
    }

    /**
     * grant project
     *
     * @param loginUser login user
     * @param userId user id
     * @param projectIds project id array
     * @return grant result code
     */
    @ApiOperation(value = "grantProject", notes = "GRANT_PROJECT_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "projectIds", value = "PROJECT_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-project")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_PROJECT_ERROR)
    @AccessLogAnnotation
    public Result grantProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                               @RequestParam(value = "userId") int userId,
                               @RequestParam(value = "projectIds") String projectIds) {
        Map<String, Object> result = usersService.grantProject(loginUser, userId, projectIds);
        return returnDataList(result);
    }

    /**
     * grant project by code
     *
     * @param loginUser login user
     * @param userId user id
     * @param projectCodes project code array
     * @return grant result code
     */
    @ApiOperation(value = "grantProjectByCode", notes = "GRANT_PROJECT_BY_CODE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "projectCodes", value = "PROJECT_CODES", required = true, type = "String")
    })
    @PostMapping(value = "/grant-project-by-code")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_PROJECT_ERROR)
    @AccessLogAnnotation
    public Result grantProjectByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                     @RequestParam(value = "userId") int userId,
                                     @RequestParam(value = "projectCodes") String projectCodes) {
        Map<String, Object> result = this.usersService.grantProjectByCode(loginUser, userId, projectCodes);
        return returnDataList(result);
    }

    /**
     * grant resource
     *
     * @param loginUser login user
     * @param userId user id
     * @param resourceIds resource id array
     * @return grant result code
     */
    @ApiOperation(value = "grantResource", notes = "GRANT_RESOURCE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "resourceIds", value = "RESOURCE_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-file")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_RESOURCE_ERROR)
    @AccessLogAnnotation
    public Result grantResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam(value = "userId") int userId,
                                @RequestParam(value = "resourceIds") String resourceIds) {
        Map<String, Object> result = usersService.grantResources(loginUser, userId, resourceIds);
        return returnDataList(result);
    }

    /**
     * grant udf function
     *
     * @param loginUser login user
     * @param userId user id
     * @param udfIds udf id array
     * @return grant result code
     */
    @ApiOperation(value = "grantUDFFunc", notes = "GRANT_UDF_FUNC_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "udfIds", value = "UDF_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-udf-func")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_UDF_FUNCTION_ERROR)
    @AccessLogAnnotation
    public Result grantUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                               @RequestParam(value = "userId") int userId,
                               @RequestParam(value = "udfIds") String udfIds) {
        Map<String, Object> result = usersService.grantUDFFunction(loginUser, userId, udfIds);
        return returnDataList(result);
    }

    /**
     * grant datasource
     *
     * @param loginUser login user
     * @param userId user id
     * @param datasourceIds data source id array
     * @return grant result code
     */
    @ApiOperation(value = "grantDataSource", notes = "GRANT_DATASOURCE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "datasourceIds", value = "DATASOURCE_IDS", required = true, type = "String")
    })
    @PostMapping(value = "/grant-datasource")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GRANT_DATASOURCE_ERROR)
    @AccessLogAnnotation
    public Result grantDataSource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                  @RequestParam(value = "userId") int userId,
                                  @RequestParam(value = "datasourceIds") String datasourceIds) {
        Map<String, Object> result = usersService.grantDataSource(loginUser, userId, datasourceIds);
        return returnDataList(result);
    }

    /**
     * get user info
     *
     * @param loginUser login user
     * @return user info
     */
    @ApiOperation(value = "getUserInfo", notes = "GET_USER_INFO_NOTES")
    @GetMapping(value = "/get-user-info")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(GET_USER_INFO_ERROR)
    @AccessLogAnnotation
    public Result getUserInfo(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
        Map<String, Object> result = usersService.getUserInfo(loginUser);
        return returnDataList(result);
    }

    /**
     * user list no paging
     *
     * @param loginUser login user
     * @return user list
     */
    @ApiOperation(value = "listUser", notes = "LIST_USER_NOTES")
    @GetMapping(value = "/list")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(USER_LIST_ERROR)
    @AccessLogAnnotation
    public Result listUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
        Map<String, Object> result = usersService.queryAllGeneralUsers(loginUser);
        return returnDataList(result);
    }

    /**
     * user list no paging
     *
     * @param loginUser login user
     * @return user list
     */
    @GetMapping(value = "/list-all")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(USER_LIST_ERROR)
    @AccessLogAnnotation
    public Result listAll(@RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
        Map<String, Object> result = usersService.queryUserList(loginUser);
        return returnDataList(result);
    }

    /**
     * verify username
     *
     * @param loginUser login user
     * @param userName user name
     * @return true if user name not exists, otherwise return false
     */
    @ApiOperation(value = "verifyUserName", notes = "VERIFY_USER_NAME_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String")
    })
    @GetMapping(value = "/verify-user-name")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(VERIFY_USERNAME_ERROR)
    @AccessLogAnnotation
    public Result verifyUserName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @RequestParam(value = "userName") String userName
    ) {
        return usersService.verifyUserName(userName);
    }

    /**
     * unauthorized user
     *
     * @param loginUser login user
     * @param alertgroupId alert group id
     * @return unauthorize result code
     */
    @ApiOperation(value = "unauthorizedUser", notes = "UNAUTHORIZED_USER_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "alertgroupId", value = "ALERT_GROUP_ID", required = true, type = "String")
    })
    @GetMapping(value = "/unauth-user")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(UNAUTHORIZED_USER_ERROR)
    @AccessLogAnnotation
    public Result unauthorizedUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                   @RequestParam("alertgroupId") Integer alertgroupId) {
        Map<String, Object> result = usersService.unauthorizedUser(loginUser, alertgroupId);
        return returnDataList(result);
    }

    /**
     * authorized user
     *
     * @param loginUser login user
     * @param alertgroupId alert group id
     * @return authorized result code
     */
    @ApiOperation(value = "authorizedUser", notes = "AUTHORIZED_USER_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "alertgroupId", value = "ALERT_GROUP_ID", required = true, type = "String")
    })
    @GetMapping(value = "/authed-user")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(AUTHORIZED_USER_ERROR)
    @AccessLogAnnotation
    public Result authorizedUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @RequestParam("alertgroupId") Integer alertgroupId) {
        try {
            Map<String, Object> result = usersService.authorizedUser(loginUser, alertgroupId);
            return returnDataList(result);
        } catch (Exception e) {
            logger.error(Status.AUTHORIZED_USER_ERROR.getMsg(), e);
            return error(Status.AUTHORIZED_USER_ERROR.getCode(), Status.AUTHORIZED_USER_ERROR.getMsg());
        }
    }

    /**
     * user registry
     *
     * @param userName user name
     * @param userPassword user password
     * @param repeatPassword repeat password
     * @param email user email
     */
    @ApiOperation(value =
"registerUser", notes = "REGISTER_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, type = "String"), @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, type = "String"), @ApiImplicitParam(name = "repeatPassword", value = "REPEAT_PASSWORD", required = true, type = "String"), @ApiImplicitParam(name = "email", value = "EMAIL", required = true, type = "String"), }) @PostMapping("/register") @ResponseStatus(HttpStatus.OK) @ApiException(CREATE_USER_ERROR) @AccessLogAnnotation public Result<Object> registerUser(@RequestParam(value = "userName") String userName, @RequestParam(value = "userPassword") String userPassword, @RequestParam(value = "repeatPassword") String repeatPassword, @RequestParam(value = "email") String email) throws Exception { userName = ParameterUtils.handleEscapes(userName); userPassword = ParameterUtils.handleEscapes(userPassword); repeatPassword = ParameterUtils.handleEscapes(repeatPassword); email = ParameterUtils.handleEscapes(email); Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email); return returnDataList(result); } /** * user activate * * @param userName user name */ @ApiOperation(value = "activateUser", notes = "ACTIVATE_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userName", value = "USER_NAME", type = "String"), }) @PostMapping("/activate") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_USER_ERROR) @AccessLogAnnotation public Result<Object> activateUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "userName") String userName) { userName = ParameterUtils.handleEscapes(userName); Map<String, Object> result = usersService.activateUser(loginUser, userName); return returnDataList(result); } /** * user batch activate * * @param userNames user names */ @ApiOperation(value = "batchActivateUser", notes = "BATCH_ACTIVATE_USER_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userNames", value = "USER_NAMES", required = true, type = "String"), }) @PostMapping("/batch/activate") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_USER_ERROR) @AccessLogAnnotation public Result<Object> batchActivateUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestBody List<String> userNames) { List<String> formatUserNames = userNames.stream().map(ParameterUtils::handleEscapes).collect(Collectors.toList()); Map<String, Object> result = usersService.batchActivateUser(loginUser, formatUserNames); return returnDataList(result); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.enums; import java.util.Locale; import java.util.Optional; import org.springframework.context.i18n.LocaleContextHolder; /** * status enum // todo #4855 One category one interval */ public enum Status { SUCCESS(0, "success", "成功"), INTERNAL_SERVER_ERROR_ARGS(10000, "Internal Server Error: {0}", "服务端异常: {0}"), REQUEST_PARAMS_NOT_VALID_ERROR(10001, "request parameter {0} is not valid", "请求参数[{0}]无效"), TASK_TIMEOUT_PARAMS_ERROR(10002, "task timeout parameter is not valid", "任务超时参数无效"), USER_NAME_EXIST(10003, "user name already exists", "用户名已存在"), USER_NAME_NULL(10004, "user name is null", "用户名不能为空"), HDFS_OPERATION_ERROR(10006, "hdfs operation error", "hdfs操作错误"), TASK_INSTANCE_NOT_FOUND(10008, "task instance not found", "任务实例不存在"), OS_TENANT_CODE_EXIST(10009, "os tenant code {0} already exists", "操作系统租户[{0}]已存在"), USER_NOT_EXIST(10010, "user {0} not exists", "用户[{0}]不存在"), ALERT_GROUP_NOT_EXIST(10011, "alarm group not found", "告警组不存在"), ALERT_GROUP_EXIST(10012, "alarm group already exists", "告警组名称已存在"), USER_NAME_PASSWD_ERROR(10013, "user name or password error", "用户名或密码错误"), LOGIN_SESSION_FAILED(10014, "create session failed!", "创建session失败"), DATASOURCE_EXIST(10015, "data source name already exists", "数据源名称已存在"), DATASOURCE_CONNECT_FAILED(10016, "data source connection failed", "建立数据源连接失败"), TENANT_NOT_EXIST(10017, "tenant not exists", "租户不存在"), PROJECT_NOT_FOUNT(10018, "project {0} not found ", "项目[{0}]不存在"), PROJECT_ALREADY_EXISTS(10019, "project {0} already exists", "项目名称[{0}]已存在"), TASK_INSTANCE_NOT_EXISTS(10020, "task instance {0} does not exist", "任务实例[{0}]不存在"), TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE(10021, "task instance {0} is not sub process instance", "任务实例[{0}]不是子流程实例"), SCHEDULE_CRON_NOT_EXISTS(10022, "scheduler crontab {0} does not exist", "调度配置定时表达式[{0}]不存在"), SCHEDULE_CRON_ONLINE_FORBID_UPDATE(10023, "online status does not allow update operations", "调度配置上线状态不允许修改"), SCHEDULE_CRON_CHECK_FAILED(10024, "scheduler crontab expression validation failure: {0}", "调度配置定时表达式验证失败: {0}"), MASTER_NOT_EXISTS(10025, "master does not exist", "无可用master节点"), SCHEDULE_STATUS_UNKNOWN(10026, "unknown status: {0}", "未知状态: {0}"), CREATE_ALERT_GROUP_ERROR(10027, "create alert group error", "创建告警组错误"), QUERY_ALL_ALERTGROUP_ERROR(10028, "query all alertgroup error", "查询告警组错误"), LIST_PAGING_ALERT_GROUP_ERROR(10029, "list paging alert group error", "分页查询告警组错误"), UPDATE_ALERT_GROUP_ERROR(10030, "update alert group error", "更新告警组错误"), DELETE_ALERT_GROUP_ERROR(10031, "delete alert group error", "删除告警组错误"), ALERT_GROUP_GRANT_USER_ERROR(10032, "alert group grant user error", "告警组授权用户错误"), CREATE_DATASOURCE_ERROR(10033, "create datasource error", "创建数据源错误"), UPDATE_DATASOURCE_ERROR(10034, "update 
datasource error", "更新数据源错误"), QUERY_DATASOURCE_ERROR(10035, "query datasource error", "查询数据源错误"), CONNECT_DATASOURCE_FAILURE(10036, "connect datasource failure", "建立数据源连接失败"), CONNECTION_TEST_FAILURE(10037, "connection test failure", "测试数据源连接失败"), DELETE_DATA_SOURCE_FAILURE(10038, "delete data source failure", "删除数据源失败"), VERIFY_DATASOURCE_NAME_FAILURE(10039, "verify datasource name failure", "验证数据源名称失败"), UNAUTHORIZED_DATASOURCE(10040, "unauthorized datasource", "未经授权的数据源"), AUTHORIZED_DATA_SOURCE(10041, "authorized data source", "授权数据源失败"), LOGIN_SUCCESS(10042, "login success", "登录成功"), USER_LOGIN_FAILURE(10043, "user login failure", "用户登录失败"), LIST_WORKERS_ERROR(10044, "list workers error", "查询worker列表错误"), LIST_MASTERS_ERROR(10045, "list masters error", "查询master列表错误"), UPDATE_PROJECT_ERROR(10046, "update project error", "更新项目信息错误"), QUERY_PROJECT_DETAILS_BY_CODE_ERROR(10047, "query project details by code error", "查询项目详细信息错误"), CREATE_PROJECT_ERROR(10048, "create project error", "创建项目错误"), LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR(10049, "login user query project list paging error", "分页查询项目列表错误"), DELETE_PROJECT_ERROR(10050, "delete project error", "删除项目错误"), QUERY_UNAUTHORIZED_PROJECT_ERROR(10051, "query unauthorized project error", "查询未授权项目错误"), QUERY_AUTHORIZED_PROJECT(10052, "query authorized project", "查询授权项目错误"), QUERY_QUEUE_LIST_ERROR(10053, "query queue list error", "查询队列列表错误"), CREATE_RESOURCE_ERROR(10054, "create resource error", "创建资源错误"), UPDATE_RESOURCE_ERROR(10055, "update resource error", "更新资源错误"), QUERY_RESOURCES_LIST_ERROR(10056, "query resources list error", "查询资源列表错误"), QUERY_RESOURCES_LIST_PAGING(10057, "query resources list paging", "分页查询资源列表错误"), DELETE_RESOURCE_ERROR(10058, "delete resource error", "删除资源错误"), VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR(10059, "verify resource by name and type error", "资源名称或类型验证错误"), VIEW_RESOURCE_FILE_ON_LINE_ERROR(10060, "view resource file online error", "查看资源文件错误"), CREATE_RESOURCE_FILE_ON_LINE_ERROR(10061, "create resource file online error", "创建资源文件错误"), RESOURCE_FILE_IS_EMPTY(10062, "resource file is empty", "资源文件内容不能为空"), EDIT_RESOURCE_FILE_ON_LINE_ERROR(10063, "edit resource file online error", "更新资源文件错误"), DOWNLOAD_RESOURCE_FILE_ERROR(10064, "download resource file error", "下载资源文件错误"), CREATE_UDF_FUNCTION_ERROR(10065, "create udf function error", "创建UDF函数错误"), VIEW_UDF_FUNCTION_ERROR(10066, "view udf function error", "查询UDF函数错误"), UPDATE_UDF_FUNCTION_ERROR(10067, "update udf function error", "更新UDF函数错误"), QUERY_UDF_FUNCTION_LIST_PAGING_ERROR(10068, "query udf function list paging error", "分页查询UDF函数列表错误"), QUERY_DATASOURCE_BY_TYPE_ERROR(10069, "query datasource by type error", "查询数据源信息错误"), VERIFY_UDF_FUNCTION_NAME_ERROR(10070, "verify udf function name error", "UDF函数名称验证错误"), DELETE_UDF_FUNCTION_ERROR(10071, "delete udf function error", "删除UDF函数错误"), AUTHORIZED_FILE_RESOURCE_ERROR(10072, "authorized file resource error", "授权资源文件错误"), AUTHORIZE_RESOURCE_TREE(10073, "authorize resource tree display error", "授权资源目录树错误"), UNAUTHORIZED_UDF_FUNCTION_ERROR(10074, "unauthorized udf function error", "查询未授权UDF函数错误"), AUTHORIZED_UDF_FUNCTION_ERROR(10075, "authorized udf function error", "授权UDF函数错误"), CREATE_SCHEDULE_ERROR(10076, "create schedule error", "创建调度配置错误"), UPDATE_SCHEDULE_ERROR(10077, "update schedule error", "更新调度配置错误"), PUBLISH_SCHEDULE_ONLINE_ERROR(10078, "publish schedule online error", "上线调度配置错误"), OFFLINE_SCHEDULE_ERROR(10079, "offline schedule error", "下线调度配置错误"), QUERY_SCHEDULE_LIST_PAGING_ERROR(10080, "query 
schedule list paging error", "分页查询调度配置列表错误"), QUERY_SCHEDULE_LIST_ERROR(10081, "query schedule list error", "查询调度配置列表错误"), QUERY_TASK_LIST_PAGING_ERROR(10082, "query task list paging error", "分页查询任务列表错误"), QUERY_TASK_RECORD_LIST_PAGING_ERROR(10083, "query task record list paging error", "分页查询任务记录错误"), CREATE_TENANT_ERROR(10084, "create tenant error", "创建租户错误"), QUERY_TENANT_LIST_PAGING_ERROR(10085, "query tenant list paging error", "分页查询租户列表错误"), QUERY_TENANT_LIST_ERROR(10086, "query tenant list error", "查询租户列表错误"), UPDATE_TENANT_ERROR(10087, "update tenant error", "更新租户错误"), DELETE_TENANT_BY_ID_ERROR(10088, "delete tenant by id error", "删除租户错误"), VERIFY_OS_TENANT_CODE_ERROR(10089, "verify os tenant code error", "操作系统租户验证错误"), CREATE_USER_ERROR(10090, "create user error", "创建用户错误"), QUERY_USER_LIST_PAGING_ERROR(10091, "query user list paging error", "分页查询用户列表错误"), UPDATE_USER_ERROR(10092, "update user error", "更新用户错误"), DELETE_USER_BY_ID_ERROR(10093, "delete user by id error", "删除用户错误"), GRANT_PROJECT_ERROR(10094, "grant project error", "授权项目错误"), GRANT_RESOURCE_ERROR(10095, "grant resource error", "授权资源错误"), GRANT_UDF_FUNCTION_ERROR(10096, "grant udf function error", "授权UDF函数错误"), GRANT_DATASOURCE_ERROR(10097, "grant datasource error", "授权数据源错误"), GET_USER_INFO_ERROR(10098, "get user info error", "获取用户信息错误"), USER_LIST_ERROR(10099, "user list error", "查询用户列表错误"), VERIFY_USERNAME_ERROR(10100, "verify username error", "用户名验证错误"), UNAUTHORIZED_USER_ERROR(10101, "unauthorized user error", "查询未授权用户错误"), AUTHORIZED_USER_ERROR(10102, "authorized user error", "查询授权用户错误"), QUERY_TASK_INSTANCE_LOG_ERROR(10103, "view task instance log error", "查询任务实例日志错误"), DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR(10104, "download task instance log file error", "下载任务日志文件错误"), CREATE_PROCESS_DEFINITION_ERROR(10105, "create process definition error", "创建工作流错误"), VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106, "verify process definition name unique error", "工作流定义名称验证错误"), UPDATE_PROCESS_DEFINITION_ERROR(10107, "update process definition error", "更新工作流定义错误"), RELEASE_PROCESS_DEFINITION_ERROR(10108, "release process definition error", "上线工作流错误"), QUERY_DETAIL_OF_PROCESS_DEFINITION_ERROR(10109, "query detail of process definition error", "查询工作流详细信息错误"), QUERY_PROCESS_DEFINITION_LIST(10110, "query process definition list", "查询工作流列表错误"), ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR(10111, "encapsulation treeview structure error", "查询工作流树形图数据错误"), GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR(10112, "get tasks list by process definition id error", "查询工作流定义节点信息错误"), QUERY_PROCESS_INSTANCE_LIST_PAGING_ERROR(10113, "query process instance list paging error", "分页查询工作流实例列表错误"), QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_ERROR(10114, "query task list by process instance id error", "查询任务实例列表错误"), UPDATE_PROCESS_INSTANCE_ERROR(10115, "update process instance error", "更新工作流实例错误"), QUERY_PROCESS_INSTANCE_BY_ID_ERROR(10116, "query process instance by id error", "查询工作流实例错误"), DELETE_PROCESS_INSTANCE_BY_ID_ERROR(10117, "delete process instance by id error", "删除工作流实例错误"), QUERY_SUB_PROCESS_INSTANCE_DETAIL_INFO_BY_TASK_ID_ERROR(10118, "query sub process instance detail info by task id error", "查询子流程任务实例错误"), QUERY_PARENT_PROCESS_INSTANCE_DETAIL_INFO_BY_SUB_PROCESS_INSTANCE_ID_ERROR(10119, "query parent process instance detail info by sub process instance id error", "查询子流程该工作流实例错误"), QUERY_PROCESS_INSTANCE_ALL_VARIABLES_ERROR(10120, "query process instance all variables error", "查询工作流自定义变量信息错误"), 
ENCAPSULATION_PROCESS_INSTANCE_GANTT_STRUCTURE_ERROR(10121, "encapsulation process instance gantt structure error", "查询工作流实例甘特图数据错误"), QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR(10122, "query process definition list paging error", "分页查询工作流定义列表错误"), SIGN_OUT_ERROR(10123, "sign out error", "退出错误"), OS_TENANT_CODE_HAS_ALREADY_EXISTS(10124, "os tenant code has already exists", "操作系统租户已存在"), IP_IS_EMPTY(10125, "ip is empty", "IP地址不能为空"), SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE(10126, "schedule release is already {0}", "调度配置上线错误[{0}]"), CREATE_QUEUE_ERROR(10127, "create queue error", "创建队列错误"), QUEUE_NOT_EXIST(10128, "queue {0} not exists", "队列ID[{0}]不存在"), QUEUE_VALUE_EXIST(10129, "queue value {0} already exists", "队列值[{0}]已存在"), QUEUE_NAME_EXIST(10130, "queue name {0} already exists", "队列名称[{0}]已存在"), UPDATE_QUEUE_ERROR(10131, "update queue error", "更新队列信息错误"), NEED_NOT_UPDATE_QUEUE(10132, "no content changes, no updates are required", "数据未变更,不需要更新队列信息"), VERIFY_QUEUE_ERROR(10133, "verify queue error", "验证队列信息错误"), NAME_NULL(10134, "name must be not null", "名称不能为空"), NAME_EXIST(10135, "name {0} already exists", "名称[{0}]已存在"), SAVE_ERROR(10136, "save error", "保存错误"), DELETE_PROJECT_ERROR_DEFINES_NOT_NULL(10137, "please delete the process definitions in project first!", "请先删除全部工作流定义"), BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10117, "batch delete process instance by ids {0} error", "批量删除工作流实例错误"), PREVIEW_SCHEDULE_ERROR(10139, "preview schedule error", "预览调度配置错误"), PARSE_TO_CRON_EXPRESSION_ERROR(10140, "parse cron to cron expression error", "解析调度表达式错误"), SCHEDULE_START_TIME_END_TIME_SAME(10141, "The start time must not be the same as the end", "开始时间不能和结束时间一样"), DELETE_TENANT_BY_ID_FAIL(10142, "delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"), DELETE_TENANT_BY_ID_FAIL_DEFINES(10143, "delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"), DELETE_TENANT_BY_ID_FAIL_USERS(10144, "delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"), DELETE_WORKER_GROUP_BY_ID_FAIL(10145, "delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"), QUERY_WORKER_GROUP_FAIL(10146, "query worker group fail ", "查询worker分组失败"), DELETE_WORKER_GROUP_FAIL(10147, "delete worker group fail ", "删除worker分组失败"), USER_DISABLED(10148, "The current user is disabled", "当前用户已停用"), COPY_PROCESS_DEFINITION_ERROR(10149, "copy process definition from {0} to {1} error : {2}", "从{0}复制工作流到{1}错误 : {2}"), MOVE_PROCESS_DEFINITION_ERROR(10150, "move process definition from {0} to {1} error : {2}", "从{0}移动工作流到{1}错误 : {2}"), SWITCH_PROCESS_DEFINITION_VERSION_ERROR(10151, "Switch process definition version error", "切换工作流版本出错"), SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR(10152 , "Switch process definition version error: not exists process definition, [process definition id {0}]", "切换工作流版本出错:工作流不存在,[工作流id {0}]"), SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR(10153 , "Switch process definition version error: not exists process definition version, [process definition id {0}] [version number {1}]", "切换工作流版本出错:工作流版本信息不存在,[工作流id {0}] [版本号 {1}]"), QUERY_PROCESS_DEFINITION_VERSIONS_ERROR(10154, "query process definition versions error", "查询工作流历史版本信息出错"), DELETE_PROCESS_DEFINITION_VERSION_ERROR(10156, "delete process definition version error", "删除工作流历史版本出错"), 
QUERY_USER_CREATED_PROJECT_ERROR(10157, "query user created project error error", "查询用户创建的项目错误"), PROCESS_DEFINITION_CODES_IS_EMPTY(10158, "process definition codes is empty", "工作流CODES不能为空"), BATCH_COPY_PROCESS_DEFINITION_ERROR(10159, "batch copy process definition error", "复制工作流错误"), BATCH_MOVE_PROCESS_DEFINITION_ERROR(10160, "batch move process definition error", "移动工作流错误"), QUERY_WORKFLOW_LINEAGE_ERROR(10161, "query workflow lineage error", "查询血缘失败"), QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_ERROR(10162, "query authorized and user created project error error", "查询授权的和用户创建的项目错误"), DELETE_PROCESS_DEFINITION_BY_CODE_FAIL(10163, "delete process definition by code fail, for there are {0} process instances in executing using it", "删除工作流定义失败,有[{0}]个运行中的工作流实例正在使用"), CHECK_OS_TENANT_CODE_ERROR(10164, "Please enter the English os tenant code", "请输入英文操作系统租户"), FORCE_TASK_SUCCESS_ERROR(10165, "force task success error", "强制成功任务实例错误"), TASK_INSTANCE_STATE_OPERATION_ERROR(10166, "the status of task instance {0} is {1},Cannot perform force success operation", "任务实例[{0}]的状态是[{1}],无法执行强制成功操作"), DATASOURCE_TYPE_NOT_EXIST(10167, "data source type not exist", "数据源类型不存在"), PROCESS_DEFINITION_NAME_EXIST(10168, "process definition name {0} already exists", "工作流定义名称[{0}]已存在"), DATASOURCE_DB_TYPE_ILLEGAL(10169, "datasource type illegal", "数据源类型参数不合法"), DATASOURCE_PORT_ILLEGAL(10170, "datasource port illegal", "数据源端口参数不合法"), DATASOURCE_OTHER_PARAMS_ILLEGAL(10171, "datasource other params illegal", "数据源其他参数不合法"), DATASOURCE_NAME_ILLEGAL(10172, "datasource name illegal", "数据源名称不合法"), DATASOURCE_HOST_ILLEGAL(10173, "datasource host illegal", "数据源HOST不合法"), DELETE_WORKER_GROUP_NOT_EXIST(10174, "delete worker group not exist ", "删除worker分组不存在"), CREATE_WORKER_GROUP_FORBIDDEN_IN_DOCKER(10175, "create worker group forbidden in docker ", "创建worker分组在docker中禁止"), DELETE_WORKER_GROUP_FORBIDDEN_IN_DOCKER(10176, "delete worker group forbidden in docker ", "删除worker分组在docker中禁止"), WORKER_ADDRESS_INVALID(10177, "worker address {0} invalid", "worker地址[{0}]无效"), QUERY_WORKER_ADDRESS_LIST_FAIL(10178, "query worker address list fail ", "查询worker地址列表失败"), TRANSFORM_PROJECT_OWNERSHIP(10179, "Please transform project ownership [{0}]", "请先转移项目所有权[{0}]"), QUERY_ALERT_GROUP_ERROR(10180, "query alert group error", "查询告警组错误"), CURRENT_LOGIN_USER_TENANT_NOT_EXIST(10181, "the tenant of the currently login user is not specified", "未指定当前登录用户的租户"), UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"), UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"), RESOURCE_NOT_EXIST(20004, "resource not exist", "资源不存在"), RESOURCE_EXIST(20005, "resource already exists", "资源已存在"), RESOURCE_SUFFIX_NOT_SUPPORT_VIEW(20006, "resource suffix do not support online viewing", "资源文件后缀不支持查看"), RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"), RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"), UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"), HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"), RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"), RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"), UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}", "udf函数绑定了资源文件[{0}]"), RESOURCE_IS_USED(20014, "resource file is used 
by process definition", "资源文件被上线的流程定义使用了"), PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist", "父资源文件不存在"), RESOURCE_NOT_EXIST_OR_NO_PERMISSION(20016, "resource not exist or no permission,please view the task node and remove error resource", "请检查任务节点并移除无权限或者已删除的资源"), RESOURCE_IS_AUTHORIZED(20017, "resource is authorized to user {0},suffix not allowed to be modified", "资源文件已授权其他用户[{0}],后缀不允许修改"), USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"), USER_NO_OPERATION_PROJECT_PERM(30002, "user {0} is not has project {1} permission", "当前用户[{0}]没有[{1}]项目的操作权限"), PROCESS_INSTANCE_NOT_EXIST(50001, "process instance {0} does not exist", "工作流实例[{0}]不存在"), PROCESS_INSTANCE_EXIST(50002, "process instance {0} already exists", "工作流实例[{0}]已存在"), PROCESS_DEFINE_NOT_EXIST(50003, "process definition {0} does not exist", "工作流定义[{0}]不存在"), PROCESS_DEFINE_NOT_RELEASE(50004, "process definition {0} not on line", "工作流定义[{0}]不是上线状态"), PROCESS_INSTANCE_ALREADY_CHANGED(50005, "the status of process instance {0} is already {1}", "工作流实例[{0}]的状态已经是[{1}]"), PROCESS_INSTANCE_STATE_OPERATION_ERROR(50006, "the status of process instance {0} is {1},Cannot perform {2} operation", "工作流实例[{0}]的状态是[{1}],无法执行[{2}]操作"), SUB_PROCESS_INSTANCE_NOT_EXIST(50007, "the task belong to process instance does not exist", "子工作流实例不存在"), PROCESS_DEFINE_NOT_ALLOWED_EDIT(50008, "process definition {0} does not allow edit", "工作流定义[{0}]不允许修改"), PROCESS_INSTANCE_EXECUTING_COMMAND(50009, "process instance {0} is executing the command, please wait ...", "工作流实例[{0}]正在执行命令,请稍等..."), PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE(50010, "process instance {0} is not sub process instance", "工作流实例[{0}]不是子工作流实例"), TASK_INSTANCE_STATE_COUNT_ERROR(50011, "task instance state count error", "查询各状态任务实例数错误"), COUNT_PROCESS_INSTANCE_STATE_ERROR(50012, "count process instance state error", "查询各状态流程实例数错误"), COUNT_PROCESS_DEFINITION_USER_ERROR(50013, "count process definition user error", "查询各用户流程定义数错误"), START_PROCESS_INSTANCE_ERROR(50014, "start process instance error", "运行工作流实例错误"), EXECUTE_PROCESS_INSTANCE_ERROR(50015, "execute process instance error", "操作工作流实例错误"), CHECK_PROCESS_DEFINITION_ERROR(50016, "check process definition error", "工作流定义错误"), QUERY_RECIPIENTS_AND_COPYERS_BY_PROCESS_DEFINITION_ERROR(50017, "query recipients and copyers by process definition error", "查询收件人和抄送人错误"), DATA_IS_NOT_VALID(50017, "data {0} not valid", "数据[{0}]无效"), DATA_IS_NULL(50018, "data {0} is null", "数据[{0}]不能为空"), PROCESS_NODE_HAS_CYCLE(50019, "process node has cycle", "流程节点间存在循环依赖"), PROCESS_NODE_S_PARAMETER_INVALID(50020, "process node {0} parameter invalid", "流程节点[{0}]参数无效"), PROCESS_DEFINE_STATE_ONLINE(50021, "process definition {0} is already on line", "工作流定义[{0}]已上线"), DELETE_PROCESS_DEFINE_BY_CODE_ERROR(50022, "delete process definition by code error", "删除工作流定义错误"), SCHEDULE_CRON_STATE_ONLINE(50023, "the status of schedule {0} is already on line", "调度配置[{0}]已上线"), DELETE_SCHEDULE_CRON_BY_ID_ERROR(50024, "delete schedule by id error", "删除调度配置错误"), BATCH_DELETE_PROCESS_DEFINE_ERROR(50025, "batch delete process definition error", "批量删除工作流定义错误"), BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR(50026, "batch delete process definition by codes {0} error", "批量删除工作流定义[{0}]错误"), TENANT_NOT_SUITABLE(50027, "there is not any tenant suitable, please choose a tenant available.", "没有合适的租户,请选择可用的租户"), EXPORT_PROCESS_DEFINE_BY_ID_ERROR(50028, "export process definition by id error", "导出工作流定义错误"), BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR(50028, "batch 
export process definition by ids error", "批量导出工作流定义错误"), IMPORT_PROCESS_DEFINE_ERROR(50029, "import process definition error", "导入工作流定义错误"), TASK_DEFINE_NOT_EXIST(50030, "task definition {0} does not exist", "任务定义[{0}]不存在"), CREATE_PROCESS_TASK_RELATION_ERROR(50032, "create process task relation error", "创建工作流任务关系错误"), PROCESS_TASK_RELATION_NOT_EXIST(50033, "process task relation {0} does not exist", "工作流任务关系[{0}]不存在"), PROCESS_TASK_RELATION_EXIST(50034, "process task relation is already exist, processCode:[{0}]", "工作流任务关系已存在, processCode:[{0}]"), PROCESS_DAG_IS_EMPTY(50035, "process dag is empty", "工作流dag是空"), CHECK_PROCESS_TASK_RELATION_ERROR(50036, "check process task relation error", "工作流任务关系参数错误"), CREATE_TASK_DEFINITION_ERROR(50037, "create task definition error", "创建任务错误"), UPDATE_TASK_DEFINITION_ERROR(50038, "update task definition error", "更新任务定义错误"), QUERY_TASK_DEFINITION_VERSIONS_ERROR(50039, "query task definition versions error", "查询任务历史版本信息出错"), SWITCH_TASK_DEFINITION_VERSION_ERROR(50040, "Switch task definition version error", "切换任务版本出错"), DELETE_TASK_DEFINITION_VERSION_ERROR(50041, "delete task definition version error", "删除任务历史版本出错"), DELETE_TASK_DEFINE_BY_CODE_ERROR(50042, "delete task definition by code error", "删除任务定义错误"), QUERY_DETAIL_OF_TASK_DEFINITION_ERROR(50043, "query detail of task definition error", "查询任务详细信息错误"), QUERY_TASK_DEFINITION_LIST_PAGING_ERROR(50044, "query task definition list paging error", "分页查询任务定义列表错误"), TASK_DEFINITION_NAME_EXISTED(50045, "task definition name [{0}] already exists", "任务定义名称[{0}]已经存在"), RELEASE_TASK_DEFINITION_ERROR(50046, "release task definition error", "上线任务错误"), MOVE_PROCESS_TASK_RELATION_ERROR(50047, "move process task relation error", "移动任务到其他工作流错误"), DELETE_TASK_PROCESS_RELATION_ERROR(50048, "delete process task relation error", "删除工作流任务关系错误"), QUERY_TASK_PROCESS_RELATION_ERROR(50049, "query process task relation error", "查询工作流任务关系错误"), TASK_DEFINE_STATE_ONLINE(50050, "task definition {0} is already on line", "任务定义[{0}]已上线"), TASK_HAS_DOWNSTREAM(50051, "Task [{0}] exists downstream dependence", "任务[{0}]存在下游依赖"), TASK_HAS_UPSTREAM(50052, "Task [{0}] exists upstream dependence", "任务[{0}]存在上游依赖"), MAIN_TABLE_USING_VERSION(50053, "the version that the master table is using", "主表正在使用该版本"), PROJECT_PROCESS_NOT_MATCH(50054, "the project and the process is not match", "项目和工作流不匹配"), DELETE_EDGE_ERROR(50055, "delete edge error", "删除工作流任务连接线错误"), HDFS_NOT_STARTUP(60001, "hdfs not startup", "hdfs未启用"), /** * for monitor */ QUERY_DATABASE_STATE_ERROR(70001, "query database state error", "查询数据库状态错误"), CREATE_ACCESS_TOKEN_ERROR(70010, "create access token error", "创建访问token错误"), GENERATE_TOKEN_ERROR(70011, "generate token error", "生成token错误"), QUERY_ACCESSTOKEN_LIST_PAGING_ERROR(70012, "query access token list paging error", "分页查询访问token列表错误"), UPDATE_ACCESS_TOKEN_ERROR(70013, "update access token error", "更新访问token错误"), DELETE_ACCESS_TOKEN_ERROR(70014, "delete access token error", "删除访问token错误"), ACCESS_TOKEN_NOT_EXIST(70015, "access token not exist", "访问token不存在"), COMMAND_STATE_COUNT_ERROR(80001, "task instance state count error", "查询各状态任务实例数错误"), NEGTIVE_SIZE_NUMBER_ERROR(80002, "query size number error", "查询size错误"), START_TIME_BIGGER_THAN_END_TIME_ERROR(80003, "start time bigger than end time error", "开始时间在结束时间之后错误"), QUEUE_COUNT_ERROR(90001, "queue count error", "查询队列数据错误"), KERBEROS_STARTUP_STATE(100001, "get kerberos startup state error", "获取kerberos启动状态错误"), //plugin PLUGIN_NOT_A_UI_COMPONENT(110001, "query plugin error, this 
plugin has no UI component", "查询插件错误,此插件无UI组件"), QUERY_PLUGINS_RESULT_IS_NULL(110002, "query plugins result is null", "查询插件为空"), QUERY_PLUGINS_ERROR(110003, "query plugins error", "查询插件错误"), QUERY_PLUGIN_DETAIL_RESULT_IS_NULL(110004, "query plugin detail result is null", "查询插件详情结果为空"), UPDATE_ALERT_PLUGIN_INSTANCE_ERROR(110005, "update alert plugin instance error", "更新告警组和告警组插件实例错误"), DELETE_ALERT_PLUGIN_INSTANCE_ERROR(110006, "delete alert plugin instance error", "删除告警组和告警组插件实例错误"), GET_ALERT_PLUGIN_INSTANCE_ERROR(110007, "get alert plugin instance error", "获取告警组和告警组插件实例错误"), CREATE_ALERT_PLUGIN_INSTANCE_ERROR(110008, "create alert plugin instance error", "创建告警组和告警组插件实例错误"), QUERY_ALL_ALERT_PLUGIN_INSTANCE_ERROR(110009, "query all alert plugin instance error", "查询所有告警实例失败"), PLUGIN_INSTANCE_ALREADY_EXIT(110010, "plugin instance already exit", "该告警插件实例已存在"), LIST_PAGING_ALERT_PLUGIN_INSTANCE_ERROR(110011, "query plugin instance page error", "分页查询告警实例失败"), DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED(110012, "failed to delete the alert instance, there is an alarm group associated with this alert instance", "删除告警实例失败,存在与此告警实例关联的警报组"), PROCESS_DEFINITION_VERSION_IS_USED(110013,"this process definition version is used","此工作流定义版本被使用"), CREATE_ENVIRONMENT_ERROR(120001, "create environment error", "创建环境失败"), ENVIRONMENT_NAME_EXISTS(120002,"this enviroment name [{0}] already exists","环境名称[{0}]已经存在"), ENVIRONMENT_NAME_IS_NULL(120003,"this enviroment name shouldn't be empty.","环境名称不能为空"), ENVIRONMENT_CONFIG_IS_NULL(120004,"this enviroment config shouldn't be empty.","环境配置信息不能为空"), UPDATE_ENVIRONMENT_ERROR(120005, "update environment [{0}] info error", "更新环境[{0}]信息失败"), DELETE_ENVIRONMENT_ERROR(120006, "delete environment error", "删除环境信息失败"), DELETE_ENVIRONMENT_RELATED_TASK_EXISTS(120007, "this environment has been used in tasks,so you can't delete it.", "该环境已经被任务使用,所以不能删除该环境信息"), QUERY_ENVIRONMENT_BY_NAME_ERROR(1200008, "not found environment [{0}] ", "查询环境名称[{0}]信息不存在"), QUERY_ENVIRONMENT_BY_CODE_ERROR(1200009, "not found environment [{0}] ", "查询环境编码[{0}]不存在"), QUERY_ENVIRONMENT_ERROR(1200010, "login user query environment error", "分页查询环境列表错误"), VERIFY_ENVIRONMENT_ERROR(1200011, "verify environment error", "验证环境信息错误"), TASK_GROUP_NAME_EXSIT(130001,"this task group name is repeated in a project","该任务组名称在一个项目中已经使用"), TASK_GROUP_SIZE_ERROR(130002,"task group size error","任务组大小应该为大于1的整数"), TASK_GROUP_STATUS_ERROR(130003,"task group status error","任务组已经被关闭"), TASK_GROUP_FULL(130004,"task group is full","任务组已经满了"), TASK_GROUP_USED_SIZE_ERROR(130005,"the used size number of task group is dirty","任务组使用的容量发生了变化"), TASK_GROUP_QUEUE_RELEASE_ERROR(130006,"relase task group queue failed","任务组资源释放时出现了错误"), TASK_GROUP_QUEUE_AWAKE_ERROR(130007,"awake waiting task failed","任务组使唤醒等待任务时发生了错误"), CREATE_TASK_GROUP_ERROR(130008,"create task group error","创建任务组错误"), UPDATE_TASK_GROUP_ERROR(130009,"update task group list error","更新任务组错误"), QUERY_TASK_GROUP_LIST_ERROR(130010,"query task group list error","查询任务组列表错误"), CLOSE_TASK_GROUP_ERROR(130011,"close task group error","关闭任务组错误"), START_TASK_GROUP_ERROR(130012,"start task group error","启动任务组错误"), QUERY_TASK_GROUP_QUEUE_LIST_ERROR(130013,"query task group queue list error","查询任务组队列列表错误"), TASK_GROUP_CACHE_START_FAILED(130014,"cache start failed","任务组相关的缓存启动失败"), ENVIRONMENT_WORKER_GROUPS_IS_INVALID(130015, "environment worker groups is invalid format", "环境关联的工作组参数解析错误"), UPDATE_ENVIRONMENT_WORKER_GROUP_RELATION_ERROR(130016,"You can't modify the worker 
group, because the worker group [{0}] and this environment [{1}] already be used in the task [{2}]", "您不能修改工作组选项,因为该工作组 [{0}] 和 该环境 [{1}] 已经被用在任务 [{2}] 中"), TASK_GROUP_QUEUE_ALREADY_START(130017, "task group queue already start", "节点已经获取任务组资源") ; private final int code; private final String enMsg; private final String zhMsg; Status(int code, String enMsg, String zhMsg) { this.code = code; this.enMsg = enMsg; this.zhMsg = zhMsg; } public int getCode() { return this.code; } public String getMsg() { if (Locale.SIMPLIFIED_CHINESE.getLanguage().equals(LocaleContextHolder.getLocale().getLanguage())) { return this.zhMsg; } else { return this.enMsg; } } /** * Retrieve Status enum entity by status code. * @param code * @return */ public static Optional<Status> findStatusBy(int code) { for (Status status : Status.values()) { if (code == status.getCode()) { return Optional.of(status); } } return Optional.empty(); } }
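If the revoke feature registers its own error status, it would be added to the enum above alongside the other user/project codes (for comparison, GRANT_PROJECT_ERROR is 10094 and the 10xxx range runs up to 10181 in this file). A hedged sketch; the numeric code and message wording are assumptions and may differ from what PR #7185 actually merged:

```java
// hypothetical new entry in the 10xxx (user/project) range; the exact
// code and wording chosen by PR #7185 may differ
REVOKE_PROJECT_ERROR(10182, "revoke project error", "撤销项目授权错误"),
```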
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.dao.entity.User;

import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * users service
 */
public interface UsersService {

    /**
     * create user, only system admin has permission
     *
     * @param loginUser login user
     * @param userName user name
     * @param userPassword user password
     * @param email email
     * @param tenantId tenant id
     * @param phone phone
     * @param queue queue
     * @return create result code
     * @throws Exception exception
     */
    Map<String, Object> createUser(User loginUser, String userName, String userPassword, String email,
                                   int tenantId, String phone, String queue, int state) throws IOException;

    User createUser(String userName, String userPassword, String email, int tenantId, String phone,
                    String queue, int state);

    /**
     * create user for ldap login
     */
    User createUser(UserType userType, String userId, String email);

    /**
     * get user by user name
     *
     * @param userName user name
     * @return exist user or null
     */
    User getUserByUserName(String userName);

    /**
     * query user by id
     *
     * @param id id
     * @return user info
     */
    User queryUser(int id);

    /**
     * query user by ids
     *
     * @param ids id list
     * @return user list
     */
    List<User> queryUser(List<Integer> ids);

    /**
     * query user by name
     *
     * @param name name
     * @return user info
     */
    User queryUser(String name);

    /**
     * query user by name and password
     *
     * @param name name
     * @param password password
     * @return user info
     */
    User queryUser(String name, String password);

    /**
     * get user id by user name
     *
     * @param name user name
     * @return 0 if the name is empty, -1 if the user does not exist, otherwise the user id
     */
    int getUserIdByName(String name);

    /**
     * query user list
     *
     * @param loginUser login user
     * @param pageNo page number
     * @param searchVal search value
     * @param pageSize page size
     * @return user list page
     */
    Result queryUserList(User loginUser, String searchVal, Integer pageNo, Integer pageSize);

    /**
     * update user
     *
     * @param loginUser login user
     * @param userId user id
     * @param userName user name
     * @param userPassword user password
     * @param email email
     * @param tenantId tenant id
     * @param phone phone
     * @param queue queue
     * @return update result code
     * @throws Exception exception
     */
    Map<String, Object> updateUser(User loginUser, int userId, String userName, String userPassword,
                                   String email, int tenantId, String phone, String queue, int state) throws IOException;

    /**
     * delete user
     *
     * @param loginUser login user
     * @param id user id
     * @return delete result code
     * @throws Exception exception when operating hdfs
     */
    Map<String, Object> deleteUserById(User loginUser, int id) throws IOException;

    /**
     * grant project
     *
     * @param loginUser login user
     * @param userId user id
     * @param projectIds project id array
     * @return grant result code
     */
    Map<String, Object> grantProject(User loginUser, int userId, String projectIds);

    /**
     * grant project by code
     *
     * @param loginUser login user
     * @param userId user id
     * @param projectCodes project code array
     * @return grant result code
     */
    Map<String, Object> grantProjectByCode(User loginUser, int userId, String projectCodes);

    /**
     * grant resource
     *
     * @param loginUser login user
     * @param userId user id
     * @param resourceIds resource id array
     * @return grant result code
     */
    Map<String, Object> grantResources(User loginUser, int userId, String resourceIds);

    /**
     * grant udf function
     *
     * @param loginUser login user
     * @param userId user id
     * @param udfIds udf id array
     * @return grant result code
     */
    Map<String, Object> grantUDFFunction(User loginUser, int userId, String udfIds);

    /**
     * grant datasource
     *
     * @param loginUser login user
     * @param userId user id
     * @param datasourceIds data source id array
     * @return grant result code
     */
    Map<String, Object> grantDataSource(User loginUser, int userId, String datasourceIds);

    /**
     * query user info
     *
     * @param loginUser login user
     * @return user info
     */
    Map<String, Object> getUserInfo(User loginUser);

    /**
     * query all general users
     *
     * @param loginUser login user
     * @return user list
     */
    Map<String, Object> queryAllGeneralUsers(User loginUser);

    /**
     * query user list
     *
     * @param loginUser login user
     * @return user list
     */
    Map<String, Object> queryUserList(User loginUser);

    /**
     * verify whether the user name exists
     *
     * @param userName user name
     * @return true if user name not exists, otherwise return false
     */
    Result<Object> verifyUserName(String userName);

    /**
     * unauthorized user
     *
     * @param loginUser login user
     * @param alertgroupId alert group id
     * @return unauthorized result code
     */
    Map<String, Object> unauthorizedUser(User loginUser, Integer alertgroupId);

    /**
     * authorized user
     *
     * @param loginUser login user
     * @param alertgroupId alert group id
     * @return authorized result code
     */
    Map<String, Object> authorizedUser(User loginUser, Integer alertgroupId);

    /**
     * register user, default state is 0, default tenant_id is 1, no phone, no queue
     *
     * @param userName user name
     * @param userPassword user password
     * @param repeatPassword repeat password
     * @param email email
     * @return registration result code
     * @throws Exception exception
     */
    Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email);

    /**
     * activate user, only system admin has permission, change user state code 0 to 1
     *
     * @param loginUser login user
     * @param userName user name
     * @return create result code
     */
    Map<String, Object> activateUser(User loginUser, String userName);

    /**
     * batch activate users, only system admin has permission, change users state code 0 to 1
     *
     * @param loginUser login user
     * @param userNames user names
     * @return create result code
     */
    Map<String, Object> batchActivateUser(User loginUser, List<String> userNames);
}
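To carry the feature through the service layer, the interface would gain a matching method. Again a sketch under the same naming assumptions as the controller example above, not the confirmed signature from PR #7185:

```java
/**
 * revoke the project permission for a specified user (hypothetical sketch)
 *
 * @param loginUser   login user
 * @param userId      user id
 * @param projectCode project code whose permission should be revoked
 * @return revoke result code
 */
Map<String, Object> revokeProject(User loginUser, int userId, long projectCode);
```

An implementation in UsersServiceImpl would plausibly mirror `grantProjectByCode`: an admin check, a user-existence check, then a delete on the project-user relation (e.g., via ProjectUserMapper) instead of an insert.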
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent; import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.UsersService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.EncryptionUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.entity.AlertGroup; import org.apache.dolphinscheduler.dao.entity.DatasourceUser; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.ResourcesUser; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UDFUser; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.io.IOException; import java.text.MessageFormat; import java.util.Arrays; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; 
import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; /** * users service impl */ @Service public class UsersServiceImpl extends BaseServiceImpl implements UsersService { private static final Logger logger = LoggerFactory.getLogger(UsersServiceImpl.class); @Autowired private AccessTokenMapper accessTokenMapper; @Autowired private UserMapper userMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectUserMapper projectUserMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private DataSourceUserMapper datasourceUserMapper; @Autowired private UDFUserMapper udfUserMapper; @Autowired private AlertGroupMapper alertGroupMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProjectMapper projectMapper; /** * create user, only system admin have permission * * @param loginUser login user * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return create result code * @throws Exception exception */ @Override @Transactional(rollbackFor = Exception.class) public Map<String, Object> createUser(User loginUser, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException { Map<String, Object> result = new HashMap<>(); //check all user params String msg = this.checkUserParams(userName, userPassword, email, phone); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!checkTenantExists(tenantId)) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } User user = createUser(userName, userPassword, email, tenantId, phone, queue, state); Tenant tenant = tenantMapper.queryById(tenantId); // resource upload startup if (PropertyUtils.getResUploadStartupState()) { // if tenant not exists if (!HadoopUtils.getInstance().exists(HadoopUtils.getHdfsTenantDir(tenant.getTenantCode()))) { createTenantDirIfNotExists(tenant.getTenantCode()); } String userPath = HadoopUtils.getHdfsUserDir(tenant.getTenantCode(), user.getId()); HadoopUtils.getInstance().mkdir(userPath); } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } @Override @Transactional(rollbackFor = RuntimeException.class) public User createUser(String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) { User user = new User(); Date now = new Date(); user.setUserName(userName); user.setUserPassword(EncryptionUtils.getMd5(userPassword)); user.setEmail(email); user.setTenantId(tenantId); user.setPhone(phone); user.setState(state); // create general users, administrator users are currently built-in user.setUserType(UserType.GENERAL_USER); user.setCreateTime(now); user.setUpdateTime(now); if (StringUtils.isEmpty(queue)) { queue = ""; } user.setQueue(queue); // save user userMapper.insert(user); return user; } /*** * create User for ldap login */ @Override @Transactional(rollbackFor = Exception.class) public User createUser(UserType userType, String userId, String email) { User user = new User(); Date now = new Date(); user.setUserName(userId); 
user.setEmail(email); // create general users, administrator users are currently built-in user.setUserType(userType); user.setCreateTime(now); user.setUpdateTime(now); user.setQueue(""); // save user userMapper.insert(user); return user; } /** * get user by user name * * @param userName user name * @return exist user or null */ @Override public User getUserByUserName(String userName) { return userMapper.queryByUserNameAccurately(userName); } /** * query user by id * * @param id id * @return user info */ @Override public User queryUser(int id) { return userMapper.selectById(id); } @Override public List<User> queryUser(List<Integer> ids) { if (CollectionUtils.isEmpty(ids)) { return new ArrayList<>(); } return userMapper.selectByIds(ids); } /** * query user * * @param name name * @return user info */ @Override public User queryUser(String name) { return userMapper.queryByUserNameAccurately(name); } /** * query user * * @param name name * @param password password * @return user info */ @Override public User queryUser(String name, String password) { String md5 = EncryptionUtils.getMd5(password); return userMapper.queryUserByNamePassword(name, md5); } /** * get user id by user name * * @param name user name * @return if name empty 0, user not exists -1, user exist user id */ @Override public int getUserIdByName(String name) { //executor name query int executorId = 0; if (StringUtils.isNotEmpty(name)) { User executor = queryUser(name); if (null != executor) { executorId = executor.getId(); } else { executorId = -1; } } return executorId; } /** * query user list * * @param loginUser login user * @param pageNo page number * @param searchVal search value * @param pageSize page size * @return user list page */ @Override public Result queryUserList(User loginUser, String searchVal, Integer pageNo, Integer pageSize) { Result result = new Result(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } Page<User> page = new Page<>(pageNo, pageSize); IPage<User> scheduleList = userMapper.queryUserPaging(page, searchVal); PageInfo<User> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) scheduleList.getTotal()); pageInfo.setTotalList(scheduleList.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * updateProcessInstance user * * @param userId user id * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return update result code * @throws Exception exception */ @Override public Map<String, Object> updateUser(User loginUser, int userId, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (check(result, !hasPerm(loginUser, userId), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } if (StringUtils.isNotEmpty(userName)) { if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User tempUser = userMapper.queryByUserNameAccurately(userName); if (tempUser != null && tempUser.getId() != userId) { putMsg(result, Status.USER_NAME_EXIST); return result; } user.setUserName(userName); } if (StringUtils.isNotEmpty(userPassword)) { if 
(!CheckUtils.checkPassword(userPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userPassword); return result; } user.setUserPassword(EncryptionUtils.getMd5(userPassword)); } if (StringUtils.isNotEmpty(email)) { if (!CheckUtils.checkEmail(email)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, email); return result; } user.setEmail(email); } if (StringUtils.isNotEmpty(phone) && !CheckUtils.checkPhone(phone)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, phone); return result; } user.setPhone(phone); user.setQueue(queue); user.setState(state); Date now = new Date(); user.setUpdateTime(now); //if user switches the tenant, the user's resources need to be copied to the new tenant if (user.getTenantId() != tenantId) { Tenant oldTenant = tenantMapper.queryById(user.getTenantId()); //query tenant Tenant newTenant = tenantMapper.queryById(tenantId); if (newTenant != null) { // if hdfs startup if (PropertyUtils.getResUploadStartupState() && oldTenant != null) { String newTenantCode = newTenant.getTenantCode(); String oldResourcePath = HadoopUtils.getHdfsResDir(oldTenant.getTenantCode()); String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode()); // if old tenant dir exists if (HadoopUtils.getInstance().exists(oldResourcePath)) { String newResourcePath = HadoopUtils.getHdfsResDir(newTenantCode); String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode); //file resources list List<Resource> fileResourcesList = resourceMapper.queryResourceList( null, userId, ResourceType.FILE.ordinal()); if (CollectionUtils.isNotEmpty(fileResourcesList)) { ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(fileResourcesList); ResourceComponent resourceComponent = resourceTreeVisitor.visit(); copyResourceFiles(resourceComponent, oldResourcePath, newResourcePath); } //udf resources List<Resource> udfResourceList = resourceMapper.queryResourceList( null, userId, ResourceType.UDF.ordinal()); if (CollectionUtils.isNotEmpty(udfResourceList)) { ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(udfResourceList); ResourceComponent resourceComponent = resourceTreeVisitor.visit(); copyResourceFiles(resourceComponent, oldUdfsPath, newUdfsPath); } //Delete the user from the old tenant directory String oldUserPath = HadoopUtils.getHdfsUserDir(oldTenant.getTenantCode(), userId); HadoopUtils.getInstance().delete(oldUserPath, true); } else { // if old tenant dir not exists , create createTenantDirIfNotExists(oldTenant.getTenantCode()); } if (HadoopUtils.getInstance().exists(HadoopUtils.getHdfsTenantDir(newTenant.getTenantCode()))) { //create user in the new tenant directory String newUserPath = HadoopUtils.getHdfsUserDir(newTenant.getTenantCode(), user.getId()); HadoopUtils.getInstance().mkdir(newUserPath); } else { // if new tenant dir not exists , create createTenantDirIfNotExists(newTenant.getTenantCode()); } } } user.setTenantId(tenantId); } // updateProcessInstance user userMapper.updateById(user); putMsg(result, Status.SUCCESS); return result; } /** * delete user * * @param loginUser login user * @param id user id * @return delete result code * @throws Exception exception when operate hdfs */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteUserById(User loginUser, int id) throws IOException { Map<String, Object> result = new HashMap<>(); //only admin can operate if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM, id); return result; } //check exist User tempUser = 
userMapper.selectById(id); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, id); return result; } // check if is a project owner List<Project> projects = projectMapper.queryProjectCreatedByUser(id); if (CollectionUtils.isNotEmpty(projects)) { String projectNames = projects.stream().map(Project::getName).collect(Collectors.joining(",")); putMsg(result, Status.TRANSFORM_PROJECT_OWNERSHIP, projectNames); return result; } // delete user User user = userMapper.queryTenantCodeByUserId(id); if (user != null) { if (PropertyUtils.getResUploadStartupState()) { String userPath = HadoopUtils.getHdfsUserDir(user.getTenantCode(), id); if (HadoopUtils.getInstance().exists(userPath)) { HadoopUtils.getInstance().delete(userPath, true); } } } accessTokenMapper.deleteAccessTokenByUserId(id); userMapper.deleteById(id); putMsg(result, Status.SUCCESS); return result; } /** * grant project * * @param loginUser login user * @param userId user id * @param projectIds project id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantProject(User loginUser, int userId, String projectIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } //check exist User tempUser = userMapper.selectById(userId); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } //if the selected projectIds are empty, delete all items associated with the user if (check(result, StringUtils.isEmpty(projectIds), Status.SUCCESS)) { projectUserMapper.deleteProjectRelation(0, userId); return result; } String[] projectIdArr = projectIds.split(","); for (String projectId : projectIdArr) { Date now = new Date(); ProjectUser projectUser = new ProjectUser(); projectUser.setUserId(userId); projectUser.setProjectId(Integer.parseInt(projectId)); projectUser.setPerm(7); projectUser.setCreateTime(now); projectUser.setUpdateTime(now); projectUserMapper.insert(projectUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant project by code * * @param loginUser login user * @param userId user id * @param projectCodes project code array * @return grant result code */ @Override public Map<String, Object> grantProjectByCode(final User loginUser, final int userId, final String projectCodes) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); // 1. only admin can operate if (this.check(result, !this.isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } // 2. check if user is existed User tempUser = this.userMapper.selectById(userId); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } // 3. if the selected projectCodes are empty, delete all items associated with the user if (this.check(result, StringUtils.isEmpty(projectCodes), Status.SUCCESS)) { this.projectUserMapper.deleteProjectRelation(0, userId); return result; } // 4. 
maintain the relationship between project and user Set<Long> projectCodeSet = Arrays.stream(projectCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); final List<Project> projectList = this.projectMapper.queryByCodes(projectCodeSet); if (CollectionUtils.isEmpty(projectList)) { logger.info("project not exists"); putMsg(result, Status.PROJECT_NOT_FOUNT, projectCodes); return result; } for (final Project project : projectList) { final Date today = new Date(); ProjectUser projectUser = new ProjectUser(); projectUser.setUserId(userId); projectUser.setProjectId(project.getId()); projectUser.setPerm(7); projectUser.setCreateTime(today); projectUser.setUpdateTime(today); this.projectUserMapper.insert(projectUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant resource * * @param loginUser login user * @param userId user id * @param resourceIds resource id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantResources(User loginUser, int userId, String resourceIds) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } Set<Integer> needAuthorizeResIds = new HashSet<>(); if (StringUtils.isNotBlank(resourceIds)) { String[] resourceFullIdArr = resourceIds.split(","); // need authorize resource id set for (String resourceFullId : resourceFullIdArr) { String[] resourceIdArr = resourceFullId.split("-"); for (int i = 0; i <= resourceIdArr.length - 1; i++) { int resourceIdValue = Integer.parseInt(resourceIdArr[i]); needAuthorizeResIds.add(resourceIdValue); } } } //get the authorized resource id list by user id List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, Constants.AUTHORIZE_WRITABLE_PERM); List<Resource> oldAuthorizedRes = CollectionUtils.isEmpty(resIds) ? 
new ArrayList<>() : resourceMapper.queryResourceListById(resIds); //if resource type is UDF,need check whether it is bound by UDF function Set<Integer> oldAuthorizedResIds = oldAuthorizedRes.stream().map(Resource::getId).collect(Collectors.toSet()); //get the unauthorized resource id list oldAuthorizedResIds.removeAll(needAuthorizeResIds); if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) { // get all resource id of process definitions those is released List<Map<String, Object>> list = processDefinitionMapper.listResourcesByUser(userId); Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set<Integer> resourceIdSet = resourceProcessMap.keySet(); resourceIdSet.retainAll(oldAuthorizedResIds); if (CollectionUtils.isNotEmpty(resourceIdSet)) { logger.error("can't be deleted,because it is used of process definition"); for (Integer resId : resourceIdSet) { logger.error("resource id:{} is used of process definition {}", resId, resourceProcessMap.get(resId)); } putMsg(result, Status.RESOURCE_IS_USED); return result; } } resourceUserMapper.deleteResourceUser(userId, 0); if (check(result, StringUtils.isEmpty(resourceIds), Status.SUCCESS)) { return result; } for (int resourceIdValue : needAuthorizeResIds) { Resource resource = resourceMapper.selectById(resourceIdValue); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Date now = new Date(); ResourcesUser resourcesUser = new ResourcesUser(); resourcesUser.setUserId(userId); resourcesUser.setResourcesId(resourceIdValue); if (resource.isDirectory()) { resourcesUser.setPerm(Constants.AUTHORIZE_READABLE_PERM); } else { resourcesUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); } resourcesUser.setCreateTime(now); resourcesUser.setUpdateTime(now); resourceUserMapper.insert(resourcesUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant udf function * * @param loginUser login user * @param userId user id * @param udfIds udf id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantUDFFunction(User loginUser, int userId, String udfIds) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } udfUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(udfIds), Status.SUCCESS)) { return result; } String[] resourcesIdArr = udfIds.split(","); for (String udfId : resourcesIdArr) { Date now = new Date(); UDFUser udfUser = new UDFUser(); udfUser.setUserId(userId); udfUser.setUdfId(Integer.parseInt(udfId)); udfUser.setPerm(7); udfUser.setCreateTime(now); udfUser.setUpdateTime(now); udfUserMapper.insert(udfUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant datasource * * @param loginUser login user * @param userId user id * @param datasourceIds data source id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantDataSource(User loginUser, int userId, String datasourceIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { 
putMsg(result, Status.USER_NOT_EXIST, userId); return result; } datasourceUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(datasourceIds), Status.SUCCESS)) { return result; } String[] datasourceIdArr = datasourceIds.split(","); for (String datasourceId : datasourceIdArr) { Date now = new Date(); DatasourceUser datasourceUser = new DatasourceUser(); datasourceUser.setUserId(userId); datasourceUser.setDatasourceId(Integer.parseInt(datasourceId)); datasourceUser.setPerm(7); datasourceUser.setCreateTime(now); datasourceUser.setUpdateTime(now); datasourceUserMapper.insert(datasourceUser); } putMsg(result, Status.SUCCESS); return result; } /** * query user info * * @param loginUser login user * @return user info */ @Override public Map<String, Object> getUserInfo(User loginUser) { Map<String, Object> result = new HashMap<>(); User user = null; if (loginUser.getUserType() == UserType.ADMIN_USER) { user = loginUser; } else { user = userMapper.queryDetailsById(loginUser.getId()); List<AlertGroup> alertGroups = alertGroupMapper.queryByUserId(loginUser.getId()); StringBuilder sb = new StringBuilder(); if (alertGroups != null && !alertGroups.isEmpty()) { for (int i = 0; i < alertGroups.size() - 1; i++) { sb.append(alertGroups.get(i).getGroupName()).append(","); } sb.append(alertGroups.get(alertGroups.size() - 1).getGroupName()); user.setAlertGroup(sb.toString()); } } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryAllGeneralUsers(User loginUser) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryAllGeneralUser(); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryUserList(User loginUser) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * verify user name exists * * @param userName user name * @return success result if the user name does not exist, otherwise USER_NAME_EXIST */ @Override public Result<Object> verifyUserName(String userName) { Result<Object> result = new Result<>(); User user = userMapper.queryByUserNameAccurately(userName); if (user != null) { putMsg(result, Status.USER_NAME_EXIST); } else { putMsg(result, Status.SUCCESS); } return result; } /** * unauthorized user * * @param loginUser login user * @param alertgroupId alert group id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); List<User> resultUsers = new ArrayList<>(); Set<User> userSet = null; if (userList != null && !userList.isEmpty()) { userSet = new HashSet<>(userList); List<User> authedUserList = userMapper.queryUserListByAlertGroupId(alertgroupId); Set<User> authedUserSet = null; if (authedUserList != null && !authedUserList.isEmpty()) {
authedUserSet = new HashSet<>(authedUserList); userSet.removeAll(authedUserSet); } resultUsers = new ArrayList<>(userSet); } result.put(Constants.DATA_LIST, resultUsers); putMsg(result, Status.SUCCESS); return result; } /** * authorized user * * @param loginUser login user * @param alertgroupId alert group id * @return authorized result code */ @Override public Map<String, Object> authorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryUserListByAlertGroupId(alertgroupId); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * @param tenantId tenant id * @return true if tenant exists, otherwise return false */ private boolean checkTenantExists(int tenantId) { return tenantMapper.queryById(tenantId) != null; } /** * @return if check failed return the field, otherwise return null */ private String checkUserParams(String userName, String password, String email, String phone) { String msg = null; if (!CheckUtils.checkUserName(userName)) { msg = userName; } else if (!CheckUtils.checkPassword(password)) { msg = password; } else if (!CheckUtils.checkEmail(email)) { msg = email; } else if (!CheckUtils.checkPhone(phone)) { msg = phone; } return msg; } /** * copy resource files * * @param resourceComponent resource component * @param srcBasePath src base path * @param dstBasePath dst base path * @throws IOException io exception */ private void copyResourceFiles(ResourceComponent resourceComponent, String srcBasePath, String dstBasePath) throws IOException { List<ResourceComponent> components = resourceComponent.getChildren(); if (CollectionUtils.isNotEmpty(components)) { for (ResourceComponent component : components) { // verify whether exist if (!HadoopUtils.getInstance().exists(String.format("%s/%s", srcBasePath, component.getFullName()))) { logger.error("resource file: {} not exist,copy error", component.getFullName()); throw new ServiceException(Status.RESOURCE_NOT_EXIST); } if (!component.isDirctory()) { // copy it to dst HadoopUtils.getInstance().copy(String.format("%s/%s", srcBasePath, component.getFullName()), String.format("%s/%s", dstBasePath, component.getFullName()), false, true); continue; } if (CollectionUtils.isEmpty(component.getChildren())) { // if not exist,need create it if (!HadoopUtils.getInstance().exists(String.format("%s/%s", dstBasePath, component.getFullName()))) { HadoopUtils.getInstance().mkdir(String.format("%s/%s", dstBasePath, component.getFullName())); } } else { copyResourceFiles(component, srcBasePath, dstBasePath); } } } } /** * registry user, default state is 0, default tenant_id is 1, no phone, no queue * * @param userName user name * @param userPassword user password * @param repeatPassword repeat password * @param email email * @return registry result code * @throws Exception exception */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email) { Map<String, Object> result = new HashMap<>(); //check user params String msg = this.checkUserParams(userName, userPassword, email, ""); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!userPassword.equals(repeatPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "two passwords are not 
same"); return result; } User user = createUser(userName, userPassword, email, 1, "", "", Flag.NO.ordinal()); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, user); return result; } /** * activate user, only system admin have permission, change user state code 0 to 1 * * @param loginUser login user * @param userName user name * @return create result code */ @Override public Map<String, Object> activateUser(User loginUser, String userName) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User user = userMapper.queryByUserNameAccurately(userName); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userName); return result; } if (user.getState() != Flag.NO.ordinal()) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } user.setState(Flag.YES.ordinal()); Date now = new Date(); user.setUpdateTime(now); userMapper.updateById(user); User responseUser = userMapper.queryByUserNameAccurately(userName); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, responseUser); return result; } /** * activate user, only system admin have permission, change users state code 0 to 1 * * @param loginUser login user * @param userNames user name * @return create result code */ @Override public Map<String, Object> batchActivateUser(User loginUser, List<String> userNames) { Map<String, Object> result = new HashMap<>(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } int totalSuccess = 0; List<String> successUserNames = new ArrayList<>(); Map<String, Object> successRes = new HashMap<>(); int totalFailed = 0; List<Map<String, String>> failedInfo = new ArrayList<>(); Map<String, Object> failedRes = new HashMap<>(); for (String userName : userNames) { Map<String, Object> tmpResult = activateUser(loginUser, userName); if (tmpResult.get(Constants.STATUS) != Status.SUCCESS) { totalFailed++; Map<String, String> failedBody = new HashMap<>(); failedBody.put("userName", userName); Status status = (Status) tmpResult.get(Constants.STATUS); String errorMessage = MessageFormat.format(status.getMsg(), userName); failedBody.put("msg", errorMessage); failedInfo.add(failedBody); } else { totalSuccess++; successUserNames.add(userName); } } successRes.put("sum", totalSuccess); successRes.put("userName", successUserNames); failedRes.put("sum", totalFailed); failedRes.put("info", failedInfo); Map<String, Object> res = new HashMap<>(); res.put("success", successRes); res.put("failed", failedRes); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, res); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/resources/i18n/messages.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=query schedule list EXECUTE_PROCESS_TAG=execute process related operation PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation RUN_PROCESS_INSTANCE_NOTES=run process instance START_NODE_LIST=start node list(node name) TASK_DEPEND_TYPE=task depend type COMMAND_TYPE=command type RUN_MODE=run mode TIMEOUT=timeout EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance EXECUTE_TYPE=execute type START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition GET_RECEIVER_CC_NOTES=query receiver cc DESC=description GROUP_NAME=group name GROUP_TYPE=group type QUERY_ALERT_GROUP_LIST_NOTES=query alert group list UPDATE_ALERT_GROUP_NOTES=update alert group DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not GRANT_ALERT_GROUP_NOTES=grant alert group USER_IDS=user id list ALERT_GROUP_TAG=alert group related operation ALERT_PLUGIN_INSTANCE_TAG=alert plugin instance related operation UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=update alert plugin instance operation CREATE_ALERT_PLUGIN_INSTANCE_NOTES=create alert plugin instance operation DELETE_ALERT_PLUGIN_INSTANCE_NOTES=delete alert plugin instance operation GET_ALERT_PLUGIN_INSTANCE_NOTES=get alert plugin instance operation CREATE_ALERT_GROUP_NOTES=create alert group WORKER_GROUP_TAG=worker group related operation SAVE_WORKER_GROUP_NOTES=create worker group WORKER_GROUP_NAME=worker group name WORKER_IP_LIST=worker ip list, eg. 
192.168.1.1,192.168.1.2 QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging QUERY_WORKER_GROUP_LIST_NOTES=query worker group list DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id DATA_ANALYSIS_TAG=analysis related operation of task state COUNT_TASK_STATE_NOTES=count task state COUNT_PROCESS_INSTANCE_NOTES=count process instance state COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user COUNT_COMMAND_STATE_NOTES=count command state COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ ACCESS_TOKEN_TAG=access token related operation MONITOR_TAG=monitor related operation MASTER_LIST_NOTES=master server list WORKER_LIST_NOTES=worker server list QUERY_DATABASE_STATE_NOTES=query database state QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE TASK_STATE=task instance state SOURCE_TABLE=SOURCE TABLE DEST_TABLE=dest table TASK_DATE=task date QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging DATA_SOURCE_TAG=data source related operation CREATE_DATA_SOURCE_NOTES=create data source DATA_SOURCE_NAME=data source name DATA_SOURCE_NOTE=data source desc DB_TYPE=database type DATA_SOURCE_HOST=DATA SOURCE HOST DATA_SOURCE_PORT=data source port DATABASE_NAME=database name QUEUE_TAG=queue related operation QUERY_QUEUE_LIST_NOTES=query queue list QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging CREATE_QUEUE_NOTES=create queue YARN_QUEUE_NAME=yarn(hadoop) queue name QUEUE_ID=queue id TENANT_DESC=tenant desc QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging QUERY_TENANT_LIST_NOTES=query tenant list UPDATE_TENANT_NOTES=update tenant DELETE_TENANT_NOTES=delete tenant RESOURCES_TAG=resource center related operation CREATE_RESOURCE_NOTES=create resource RESOURCE_TYPE=resource file type RESOURCE_NAME=resource name RESOURCE_DESC=resource file desc RESOURCE_FILE=resource file RESOURCE_ID=resource id QUERY_RESOURCE_LIST_NOTES=query resource list DELETE_RESOURCE_BY_ID_NOTES=delete resource by id VIEW_RESOURCE_BY_ID_NOTES=view resource by id ONLINE_CREATE_RESOURCE_NOTES=online create resource SUFFIX=resource file suffix CONTENT=resource file content UPDATE_RESOURCE_NOTES=edit resource file online DOWNLOAD_RESOURCE_NOTES=download resource file CREATE_UDF_FUNCTION_NOTES=create udf function UDF_TYPE=UDF type FUNC_NAME=function name CLASS_NAME=package and class name ARG_TYPES=arguments UDF_DESC=udf desc VIEW_UDF_FUNCTION_NOTES=view udf function UPDATE_UDF_FUNCTION_NOTES=update udf function QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name DELETE_UDF_FUNCTION_NOTES=delete udf function AUTHORIZED_FILE_NOTES=authorized file UNAUTHORIZED_FILE_NOTES=unauthorized file AUTHORIZED_UDF_FUNC_NOTES=authorized udf func UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func VERIFY_QUEUE_NOTES=verify queue TENANT_TAG=tenant related operation CREATE_TENANT_NOTES=create tenant TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description UPDATE_PROJECT_NOTES=update project 
PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_ALL_PROJECT_LIST_NOTES=query all project list QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging CREATE_TOKEN_NOTES=create token ,note: please login first QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging SCHEDULE=schedule WARNING_TYPE=warning type(sending strategy) WARNING_GROUP_ID=warning group id FAILURE_STRATEGY=failure strategy RECEIVERS=receivers RECEIVERS_CC=receivers cc WORKER_GROUP_ID=worker server group id PROCESS_INSTANCE_PRIORITY=process instance priority UPDATE_SCHEDULE_NOTES=update schedule SCHEDULE_ID=schedule id ONLINE_SCHEDULE_NOTES=online schedule OFFLINE_SCHEDULE_NOTES=offline schedule QUERY_SCHEDULE_NOTES=query schedule QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging LOGIN_TAG=User login related operations USER_NAME=user name PROJECT_NAME=project name CREATE_PROCESS_DEFINITION_NOTES=create process definition PROCESS_DEFINITION_NAME=process definition name PROCESS_DEFINITION_JSON=process definition detail info (json format) PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) PROCESS_DEFINITION_DESC=process definition desc PROCESS_DEFINITION_TAG=process definition related opertation SIGNOUT_NOTES=logout USER_PASSWORD=user password UPDATE_PROCESS_INSTANCE_NOTES=update process instance QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list VERIFY_PROCESS_DEFINITION_NAME_NOTES=verify process definition name LOGIN_NOTES=user login UPDATE_PROCESS_DEFINITION_NOTES=update process definition PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=query process definition by name QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) SCHEDULE_TIME=schedule time SYNC_DEFINE=update the information of the process instance to the process definition\ RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance SEARCH_VAL=search val USER_ID=user id PAGE_SIZE=page size LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definition all by project id DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance 
id TASK_ID=task instance id SKIP_LINE_NUM=skip line num QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log USERS_TAG=users related operation SCHEDULER_TAG=scheduler related operation CREATE_SCHEDULE_NOTES=create schedule CREATE_USER_NOTES=create user TENANT_ID=tenant id QUEUE=queue EMAIL=email PHONE=phone QUERY_USER_LIST_NOTES=query user list UPDATE_USER_NOTES=update user DELETE_USER_BY_ID_NOTES=delete user by id GRANT_PROJECT_NOTES=GRANT PROJECT PROJECT_IDS=project ids(string format, multiple projects separated by ",") GRANT_PROJECT_BY_CODE_NOTES=GRANT PROJECT BY CODE PROJECT_CODES=project codes(string format, multiple project codes separated by ",") GRANT_RESOURCE_NOTES=grant resource file RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") GET_USER_INFO_NOTES=get user info LIST_USER_NOTES=list user VERIFY_USER_NAME_NOTES=verify user name UNAUTHORIZED_USER_NOTES=cancel authorization ALERT_GROUP_ID=alert group id AUTHORIZED_USER_NOTES=authorized user GRANT_UDF_FUNC_NOTES=grant udf function UDF_IDS=udf ids(string format, multiple udf functions separated by ",") GRANT_DATASOURCE_NOTES=grant datasource DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables VIEW_GANTT_NOTES=view gantt SUB_PROCESS_INSTANCE_ID=sub process instance id TASK_NAME=task instance name TASK_INSTANCE_TAG=task instance related operation LOGGER_TAG=log related operation PROCESS_INSTANCE_TAG=process instance related operation EXECUTION_STATUS=runing status for workflow and task nodes HOST=ip address of running task START_DATE=start date END_DATE=end date QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id UPDATE_DATA_SOURCE_NOTES=update data source DATA_SOURCE_ID=DATA SOURCE ID QUERY_DATA_SOURCE_NOTES=query data source by id QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test DELETE_DATA_SOURCE_NOTES=delete data source VERIFY_DATA_SOURCE_NOTES=verify data source UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source AUTHORIZED_DATA_SOURCE_NOTES=authorized data source DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id QUERY_ALERT_GROUP_LIST_PAGING_NOTES=query alert group list paging EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=export process definition by id BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES= batch export process definition by ids QUERY_USER_CREATED_PROJECT_NOTES= query user created project QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES= query authorized and user created project COPY_PROCESS_DEFINITION_NOTES= copy process definition notes MOVE_PROCESS_DEFINITION_NOTES= move process definition notes TARGET_PROJECT_ID= target project id IS_COPY = is copy DELETE_PROCESS_DEFINITION_VERSION_NOTES=delete process definition version QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=query process definition versions SWITCH_PROCESS_DEFINITION_VERSION_NOTES=switch process definition version VERSION=version
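The keys above are resolved per request locale through standard Java resource-bundle lookup: i18n/messages.properties is the default bundle, and locale-specific variants such as messages_en_US.properties override it. Below is a minimal, self-contained illustration of that lookup mechanism; the base name i18n.messages matches where these files sit on the API module's classpath, though the exact resolution path inside DolphinScheduler's web layer may differ.

    import java.util.Locale;
    import java.util.ResourceBundle;

    public class I18nDemo {
        public static void main(String[] args) {
            // "i18n.messages" resolves to i18n/messages*.properties on the classpath;
            // an unmatched locale falls back to the default messages.properties shown above.
            ResourceBundle bundle = ResourceBundle.getBundle("i18n.messages", Locale.US);
            System.out.println(bundle.getString("GRANT_PROJECT_BY_CODE_NOTES")); // GRANT PROJECT BY CODE
        }
    }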
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # QUERY_SCHEDULE_LIST_NOTES=query schedule list EXECUTE_PROCESS_TAG=execute process related operation PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation RUN_PROCESS_INSTANCE_NOTES=run process instance START_NODE_LIST=start node list(node name) TASK_DEPEND_TYPE=task depend type COMMAND_TYPE=command type RUN_MODE=run mode TIMEOUT=timeout EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance EXECUTE_TYPE=execute type START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition GET_RECEIVER_CC_NOTES=query receiver cc DESC=description GROUP_NAME=group name GROUP_TYPE=group type QUERY_ALERT_GROUP_LIST_NOTES=query alert group list UPDATE_ALERT_GROUP_NOTES=update alert group DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not GRANT_ALERT_GROUP_NOTES=grant alert group USER_IDS=user id list EXECUTOR_TAG=executor operation EXECUTOR_NAME=executor name WORKER_GROUP=work group startParams=start parameters ALERT_GROUP_TAG=alert group related operation ALERT_PLUGIN_INSTANCE_TAG=alert plugin instance related operation WORK_FLOW_LINEAGE_TAG=work flow lineage related operation UI_PLUGINS_TAG=UI plugin related operation UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=update alert plugin instance operation CREATE_ALERT_PLUGIN_INSTANCE_NOTES=create alert plugin instance operation DELETE_ALERT_PLUGIN_INSTANCE_NOTES=delete alert plugin instance operation QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES=query alert plugin instance paging QUERY_TOPN_LONGEST_RUNNING_PROCESS_INSTANCE_NOTES=query topN longest running process instance ALERT_PLUGIN_INSTANCE_NAME=alert plugin instance name ALERT_PLUGIN_DEFINE_ID=alert plugin define id ALERT_PLUGIN_ID=alert plugin id ALERT_PLUGIN_INSTANCE_ID=alert plugin instance id ALERT_PLUGIN_INSTANCE_PARAMS=alert plugin instance parameters ALERT_INSTANCE_NAME=alert instance name VERIFY_ALERT_INSTANCE_NAME_NOTES=verify alert instance name DATA_SOURCE_PARAM=datasource parameter QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES=query all alert plugin instances GET_ALERT_PLUGIN_INSTANCE_NOTES=get alert plugin instance operation CREATE_ALERT_GROUP_NOTES=create alert group WORKER_GROUP_TAG=worker group related operation SAVE_WORKER_GROUP_NOTES=create worker group WORKER_GROUP_NAME=worker group name WORKER_IP_LIST=worker ip list, eg. 
192.168.1.1,192.168.1.2 QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging QUERY_WORKER_GROUP_LIST_NOTES=query worker group list DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id DATA_ANALYSIS_TAG=analysis related operation of task state COUNT_TASK_STATE_NOTES=count task state COUNT_PROCESS_INSTANCE_NOTES=count process instance state COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user COUNT_COMMAND_STATE_NOTES=count command state COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ ACCESS_TOKEN_TAG=access token related operation MONITOR_TAG=monitor related operation MASTER_LIST_NOTES=master server list WORKER_LIST_NOTES=worker server list QUERY_DATABASE_STATE_NOTES=query database state QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE TASK_STATE=task instance state SOURCE_TABLE=SOURCE TABLE DEST_TABLE=dest table TASK_DATE=task date QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging DATA_SOURCE_TAG=data source related operation CREATE_DATA_SOURCE_NOTES=create data source DATA_SOURCE_NAME=data source name DATA_SOURCE_NOTE=data source desc DB_TYPE=database type DATA_SOURCE_HOST=DATA SOURCE HOST DATA_SOURCE_PORT=data source port DATABASE_NAME=database name QUEUE_TAG=queue related operation QUERY_QUEUE_LIST_NOTES=query queue list QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging CREATE_QUEUE_NOTES=create queue YARN_QUEUE_NAME=yarn(hadoop) queue name QUEUE_ID=queue id TENANT_DESC=tenant desc QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging QUERY_TENANT_LIST_NOTES=query tenant list UPDATE_TENANT_NOTES=update tenant DELETE_TENANT_NOTES=delete tenant RESOURCES_TAG=resource center related operation CREATE_RESOURCE_NOTES=create resource RESOURCE_TYPE=resource file type RESOURCE_NAME=resource name RESOURCE_DESC=resource file desc RESOURCE_FILE=resource file RESOURCE_ID=resource id QUERY_RESOURCE_LIST_NOTES=query resource list DELETE_RESOURCE_BY_ID_NOTES=delete resource by id VIEW_RESOURCE_BY_ID_NOTES=view resource by id ONLINE_CREATE_RESOURCE_NOTES=online create resource SUFFIX=resource file suffix CONTENT=resource file content UPDATE_RESOURCE_NOTES=edit resource file online DOWNLOAD_RESOURCE_NOTES=download resource file CREATE_UDF_FUNCTION_NOTES=create udf function UDF_TYPE=UDF type FUNC_NAME=function name CLASS_NAME=package and class name ARG_TYPES=arguments UDF_DESC=udf desc VIEW_UDF_FUNCTION_NOTES=view udf function UPDATE_UDF_FUNCTION_NOTES=update udf function QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name DELETE_UDF_FUNCTION_NOTES=delete udf function AUTHORIZED_FILE_NOTES=authorized file UNAUTHORIZED_FILE_NOTES=unauthorized file AUTHORIZED_UDF_FUNC_NOTES=authorized udf func UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func VERIFY_QUEUE_NOTES=verify queue TENANT_TAG=tenant related operation CREATE_TENANT_NOTES=create tenant TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} DATA_SOURCE_PRINCIPAL=principal DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description UPDATE_PROJECT_NOTES=update project 
PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING QUERY_ALL_PROJECT_LIST_NOTES=query all project list DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging CREATE_TOKEN_NOTES=create token ,note: please login first QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging SCHEDULE=schedule WARNING_TYPE=warning type(sending strategy) WARNING_GROUP_ID=warning group id FAILURE_STRATEGY=failure strategy RECEIVERS=receivers RECEIVERS_CC=receivers cc WORKER_GROUP_ID=worker server group id PROCESS_INSTANCE_START_TIME=process instance start time PROCESS_INSTANCE_END_TIME=process instance end time PROCESS_INSTANCE_SIZE=process instance size PROCESS_INSTANCE_PRIORITY=process instance priority EXPECTED_PARALLELISM_NUMBER=custom parallelism to set the complement task threads UPDATE_SCHEDULE_NOTES=update schedule SCHEDULE_ID=schedule id ONLINE_SCHEDULE_NOTES=online schedule OFFLINE_SCHEDULE_NOTES=offline schedule QUERY_SCHEDULE_NOTES=query schedule QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging LOGIN_TAG=User login related operations USER_NAME=user name PROJECT_NAME=project name CREATE_PROCESS_DEFINITION_NOTES=create process definition PROCESS_DEFINITION_NAME=process definition name PROCESS_DEFINITION_JSON=process definition detail info (json format) PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) PROCESS_DEFINITION_DESC=process definition desc PROCESS_DEFINITION_TAG=process definition related operation SIGNOUT_NOTES=logout USER_PASSWORD=user password UPDATE_PROCESS_INSTANCE_NOTES=update process instance QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list VERIFY_PROCESS_DEFINITION_NAME_NOTES=verify process definition name LOGIN_NOTES=user login UPDATE_PROCESS_DEFINITION_NOTES=update process definition PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids PROCESS_DEFINITION_CODE=process definition code PROCESS_DEFINITION_CODE_LIST=process definition code list IMPORT_PROCESS_DEFINITION_NOTES=import process definition RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) SCHEDULE_TIME=schedule time SYNC_DEFINE=update the information of the process instance to the process definition RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance PREVIEW_SCHEDULE_NOTES=preview schedule SEARCH_VAL=search val USER_ID=user id FORCE_TASK_SUCCESS=force task success QUERY_TASK_INSTANCE_LIST_PAGING_NOTES=query task instance list paging PROCESS_INSTANCE_NAME=process instance name TASK_INSTANCE_ID=task instance id VERIFY_TENANT_CODE_NOTES=verify tenant code QUERY_UI_PLUGIN_DETAIL_BY_ID=query ui plugin 
detail by id PLUGIN_ID=plugin id QUERY_UI_PLUGINS_BY_TYPE=query ui plugins by type ACTIVATE_USER_NOTES=active user BATCH_ACTIVATE_USER_NOTES=batch active user STATE=state REPEAT_PASSWORD=repeat password REGISTER_USER_NOTES=register user USER_NAMES=user names PAGE_SIZE=page size LIMIT=limit CREATE_WORKER_GROUP_NOTES=create worker group WORKER_ADDR_LIST=worker address list QUERY_WORKER_ADDRESS_LIST_NOTES=query worker address list QUERY_WORKFLOW_LINEAGE_BY_IDS_NOTES=query workflow lineage by ids QUERY_WORKFLOW_LINEAGE_BY_NAME_NOTES=query workflow lineage by name VIEW_TREE_NOTES=view tree UDF_ID=udf id GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id GET_NODE_LIST_BY_DEFINITION_CODE_NOTES=get node list by definition code QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=query process definition by name PROCESS_DEFINITION_ID_LIST=process definition id list QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definition all by project id DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_NOTES=batch delete process instance by process ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id PROCESS_INSTANCE_IDS=process_instance ids SKIP_LINE_NUM=skip line num QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log USERS_TAG=users related operation SCHEDULER_TAG=scheduler related operation CREATE_SCHEDULE_NOTES=create schedule CREATE_USER_NOTES=create user TENANT_ID=tenant id QUEUE=queue EMAIL=email PHONE=phone QUERY_USER_LIST_NOTES=query user list UPDATE_USER_NOTES=update user UPDATE_QUEUE_NOTES=update queue DELETE_USER_BY_ID_NOTES=delete user by id GRANT_PROJECT_NOTES=GRANT PROJECT PROJECT_IDS=project ids(string format, multiple projects separated by ",") GRANT_PROJECT_BY_CODE_NOTES=GRANT PROJECT BY CODE PROJECT_CODES=project codes(string format, multiple project codes separated by ",") GRANT_RESOURCE_NOTES=grant resource file RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") GET_USER_INFO_NOTES=get user info LIST_USER_NOTES=list user VERIFY_USER_NAME_NOTES=verify user name UNAUTHORIZED_USER_NOTES=cancel authorization ALERT_GROUP_ID=alert group id AUTHORIZED_USER_NOTES=authorized user AUTHORIZE_RESOURCE_TREE_NOTES=authorize resource tree RESOURCE_CURRENTDIR=dir of the current resource QUERY_RESOURCE_LIST_PAGING_NOTES=query resource list paging RESOURCE_PID=parent directory ID of the current resource RESOURCE_FULL_NAME=resource full name QUERY_BY_RESOURCE_NAME=query by resource name QUERY_UDF_FUNC_LIST_NOTES=query udf funciton list VERIFY_RESOURCE_NAME_NOTES=verify resource name GRANT_UDF_FUNC_NOTES=grant udf function UDF_IDS=udf ids(string format, multiple udf functions separated by ",") GRANT_DATASOURCE_NOTES=grant datasource DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables VIEW_GANTT_NOTES=view gantt 
SUB_PROCESS_INSTANCE_ID=sub process instance id TASK_NAME=task instance name TASK_INSTANCE_TAG=task instance related operation LOGGER_TAG=log related operation PROCESS_INSTANCE_TAG=process instance related operation EXECUTION_STATUS=runing status for workflow and task nodes HOST=ip address of running task START_DATE=start date END_DATE=end date QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id UPDATE_DATA_SOURCE_NOTES=update data source DATA_SOURCE_ID=DATA SOURCE ID QUERY_DATA_SOURCE_NOTES=query data source by id QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test DELETE_DATA_SOURCE_NOTES=delete data source VERIFY_DATA_SOURCE_NOTES=verify data source UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source AUTHORIZED_DATA_SOURCE_NOTES=authorized data source DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id QUERY_ALERT_GROUP_LIST_PAGING_NOTES=query alert group list paging EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=export process definition by id BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES=batch export process definition by ids QUERY_USER_CREATED_PROJECT_NOTES=query user created project QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES=query authorized and user created project COPY_PROCESS_DEFINITION_NOTES=copy process definition notes MOVE_PROCESS_DEFINITION_NOTES=move process definition notes TARGET_PROJECT_ID=target project id IS_COPY=is copy DELETE_PROCESS_DEFINITION_VERSION_NOTES=delete process definition version QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=query process definition versions SWITCH_PROCESS_DEFINITION_VERSION_NOTES=switch process definition version VERSION=version
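When new keys such as GRANT_PROJECT_BY_CODE_NOTES are added, the default bundle and each locale bundle can drift apart. A quick parity check between two of these files catches missing translations early; this is a small standalone sketch, and the relative directory path is an assumption about where the repo is checked out.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.HashSet;
    import java.util.Properties;
    import java.util.Set;

    public class BundleParityCheck {
        // load a properties file and return its key set
        static Set<String> keys(String path) throws IOException {
            Properties p = new Properties();
            try (InputStreamReader in = new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8)) {
                p.load(in);
            }
            return new HashSet<>(p.stringPropertyNames());
        }

        public static void main(String[] args) throws IOException {
            String dir = "dolphinscheduler-api/src/main/resources/i18n/"; // assumed checkout-relative path
            Set<String> base = keys(dir + "messages.properties");
            Set<String> en = keys(dir + "messages_en_US.properties");
            Set<String> missingInEn = new HashSet<>(base);
            missingInEn.removeAll(en); // keys present in the default bundle but absent from en_US
            System.out.println("missing in en_US: " + missingInEn);
        }
    }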
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Revoke project permission for specified user ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
QUERY_SCHEDULE_LIST_NOTES=查询定时列表
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
UI_PLUGINS_TAG=UI插件相关操作
WORK_FLOW_LINEAGE_TAG=工作流血缘相关操作
RUN_PROCESS_INSTANCE_NOTES=运行流程实例
START_NODE_LIST=开始节点列表(节点name)
TASK_DEPEND_TYPE=任务依赖类型
COMMAND_TYPE=指令类型
RUN_MODE=运行模式
TIMEOUT=超时时间
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
EXECUTE_TYPE=执行类型
EXECUTOR_TAG=流程相关操作
EXECUTOR_NAME=流程名称
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
DESC=备注(描述)
GROUP_NAME=组名称
WORKER_GROUP=worker群组
startParams=启动参数
GROUP_TYPE=组类型
QUERY_ALERT_GROUP_LIST_NOTES=告警组列表
UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组
DELETE_ALERT_GROUP_BY_ID_NOTES=通过ID删除告警组
VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在
GRANT_ALERT_GROUP_NOTES=授权告警组
PROCESS_DEFINITION_IDS=流程定义ID
PROCESS_DEFINITION_CODE=流程定义编码
PROCESS_DEFINITION_CODE_LIST=流程定义编码列表
USER_IDS=用户ID列表
ALERT_GROUP_TAG=告警组相关操作
WORKER_GROUP_TAG=Worker分组管理
SAVE_WORKER_GROUP_NOTES=创建Worker分组
ALERT_PLUGIN_INSTANCE_TAG=告警插件实例相关操作
WORKER_GROUP_NAME=Worker分组名称
WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割
QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理
QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组
DELETE_WORKER_GROUP_BY_ID_NOTES=通过ID删除worker group
DATA_ANALYSIS_TAG=任务状态分析相关操作
COUNT_TASK_STATE_NOTES=任务状态统计
COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义
COUNT_COMMAND_STATE_NOTES=统计命令状态
COUNT_QUEUE_STATE_NOTES=统计队列里任务状态
ACCESS_TOKEN_TAG=访问token相关操作
MONITOR_TAG=监控相关操作
MASTER_LIST_NOTES=master服务列表
WORKER_LIST_NOTES=worker服务列表
QUERY_DATABASE_STATE_NOTES=查询数据库状态
QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态
TASK_STATE=任务实例状态
SOURCE_TABLE=源表
DEST_TABLE=目标表
TASK_DATE=任务时间
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表
DATA_SOURCE_TAG=数据源相关操作
CREATE_DATA_SOURCE_NOTES=创建数据源
DATA_SOURCE_NAME=数据源名称
DATA_SOURCE_NOTE=数据源描述
DB_TYPE=数据源类型
DATA_SOURCE_HOST=IP主机名
DATA_SOURCE_PORT=数据源端口
DATABASE_NAME=数据库名
QUEUE_TAG=队列相关操作
QUERY_TOPN_LONGEST_RUNNING_PROCESS_INSTANCE_NOTES=查询topN最长运行流程实例
QUERY_QUEUE_LIST_NOTES=查询队列列表
QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表
CREATE_QUEUE_NOTES=创建队列
YARN_QUEUE_NAME=hadoop yarn队列名
QUEUE_ID=队列ID
TENANT_DESC=租户描述
QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表
QUERY_TENANT_LIST_NOTES=查询租户列表
UPDATE_TENANT_NOTES=更新租户
DELETE_TENANT_NOTES=删除租户
RESOURCES_TAG=资源中心相关操作
CREATE_RESOURCE_NOTES=创建资源
RESOURCE_FULL_NAME=资源全名
RESOURCE_TYPE=资源文件类型
RESOURCE_NAME=资源文件名称
RESOURCE_DESC=资源文件描述
RESOURCE_FILE=资源文件
RESOURCE_ID=资源ID
QUERY_RESOURCE_LIST_NOTES=查询资源列表
QUERY_BY_RESOURCE_NAME=通过资源名称查询
QUERY_UDF_FUNC_LIST_NOTES=查询UDF函数列表
VERIFY_RESOURCE_NAME_NOTES=验证资源名称
DELETE_RESOURCE_BY_ID_NOTES=通过ID删除资源
VIEW_RESOURCE_BY_ID_NOTES=通过ID浏览资源
ONLINE_CREATE_RESOURCE_NOTES=在线创建资源
SUFFIX=资源文件后缀
CONTENT=资源文件内容
UPDATE_RESOURCE_NOTES=在线更新资源文件
DOWNLOAD_RESOURCE_NOTES=下载资源文件
CREATE_UDF_FUNCTION_NOTES=创建UDF函数
UDF_TYPE=UDF类型
FUNC_NAME=函数名称
CLASS_NAME=包名类名
ARG_TYPES=参数
UDF_DESC=udf描述,使用说明
VIEW_UDF_FUNCTION_NOTES=查看udf函数
UPDATE_UDF_FUNCTION_NOTES=更新udf函数
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表
VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名
DELETE_UDF_FUNCTION_NOTES=删除UDF函数
AUTHORIZED_FILE_NOTES=授权文件
UNAUTHORIZED_FILE_NOTES=取消授权文件
AUTHORIZED_UDF_FUNC_NOTES=授权udf函数
UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权
VERIFY_QUEUE_NOTES=验证队列
TENANT_TAG=租户相关操作
CREATE_TENANT_NOTES=创建租户
TENANT_CODE=操作系统租户
QUEUE_NAME=队列名
PASSWORD=密码
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
DATA_SOURCE_PRINCIPAL=principal
DATA_SOURCE_KERBEROS_KRB5_CONF=kerberos认证参数 java.security.krb5.conf
DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=kerberos认证参数 login.user.keytab.username
DATA_SOURCE_KERBEROS_KEYTAB_PATH=kerberos认证参数 login.user.keytab.path
PROJECT_TAG=项目相关操作
CREATE_PROJECT_NOTES=创建项目
PROJECT_DESC=项目描述
UPDATE_PROJECT_NOTES=更新项目
PROJECT_ID=项目ID
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目
DELETE_PROJECT_BY_ID_NOTES=通过ID删除项目
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
TASK_RECORD_TAG=任务记录相关操作
QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表
CREATE_TOKEN_NOTES=创建token,注意需要先登录
QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表
SCHEDULE=定时
WARNING_TYPE=发送策略
WARNING_GROUP_ID=发送组ID
FAILURE_STRATEGY=失败策略
RECEIVERS=收件人
RECEIVERS_CC=收件人(抄送)
WORKER_GROUP_ID=Worker Server分组ID
PROCESS_INSTANCE_PRIORITY=流程实例优先级
EXPECTED_PARALLELISM_NUMBER=补数任务自定义并行度
UPDATE_SCHEDULE_NOTES=更新定时
SCHEDULE_ID=定时ID
ONLINE_SCHEDULE_NOTES=定时上线
OFFLINE_SCHEDULE_NOTES=定时下线
QUERY_SCHEDULE_NOTES=查询定时
QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时
LOGIN_TAG=用户登录相关操作
USER_NAME=用户名
PROJECT_NAME=项目名称
CREATE_PROCESS_DEFINITION_NOTES=创建流程定义
PROCESS_INSTANCE_START_TIME=流程实例启动时间
PROCESS_INSTANCE_END_TIME=流程实例结束时间
PROCESS_INSTANCE_SIZE=流程实例个数
PROCESS_DEFINITION_NAME=流程定义名称
PROCESS_DEFINITION_JSON=流程定义详细信息(json格式)
PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式)
PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式)
PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式)
PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式)
PROCESS_DEFINITION_DESC=流程定义描述信息
PROCESS_DEFINITION_TAG=流程定义相关操作
SIGNOUT_NOTES=退出登录
USER_PASSWORD=用户密码
UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例
QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表
VERIFY_PROCESS_DEFINITION_NAME_NOTES=验证流程定义名字
LOGIN_NOTES=用户登录
UPDATE_PROCESS_DEFINITION_NOTES=更新流程定义
PROCESS_DEFINITION_ID=流程定义ID
RELEASE_PROCESS_DEFINITION_NOTES=发布流程定义
QUERY_PROCESS_DEFINITION_BY_ID_NOTES=通过流程定义ID查询流程定义
QUERY_PROCESS_DEFINITION_LIST_NOTES=查询流程定义列表
QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义
PAGE_NO=页码号
PROCESS_INSTANCE_ID=流程实例ID
PROCESS_INSTANCE_IDS=流程实例ID集合
PROCESS_INSTANCE_JSON=流程实例信息(json格式)
PREVIEW_SCHEDULE_NOTES=定时调度预览
SCHEDULE_TIME=定时时间
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例
SEARCH_VAL=搜索值
FORCE_TASK_SUCCESS=强制TASK成功
QUERY_TASK_INSTANCE_LIST_PAGING_NOTES=分页查询任务实例列表
PROCESS_INSTANCE_NAME=流程实例名称
TASK_INSTANCE_ID=任务实例ID
VERIFY_TENANT_CODE_NOTES=验证租户
QUERY_UI_PLUGIN_DETAIL_BY_ID=通过ID查询UI插件详情
QUERY_UI_PLUGINS_BY_TYPE=通过类型查询UI插件
ACTIVATE_USER_NOTES=激活用户
BATCH_ACTIVATE_USER_NOTES=批量激活用户
REPEAT_PASSWORD=重复密码
REGISTER_USER_NOTES=用户注册
STATE=状态
USER_NAMES=多个用户名
PLUGIN_ID=插件ID
USER_ID=用户ID
PAGE_SIZE=页大小
LIMIT=显示多少条
UDF_ID=udf ID
AUTHORIZE_RESOURCE_TREE_NOTES=授权资源树
RESOURCE_CURRENTDIR=当前资源目录
RESOURCE_PID=资源父目录ID
QUERY_RESOURCE_LIST_PAGING_NOTES=分页查询资源列表
VIEW_TREE_NOTES=树状图
IMPORT_PROCESS_DEFINITION_NOTES=导入流程定义
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=通过流程定义ID获得任务节点列表
PROCESS_DEFINITION_ID_LIST=流程定义id列表
QUERY_PROCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=通过项目ID查询流程定义
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=通过流程定义ID集合批量删除流程定义
BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_NOTES=通过流程实例ID集合批量删除流程实例
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=通过流程定义ID删除流程定义
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=通过流程实例ID查询流程实例
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=通过流程实例ID删除流程实例
TASK_ID=任务实例ID
SKIP_LINE_NUM=忽略行数
QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志
USERS_TAG=用户相关操作
SCHEDULER_TAG=定时相关操作
CREATE_SCHEDULE_NOTES=创建定时
CREATE_USER_NOTES=创建用户
CREATE_WORKER_GROUP_NOTES=创建Worker分组
WORKER_ADDR_LIST=worker地址列表
QUERY_WORKER_ADDRESS_LIST_NOTES=查询worker地址列表
QUERY_WORKFLOW_LINEAGE_BY_IDS_NOTES=通过IDs查询工作流血缘列表
QUERY_WORKFLOW_LINEAGE_BY_NAME_NOTES=通过名称查询工作流血缘列表
TENANT_ID=租户ID
QUEUE=使用的队列
EMAIL=邮箱
PHONE=手机号
QUERY_USER_LIST_NOTES=查询用户列表
UPDATE_USER_NOTES=更新用户
UPDATE_QUEUE_NOTES=更新队列
DELETE_USER_BY_ID_NOTES=删除用户通过ID
GRANT_PROJECT_NOTES=授权项目
PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割)
GRANT_PROJECT_BY_CODE_NOTES=授权项目
PROJECT_CODES=项目Codes(字符串格式,多个项目Code以","分割)
GRANT_RESOURCE_NOTES=授权资源文件
RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割)
GET_USER_INFO_NOTES=获取用户信息
GET_NODE_LIST_BY_DEFINITION_CODE_NOTES=通过流程定义编码查询节点列表
QUERY_PROCESS_DEFINITION_BY_NAME_NOTES=通过名称查询流程定义
LIST_USER_NOTES=用户列表
VERIFY_USER_NAME_NOTES=验证用户名
UNAUTHORIZED_USER_NOTES=取消授权
ALERT_GROUP_ID=报警组ID
AUTHORIZED_USER_NOTES=授权用户
GRANT_UDF_FUNC_NOTES=授权udf函数
UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割)
GRANT_DATASOURCE_NOTES=授权数据源
DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割)
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=通过任务实例ID查询子流程实例
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=通过子流程实例ID查询父流程实例信息
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量
VIEW_GANTT_NOTES=浏览Gantt图
SUB_PROCESS_INSTANCE_ID=子流程实例ID
TASK_NAME=任务实例名
TASK_INSTANCE_TAG=任务实例相关操作
LOGGER_TAG=日志相关操作
PROCESS_INSTANCE_TAG=流程实例相关操作
EXECUTION_STATUS=工作流和任务节点的运行状态
HOST=运行任务的主机IP地址
START_DATE=开始时间
END_DATE=结束时间
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表
DELETE_ALERT_PLUGIN_INSTANCE_NOTES=删除告警插件实例
CREATE_ALERT_PLUGIN_INSTANCE_NOTES=创建告警插件实例
GET_ALERT_PLUGIN_INSTANCE_NOTES=查询告警插件实例
QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES=分页查询告警实例列表
QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES=查询所有告警实例列表
UPDATE_ALERT_PLUGIN_INSTANCE_NOTES=更新告警插件实例
ALERT_PLUGIN_INSTANCE_NAME=告警插件实例名称
ALERT_PLUGIN_DEFINE_ID=告警插件定义ID
ALERT_PLUGIN_ID=告警插件ID
ALERT_PLUGIN_INSTANCE_ID=告警插件实例ID
ALERT_PLUGIN_INSTANCE_PARAMS=告警插件实例参数
ALERT_INSTANCE_NAME=告警插件名称
VERIFY_ALERT_INSTANCE_NAME_NOTES=验证告警插件名称
UPDATE_DATA_SOURCE_NOTES=更新数据源
DATA_SOURCE_PARAM=数据源参数
DATA_SOURCE_ID=数据源ID
CREATE_ALERT_GROUP_NOTES=创建告警组
QUERY_DATA_SOURCE_NOTES=查询数据源通过ID
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=通过数据源类型查询数据源列表
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表
CONNECT_DATA_SOURCE_NOTES=连接数据源
CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试
DELETE_DATA_SOURCE_NOTES=删除数据源
VERIFY_DATA_SOURCE_NOTES=验证数据源
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据
QUERY_ALERT_GROUP_LIST_PAGING_NOTES=分页查询告警组列表
EXPORT_PROCESS_DEFINITION_BY_ID_NOTES=通过工作流ID导出工作流定义
BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES=批量导出工作流定义
QUERY_USER_CREATED_PROJECT_NOTES=查询用户创建的项目
QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES=查询授权和用户创建的项目
COPY_PROCESS_DEFINITION_NOTES=复制工作流定义
MOVE_PROCESS_DEFINITION_NOTES=移动工作流定义
TARGET_PROJECT_ID=目标项目ID
IS_COPY=是否复制
DELETE_PROCESS_DEFINITION_VERSION_NOTES=删除流程历史版本
QUERY_PROCESS_DEFINITION_VERSIONS_NOTES=查询流程历史版本信息
SWITCH_PROCESS_DEFINITION_VERSION_NOTES=切换流程版本
VERSION=版本号
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.

### Description

Revoke project permission for specified user

### Use case

_No response_

### Related issues

_No response_

### Are you willing to submit a PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/UsersControllerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.utils.JSONUtils;

import java.util.ArrayList;
import java.util.List;

import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;

/**
 * users controller test
 */
public class UsersControllerTest extends AbstractControllerTest {
    private static final Logger logger = LoggerFactory.getLogger(UsersControllerTest.class);

    @Test
    public void testCreateUser() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userName", "user_test");
        paramsMap.add("userPassword", "123456qwe?");
        paramsMap.add("tenantId", "109");
        paramsMap.add("queue", "1");
        paramsMap.add("email", "12343534@qq.com");
        paramsMap.add("phone", "15800000000");

        MvcResult mvcResult = mockMvc.perform(post("/users/create")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.CREATE_USER_ERROR.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testUpdateUser() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("id", "32");
        paramsMap.add("userName", "user_test");
        paramsMap.add("userPassword", "123456qwe?");
        paramsMap.add("tenantId", "9");
        paramsMap.add("queue", "1");
        paramsMap.add("email", "12343534@qq.com");
        paramsMap.add("phone", "15800000000");

        MvcResult mvcResult = mockMvc.perform(post("/users/update")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.UPDATE_USER_ERROR.getCode(), result.getCode().intValue());
    }

    @Test
    public void testGrantProject() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "32");
        paramsMap.add("projectIds", "3");

        MvcResult mvcResult = mockMvc.perform(post("/users/grant-project")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testGrantProjectByCode() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "32");
        paramsMap.add("projectCodes", "3682329499136,3643998558592");

        MvcResult mvcResult = mockMvc.perform(post("/users/grant-project-by-code")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testGrantResource() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "32");
        paramsMap.add("resourceIds", "5");

        MvcResult mvcResult = mockMvc.perform(post("/users/grant-file")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testGrantUDFFunc() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "32");
        paramsMap.add("udfIds", "5");

        MvcResult mvcResult = mockMvc.perform(post("/users/grant-udf-func")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testGrantDataSource() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "32");
        paramsMap.add("datasourceIds", "5");

        MvcResult mvcResult = mockMvc.perform(post("/users/grant-datasource")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testGetUserInfo() throws Exception {
        MvcResult mvcResult = mockMvc.perform(get("/users/get-user-info")
                .header(SESSION_ID, sessionId))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testListAll() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userName", "test");

        MvcResult mvcResult = mockMvc.perform(get("/users/list-all")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    // todo: there is a sql error, the table t_ds_relation_user_alertgroup has already been dropped
    @Ignore
    @Test
    public void testAuthorizedUser() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("alertgroupId", "1");

        MvcResult mvcResult = mockMvc.perform(get("/users/authed-user")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    // todo: t_ds_relation_user_alertgroup has already been dropped
    @Ignore
    @Test
    public void testUnauthorizedUser() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("alertgroupId", "1");

        MvcResult mvcResult = mockMvc.perform(get("/users/unauth-user")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testVerifyUserName() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userName", "hello");

        MvcResult mvcResult = mockMvc.perform(get("/users/verify-user-name")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testDelUserById() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("id", "32");

        MvcResult mvcResult = mockMvc.perform(post("/users/delete")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testQueryList() throws Exception {
        MvcResult mvcResult = mockMvc.perform(get("/users/list")
                .header(SESSION_ID, sessionId))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testRegisterUser() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userName", "user_test");
        paramsMap.add("userPassword", "123456qwe?");
        paramsMap.add("repeatPassword", "123456qwe?");
        paramsMap.add("email", "12343534@qq.com");

        MvcResult mvcResult = mockMvc.perform(post("/users/register")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
    }

    @Test
    @Ignore
    public void testActivateUser() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userName", "user_test");

        MvcResult mvcResult = mockMvc.perform(post("/users/activate")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
    }

    @Test
    public void testBatchActivateUser() throws Exception {
        List<String> userNames = new ArrayList<>();
        userNames.add("user_sky_cxl");
        userNames.add("19990323");
        userNames.add("test_sky_post_11");
        String jsonUserNames = JSONUtils.toJsonString(userNames);

        MvcResult mvcResult = mockMvc.perform(post("/users/batch/activate")
                .header(SESSION_ID, sessionId)
                .contentType(MediaType.APPLICATION_JSON)
                .content(jsonUserNames))
                .andExpect(status().isOk())
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,180
[Feature][dolphinscheduler-api] Revoke project permission for specified user
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.

### Description

Revoke project permission for specified user

### Use case

_No response_

### Related issues

_No response_

### Are you willing to submit a PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7180
https://github.com/apache/dolphinscheduler/pull/7185
2dc09c627fe1d32c63e9a7551390540581916d7d
3361d763c4af1b387602dfc97a5a57961b5a7c43
"2021-12-04T17:08:06Z"
java
"2021-12-05T06:13:23Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.when;

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.UsersServiceImpl;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.EncryptionUtils;
import org.apache.dolphinscheduler.dao.entity.AlertGroup;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper;
import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.spi.enums.ResourceType;

import org.apache.commons.collections.CollectionUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.google.common.collect.Lists;

/**
 * users service test
 */
@RunWith(MockitoJUnitRunner.class)
public class UsersServiceTest {
    private static final Logger logger = LoggerFactory.getLogger(UsersServiceTest.class);

    @InjectMocks
    private UsersServiceImpl usersService;
    @Mock
    private UserMapper userMapper;
    @Mock
    private AccessTokenMapper accessTokenMapper;
    @Mock
    private TenantMapper tenantMapper;
    @Mock
    private ResourceMapper resourceMapper;
    @Mock
    private AlertGroupMapper alertGroupMapper;
    @Mock
    private DataSourceUserMapper datasourceUserMapper;
    @Mock
    private ProjectUserMapper projectUserMapper;
    @Mock
    private ResourceUserMapper resourceUserMapper;
    @Mock
    private UDFUserMapper udfUserMapper;
    @Mock
    private ProjectMapper projectMapper;

    private String queueName = "UsersServiceTestQueue";

    @Before
    public void before() {
    }

    @After
    public void after() {
    }

    @Test
    public void testCreateUserForLdap() {
        String userName = "user1";
        String email = "user1@ldap.com";
        User user = usersService.createUser(UserType.ADMIN_USER, userName, email);
        Assert.assertNotNull(user);
    }

    @Test
    public void testCreateUser() {
        User user = new User();
        user.setUserType(UserType.ADMIN_USER);
        String userName = "userTest0001~";
        String userPassword = "userTest";
        String email = "123@qq.com";
        int tenantId = Integer.MAX_VALUE;
        String phone = "13456432345";
        int state = 1;
        try {
            //userName error
            Map<String, Object> result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state);
            logger.info(result.toString());
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            userName = "userTest0001";
            userPassword = "userTest000111111111111111";
            //password error
            result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state);
            logger.info(result.toString());
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            userPassword = "userTest0001";
            email = "1q.com";
            //email error
            result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state);
            logger.info(result.toString());
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            email = "122222@qq.com";
            phone = "2233";
            //phone error
            result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state);
            logger.info(result.toString());
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            phone = "13456432345";
            //tenantId not exists
            result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state);
            logger.info(result.toString());
            Assert.assertEquals(Status.TENANT_NOT_EXIST, result.get(Constants.STATUS));

            //success
            Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
            result = usersService.createUser(user, userName, userPassword, email, 1, phone, queueName, state);
            logger.info(result.toString());
            Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        } catch (Exception e) {
            logger.error(Status.CREATE_USER_ERROR.getMsg(), e);
            Assert.assertTrue(false);
        }
    }

    @Test
    public void testQueryUser() {
        String userName = "userTest0001";
        String userPassword = "userTest0001";
        when(userMapper.queryUserByNamePassword(userName, EncryptionUtils.getMd5(userPassword))).thenReturn(getGeneralUser());
        User queryUser = usersService.queryUser(userName, userPassword);
        logger.info(queryUser.toString());
        Assert.assertTrue(queryUser != null);
    }

    @Test
    public void testSelectByIds() {
        List<Integer> ids = new ArrayList<>();
        List<User> users = usersService.queryUser(ids);
        Assert.assertTrue(users.isEmpty());
        ids.add(1);
        List<User> userList = new ArrayList<>();
        userList.add(new User());
        when(userMapper.selectByIds(ids)).thenReturn(userList);
        List<User> userList1 = usersService.queryUser(ids);
        Assert.assertFalse(userList1.isEmpty());
    }

    @Test
    public void testGetUserIdByName() {
        User user = new User();
        user.setId(1);
        user.setUserType(UserType.ADMIN_USER);
        user.setUserName("test_user");

        //user name null
        int userId = usersService.getUserIdByName("");
        Assert.assertEquals(0, userId);

        //user not exist
        when(usersService.queryUser(user.getUserName())).thenReturn(null);
        int userNotExistId = usersService.getUserIdByName(user.getUserName());
        Assert.assertEquals(-1, userNotExistId);

        //user exist
        when(usersService.queryUser(user.getUserName())).thenReturn(user);
        int userExistId = usersService.getUserIdByName(user.getUserName());
        Assert.assertEquals(user.getId(), userExistId);
    }

    @Test
    public void testQueryUserList() {
        User user = new User();

        //no operate
        Map<String, Object> result = usersService.queryUserList(user);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //success
        user.setUserType(UserType.ADMIN_USER);
        when(userMapper.selectList(null)).thenReturn(getUserList());
        result = usersService.queryUserList(user);
        List<User> userList = (List<User>) result.get(Constants.DATA_LIST);
        Assert.assertTrue(userList.size() > 0);
    }

    @Test
    public void testQueryUserListPage() {
        User user = new User();
        IPage<User> page = new Page<>(1, 10);
        page.setRecords(getUserList());
        when(userMapper.queryUserPaging(any(Page.class), eq("userTest"))).thenReturn(page);

        //no operate
        Result result = usersService.queryUserList(user, "userTest", 1, 10);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getCode(), (int) result.getCode());

        //success
        user.setUserType(UserType.ADMIN_USER);
        result = usersService.queryUserList(user, "userTest", 1, 10);
        Assert.assertEquals(Status.SUCCESS.getCode(), (int) result.getCode());
        PageInfo<User> pageInfo = (PageInfo<User>) result.getData();
        Assert.assertTrue(pageInfo.getTotalList().size() > 0);
    }

    @Test
    public void testUpdateUser() {
        String userName = "userTest0001";
        String userPassword = "userTest0001";
        try {
            //user not exist
            Map<String, Object> result = usersService.updateUser(getLoginUser(), 0, userName, userPassword, "3443@qq.com", 1, "13457864543", "queue", 1);
            Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));
            logger.info(result.toString());

            //success
            when(userMapper.selectById(1)).thenReturn(getUser());
            result = usersService.updateUser(getLoginUser(), 1, userName, userPassword, "32222s@qq.com", 1, "13457864543", "queue", 1);
            logger.info(result.toString());
            Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        } catch (Exception e) {
            logger.error("update user error", e);
            Assert.assertTrue(false);
        }
    }

    @Test
    public void testDeleteUserById() {
        User loginUser = new User();
        try {
            when(userMapper.queryTenantCodeByUserId(1)).thenReturn(getUser());
            when(userMapper.selectById(1)).thenReturn(getUser());
            when(accessTokenMapper.deleteAccessTokenByUserId(1)).thenReturn(0);

            //no operate
            Map<String, Object> result = usersService.deleteUserById(loginUser, 3);
            logger.info(result.toString());
            Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

            // user not exist
            loginUser.setUserType(UserType.ADMIN_USER);
            result = usersService.deleteUserById(loginUser, 3);
            logger.info(result.toString());
            Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

            // user is project owner
            Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(Lists.newArrayList(new Project()));
            result = usersService.deleteUserById(loginUser, 1);
            Assert.assertEquals(Status.TRANSFORM_PROJECT_OWNERSHIP, result.get(Constants.STATUS));

            //success
            Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(null);
            result = usersService.deleteUserById(loginUser, 1);
            logger.info(result.toString());
            Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        } catch (Exception e) {
            logger.error("delete user error", e);
            Assert.assertTrue(false);
        }
    }

    @Test
    public void testGrantProject() {
        when(userMapper.selectById(1)).thenReturn(getUser());
        User loginUser = new User();
        String projectIds = "100000,120000";
        Map<String, Object> result = usersService.grantProject(loginUser, 1, projectIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //user not exist
        loginUser.setUserType(UserType.ADMIN_USER);
        result = usersService.grantProject(loginUser, 2, projectIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

        //success
        result = usersService.grantProject(loginUser, 1, projectIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    @Test
    public void testGrantProjectByCode() {
        when(userMapper.selectById(1)).thenReturn(getUser());

        // user no permission
        User loginUser = new User();
        String projectCodes = "3682329499136,3643998558592";
        Map<String, Object> result = this.usersService.grantProjectByCode(loginUser, 1, projectCodes);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        // user not exist
        loginUser.setUserType(UserType.ADMIN_USER);
        result = this.usersService.grantProjectByCode(loginUser, 2, projectCodes);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

        // success
        Mockito.when(this.projectMapper.queryByCodes(Mockito.anyCollection())).thenReturn(Lists.newArrayList(new Project()));
        result = this.usersService.grantProjectByCode(loginUser, 1, projectCodes);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    @Test
    public void testGrantResources() {
        String resourceIds = "100000,120000";
        when(userMapper.selectById(1)).thenReturn(getUser());
        User loginUser = new User();
        Map<String, Object> result = usersService.grantResources(loginUser, 1, resourceIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //user not exist
        loginUser.setUserType(UserType.ADMIN_USER);
        result = usersService.grantResources(loginUser, 2, resourceIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

        //success
        when(resourceMapper.selectById(Mockito.anyInt())).thenReturn(getResource());
        when(resourceUserMapper.deleteResourceUser(1, 0)).thenReturn(1);
        result = usersService.grantResources(loginUser, 1, resourceIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    @Test
    public void testGrantUDFFunction() {
        String udfIds = "100000,120000";
        when(userMapper.selectById(1)).thenReturn(getUser());
        User loginUser = new User();
        Map<String, Object> result = usersService.grantUDFFunction(loginUser, 1, udfIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //user not exist
        loginUser.setUserType(UserType.ADMIN_USER);
        result = usersService.grantUDFFunction(loginUser, 2, udfIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

        //success
        when(udfUserMapper.deleteByUserId(1)).thenReturn(1);
        result = usersService.grantUDFFunction(loginUser, 1, udfIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    @Test
    public void testGrantDataSource() {
        String datasourceIds = "100000,120000";
        when(userMapper.selectById(1)).thenReturn(getUser());
        User loginUser = new User();
        Map<String, Object> result = usersService.grantDataSource(loginUser, 1, datasourceIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //user not exist
        loginUser.setUserType(UserType.ADMIN_USER);
        result = usersService.grantDataSource(loginUser, 2, datasourceIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

        //success
        when(datasourceUserMapper.deleteByUserId(Mockito.anyInt())).thenReturn(1);
        result = usersService.grantDataSource(loginUser, 1, datasourceIds);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    private User getLoginUser() {
        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.ADMIN_USER);
        return loginUser;
    }

    @Test
    public void getUserInfo() {
        User loginUser = new User();
        loginUser.setUserName("admin");
        loginUser.setUserType(UserType.ADMIN_USER);
        // get admin user
        Map<String, Object> result = usersService.getUserInfo(loginUser);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        User tempUser = (User) result.get(Constants.DATA_LIST);
        //check userName
        Assert.assertEquals("admin", tempUser.getUserName());

        //get general user
        loginUser.setUserType(null);
        loginUser.setId(1);
        when(userMapper.queryDetailsById(1)).thenReturn(getGeneralUser());
        when(alertGroupMapper.queryByUserId(1)).thenReturn(getAlertGroups());
        result = usersService.getUserInfo(loginUser);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        tempUser = (User) result.get(Constants.DATA_LIST);
        //check userName
        Assert.assertEquals("userTest0001", tempUser.getUserName());
    }

    @Test
    public void testQueryAllGeneralUsers() {
        User loginUser = new User();

        //no operate
        Map<String, Object> result = usersService.queryAllGeneralUsers(loginUser);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //success
        loginUser.setUserType(UserType.ADMIN_USER);
        when(userMapper.queryAllGeneralUser()).thenReturn(getUserList());
        result = usersService.queryAllGeneralUsers(loginUser);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        List<User> userList = (List<User>) result.get(Constants.DATA_LIST);
        Assert.assertTrue(CollectionUtils.isNotEmpty(userList));
    }

    @Test
    public void testVerifyUserName() {
        //not exist user
        Result result = usersService.verifyUserName("admin89899");
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());

        //exist user
        when(userMapper.queryByUserNameAccurately("userTest0001")).thenReturn(getUser());
        result = usersService.verifyUserName("userTest0001");
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NAME_EXIST.getMsg(), result.getMsg());
    }

    @Test
    public void testUnauthorizedUser() {
        User loginUser = new User();
        when(userMapper.selectList(null)).thenReturn(getUserList());
        when(userMapper.queryUserListByAlertGroupId(2)).thenReturn(getUserList());

        //no operate
        Map<String, Object> result = usersService.unauthorizedUser(loginUser, 2);
        logger.info(result.toString());
        loginUser.setUserType(UserType.ADMIN_USER);
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //success
        result = usersService.unauthorizedUser(loginUser, 2);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    @Test
    public void testAuthorizedUser() {
        User loginUser = new User();
        when(userMapper.queryUserListByAlertGroupId(2)).thenReturn(getUserList());

        //no operate
        Map<String, Object> result = usersService.authorizedUser(loginUser, 2);
        logger.info(result.toString());
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        //success
        loginUser.setUserType(UserType.ADMIN_USER);
        result = usersService.authorizedUser(loginUser, 2);
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        List<User> userList = (List<User>) result.get(Constants.DATA_LIST);
        logger.info(result.toString());
        Assert.assertTrue(CollectionUtils.isNotEmpty(userList));
    }

    @Test
    public void testRegisterUser() {
        String userName = "userTest0002~";
        String userPassword = "userTest";
        String repeatPassword = "userTest";
        String email = "123@qq.com";
        try {
            //userName error
            Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email);
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            userName = "userTest0002";
            userPassword = "userTest000111111111111111";
            //password error
            result = usersService.registerUser(userName, userPassword, repeatPassword, email);
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            userPassword = "userTest0002";
            email = "1q.com";
            //email error
            result = usersService.registerUser(userName, userPassword, repeatPassword, email);
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            //repeatPassword error
            email = "7400@qq.com";
            repeatPassword = "userPassword";
            result = usersService.registerUser(userName, userPassword, repeatPassword, email);
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            //success
            repeatPassword = "userTest0002";
            result = usersService.registerUser(userName, userPassword, repeatPassword, email);
            Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        } catch (Exception e) {
            Assert.assertTrue(false);
        }
    }

    @Test
    public void testActivateUser() {
        User user = new User();
        user.setUserType(UserType.GENERAL_USER);
        String userName = "userTest0002~";
        try {
            //not admin
            Map<String, Object> result = usersService.activateUser(user, userName);
            Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

            //userName error
            user.setUserType(UserType.ADMIN_USER);
            result = usersService.activateUser(user, userName);
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            //user not exist
            userName = "userTest10013";
            result = usersService.activateUser(user, userName);
            Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

            //user state error
            userName = "userTest0001";
            when(userMapper.queryByUserNameAccurately(userName)).thenReturn(getUser());
            result = usersService.activateUser(user, userName);
            Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));

            //success
            when(userMapper.queryByUserNameAccurately(userName)).thenReturn(getDisabledUser());
            result = usersService.activateUser(user, userName);
            Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        } catch (Exception e) {
            Assert.assertTrue(false);
        }
    }

    @Test
    public void testBatchActivateUser() {
        User user = new User();
        user.setUserType(UserType.GENERAL_USER);
        List<String> userNames = new ArrayList<>();
        userNames.add("userTest0001");
        userNames.add("userTest0002");
        userNames.add("userTest0003~");
        userNames.add("userTest0004");
        try {
            //not admin
            Map<String, Object> result = usersService.batchActivateUser(user, userNames);
            Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

            //batch activate user names
            user.setUserType(UserType.ADMIN_USER);
            when(userMapper.queryByUserNameAccurately("userTest0001")).thenReturn(getUser());
            when(userMapper.queryByUserNameAccurately("userTest0002")).thenReturn(getDisabledUser());
            result = usersService.batchActivateUser(user, userNames);
            Map<String, Object> responseData = (Map<String, Object>) result.get(Constants.DATA_LIST);
            Map<String, Object> successData = (Map<String, Object>) responseData.get("success");
            int totalSuccess = (Integer) successData.get("sum");
            Map<String, Object> failedData = (Map<String, Object>) responseData.get("failed");
            int totalFailed = (Integer) failedData.get("sum");
            Assert.assertEquals(1, totalSuccess);
            Assert.assertEquals(3, totalFailed);
            Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        } catch (Exception e) {
            Assert.assertTrue(false);
        }
    }

    /**
     * get disabled user
     */
    private User getDisabledUser() {
        User user = new User();
        user.setUserType(UserType.GENERAL_USER);
        user.setUserName("userTest0001");
        user.setUserPassword("userTest0001");
        user.setState(0);
        return user;
    }

    /**
     * get user
     */
    private User getGeneralUser() {
        User user = new User();
        user.setUserType(UserType.GENERAL_USER);
        user.setUserName("userTest0001");
        user.setUserPassword("userTest0001");
        return user;
    }

    private List<User> getUserList() {
        List<User> userList = new ArrayList<>();
        userList.add(getGeneralUser());
        return userList;
    }

    /**
     * get user
     */
    private User getUser() {
        User user = new User();
        user.setUserType(UserType.ADMIN_USER);
        user.setUserName("userTest0001");
        user.setUserPassword("userTest0001");
        user.setState(1);
        return user;
    }

    /**
     * get tenant
     *
     * @return tenant
     */
    private Tenant getTenant() {
        Tenant tenant = new Tenant();
        tenant.setId(1);
        return tenant;
    }

    /**
     * get resource
     *
     * @return resource
     */
    private Resource getResource() {
        Resource resource = new Resource();
        resource.setPid(-1);
        resource.setUserId(1);
        resource.setDescription("ResourcesServiceTest.jar");
        resource.setAlias("ResourcesServiceTest.jar");
        resource.setFullName("/ResourcesServiceTest.jar");
        resource.setType(ResourceType.FILE);
        return resource;
    }

    private List<AlertGroup> getAlertGroups() {
        List<AlertGroup> alertGroups = new ArrayList<>();
        AlertGroup alertGroup = new AlertGroup();
        alertGroups.add(alertGroup);
        return alertGroups;
    }
}
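A service-level test for the requested revoke operation would sit next to testGrantProjectByCode above. This is a minimal sketch under stated assumptions — the method name revokeProject, its (loginUser, userId, projectCode) signature, and the mocked projectMapper.queryByCode lookup are modeled on the grant tests, not copied from PR #7185:

    // Hypothetical addition to UsersServiceTest, reusing its mocks and helpers.
    @Test
    public void testRevokeProject() {
        when(userMapper.selectById(1)).thenReturn(getUser());
        long projectCode = 3682329499136L;

        // a general user has no permission to revoke
        User loginUser = new User();
        Map<String, Object> result = usersService.revokeProject(loginUser, 1, projectCode);
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));

        // target user does not exist
        loginUser.setUserType(UserType.ADMIN_USER);
        result = usersService.revokeProject(loginUser, 2, projectCode);
        Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));

        // success: the user/project relation row is removed
        Mockito.when(projectMapper.queryByCode(Mockito.anyLong())).thenReturn(new Project());
        result = usersService.revokeProject(loginUser, 1, projectCode);
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }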
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,037
[Bug] [Data Supplement] process instance can not be killed.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

Started a job with data supplement in the process definition UI, then stopped the process instance with the stop button in the process instance list UI. Not only did it not stop, it also restarted the same task instance.

### What you expected to happen

The process instance stops successfully.

### How to reproduce

Start a job with data supplement in the process definition UI. Stop the process instance with the stop button in the process instance list UI.

### Anything else

_No response_

### Version

2.0.0

### Are you willing to submit PR?

- [x] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7037
https://github.com/apache/dolphinscheduler/pull/7140
3361d763c4af1b387602dfc97a5a57961b5a7c43
d83735ab5191f37bfb90b9cb3d534386f7792fbf
"2021-11-29T07:07:51Z"
java
"2021-12-05T07:03:52Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.runner;

import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODES;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskGroupQueueStatus;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskGroupQueue;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.Lists;

/**
 * master exec thread,split dag
 */
public class WorkflowExecuteThread implements Runnable {

    /**
     * logger of WorkflowExecuteThread
     */
    private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class);

    /**
     * master config
     */
    private MasterConfig masterConfig;

    /**
     * process service
     */
    private ProcessService processService;

    /**
     * alert manager
     */
    private ProcessAlertManager processAlertManager;

    /**
     * netty executor manager
     */
    private NettyExecutorManager nettyExecutorManager;

    /**
     * process instance
     */
    private ProcessInstance processInstance;

    /**
     * process definition
     */
    private ProcessDefinition processDefinition;

    /**
     * the object of DAG
     */
    private DAG<String, TaskNode, TaskNodeRelation> dag;

    /**
     * key of workflow
     */
    private String key;

    /**
     * start flag, true: start nodes submit completely
     */
    private boolean isStart = false;

    /**
     * submit failure nodes
     */
    private boolean taskFailedSubmit = false;

    /**
     * task instance hash map, taskId as key
     */
    private Map<Integer, TaskInstance> taskInstanceMap = new ConcurrentHashMap<>();

    /**
     * running TaskNode, taskId as key
     */
    private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();

    /**
     * valid task map, taskCode as key, taskId as value
     */
    private Map<String, Integer> validTaskMap = new ConcurrentHashMap<>();

    /**
     * error task map, taskCode as key, taskId as value
     */
    private Map<String, Integer> errorTaskMap = new ConcurrentHashMap<>();

    /**
     * complete task map, taskCode as key, taskId as value
     */
    private Map<String, Integer> completeTaskMap = new ConcurrentHashMap<>();

    /**
     * depend failed task map, taskCode as key, taskId as value
     */
    private Map<String, Integer> dependFailedTaskMap = new ConcurrentHashMap<>();

    /**
     * forbidden task map, code as key
     */
    private Map<String, TaskNode> forbiddenTaskMap = new ConcurrentHashMap<>();

    /**
     * skip task map, code as key
     */
    private Map<String, TaskNode> skipTaskNodeMap = new ConcurrentHashMap<>();

    /**
     * complement date list
     */
    private List<Date> complementListDate = Lists.newLinkedList();

    /**
     * task timeout check list
     */
    private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList;

    /**
     * state event queue
     */
    private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>();

    /**
     * ready to submit task queue
     */
    private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();

    /**
     * constructor of WorkflowExecuteThread
     *
     * @param processInstance processInstance
     * @param processService processService
     * @param nettyExecutorManager nettyExecutorManager
     */
    public WorkflowExecuteThread(ProcessInstance processInstance
            , ProcessService processService
            , NettyExecutorManager nettyExecutorManager
            , ProcessAlertManager processAlertManager
            , MasterConfig masterConfig
            , ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) {
        this.processService = processService;
        this.processInstance = processInstance;
        this.masterConfig = masterConfig;
        this.nettyExecutorManager = nettyExecutorManager;
        this.processAlertManager = processAlertManager;
        this.taskTimeoutCheckList = taskTimeoutCheckList;
    }

    @Override
    public void run() {
        try {
            if (!this.isStart()) {
                startProcess();
            } else {
                handleEvents();
            }
        } catch (Exception e) {
            logger.error("handler error:", e);
        }
    }

    /**
     * the process start nodes are submitted completely.
     */
    public boolean isStart() {
        return this.isStart;
    }

    private void handleEvents() {
        while (!this.stateEvents.isEmpty()) {
            try {
                StateEvent stateEvent = this.stateEvents.peek();
                if (stateEventHandler(stateEvent)) {
                    this.stateEvents.remove(stateEvent);
                }
            } catch (Exception e) {
                logger.error("state handle error:", e);
            }
        }
    }

    public String getKey() {
        if (StringUtils.isNotEmpty(key)
                || this.processDefinition == null) {
            return key;
        }
        key = String.format("%d_%d_%d",
                this.processDefinition.getCode(),
                this.processDefinition.getVersion(),
                this.processInstance.getId());
        return key;
    }

    public boolean addStateEvent(StateEvent stateEvent) {
        if (processInstance.getId() != stateEvent.getProcessInstanceId()) {
            logger.info("state event would be abounded :{}", stateEvent.toString());
            return false;
        }
        this.stateEvents.add(stateEvent);
        return true;
    }

    public int eventSize() {
        return this.stateEvents.size();
    }

    public ProcessInstance getProcessInstance() {
        return this.processInstance;
    }

    private boolean stateEventHandler(StateEvent stateEvent) {
        logger.info("process event: {}", stateEvent.toString());

        if (!checkProcessInstance(stateEvent)) {
            return false;
        }

        boolean result = false;
        switch (stateEvent.getType()) {
            case PROCESS_STATE_CHANGE:
                result = processStateChangeHandler(stateEvent);
                break;
            case TASK_STATE_CHANGE:
                result = taskStateChangeHandler(stateEvent);
                break;
            case PROCESS_TIMEOUT:
                result = processTimeout();
                break;
            case TASK_TIMEOUT:
                result = taskTimeout(stateEvent);
                break;
            case WAIT_TASK_GROUP:
                result = checkForceStartAndWakeUp(stateEvent);
                break;
            default:
                break;
        }

        if (result) {
            this.stateEvents.remove(stateEvent);
        }
        return result;
    }

    private boolean checkForceStartAndWakeUp(StateEvent stateEvent) {
        TaskGroupQueue taskGroupQueue = this.processService.loadTaskGroupQueue(stateEvent.getTaskInstanceId());
        if (taskGroupQueue.getForceStart() == Flag.YES.getCode()) {
            ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
            TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
            ProcessInstance processInstance = this.processService.findProcessInstanceById(taskInstance.getProcessInstanceId());
            taskProcessor.dispatch(taskInstance, processInstance);
            this.processService.updateTaskGroupQueueStatus(taskGroupQueue.getId(), TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode());
            return true;
        }
        if (taskGroupQueue.getInQueue() == Flag.YES.getCode()) {
            boolean acquireTaskGroup = processService.acquireTaskGroupAgain(taskGroupQueue);
            if (acquireTaskGroup) {
                ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
                TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
                ProcessInstance processInstance = this.processService.findProcessInstanceById(taskInstance.getProcessInstanceId());
                taskProcessor.dispatch(taskInstance, processInstance);
                return true;
            }
        }
        return false;
    }

    private boolean taskTimeout(StateEvent stateEvent) {
        if (!checkTaskInstanceByStateEvent(stateEvent)) {
            return true;
        }

        TaskInstance taskInstance = taskInstanceMap.get(stateEvent.getTaskInstanceId());
        if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) {
            return true;
        }
        TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy();
        if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) {
            ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
            taskProcessor.action(TaskAction.TIMEOUT);
            return false;
        } else {
            processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine());
            return true;
        }
    }

    private boolean processTimeout() {
        this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition);
        return true;
    }

    private boolean taskStateChangeHandler(StateEvent stateEvent) {
        if (!checkTaskInstanceByStateEvent(stateEvent)) {
            return true;
        }

        TaskInstance task = getTaskInstance(stateEvent.getTaskInstanceId());
        if (task.getState() == null) {
            logger.error("task state is null, state handler error: {}", stateEvent);
            return true;
        }
        if (task.getState().typeIsFinished() && !completeTaskMap.containsKey(Long.toString(task.getTaskCode()))) {
            taskFinished(task);
            if (task.getTaskGroupId() > 0) {
                //release task group
                TaskInstance nextTaskInstance = this.processService.releaseTaskGroup(task);
                if (nextTaskInstance != null) {
                    if (nextTaskInstance.getProcessInstanceId() == task.getProcessInstanceId()) {
                        StateEvent nextEvent = new StateEvent();
                        nextEvent.setProcessInstanceId(this.processInstance.getId());
                        nextEvent.setTaskInstanceId(nextTaskInstance.getId());
                        nextEvent.setType(StateEventType.WAIT_TASK_GROUP);
                        this.stateEvents.add(nextEvent);
                    } else {
                        ProcessInstance processInstance = this.processService.findProcessInstanceById(nextTaskInstance.getProcessInstanceId());
                        this.processService.sendStartTask2Master(processInstance,nextTaskInstance.getId(),
                                org.apache.dolphinscheduler.remote.command.CommandType.TASK_WAKEUP_EVENT_REQUEST);
                    }
                }
            }
        } else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) {
            ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
            iTaskProcessor.run();

            if (iTaskProcessor.taskState().typeIsFinished()) {
                task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
                taskFinished(task);
            }
        } else {
            logger.error("state handler error: {}", stateEvent);
        }
        return true;
    }

    private void taskFinished(TaskInstance task) {
        logger.info("work flow {} task {} state:{} ",
                processInstance.getId(),
                task.getId(),
                task.getState());
        if (task.taskCanRetry()) {
            addTaskToStandByList(task);
            if (!task.retryTaskIntervalOverTime()) {
                logger.info("failure task will be submitted: process id: {}, task instance id: {} state:{} retry times:{} / {}, interval:{}",
                        processInstance.getId(),
                        task.getId(),
                        task.getState(),
                        task.getRetryTimes(),
                        task.getMaxRetryTimes(),
                        task.getRetryInterval());
                this.addTimeoutCheck(task);
            } else {
                submitStandByTask();
            }
            return;
        }

        completeTaskMap.put(Long.toString(task.getTaskCode()), task.getId());
        activeTaskProcessorMaps.remove(task.getId());
        taskTimeoutCheckList.remove(task.getId());
        if (task.getState().typeIsSuccess()) {
            processInstance.setVarPool(task.getVarPool());
            processService.saveProcessInstance(processInstance);
            submitPostNode(Long.toString(task.getTaskCode()));
        } else if (task.getState().typeIsFailure()) {
            if (task.isConditionsTask()
                    || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) {
                submitPostNode(Long.toString(task.getTaskCode()));
            } else {
                errorTaskMap.put(Long.toString(task.getTaskCode()), task.getId());
                if (processInstance.getFailureStrategy() == FailureStrategy.END) {
                    killAllTasks();
                }
            }
        }
        this.updateProcessInstanceState();
    }

    /**
     * update process instance
     */
    public void refreshProcessInstance(int processInstanceId) {
        logger.info("process instance update: {}", processInstanceId);
        processInstance = processService.findProcessInstanceById(processInstanceId);
        processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());
        processInstance.setProcessDefinition(processDefinition);
    }

    /**
     * update task instance
     */
    public void refreshTaskInstance(int taskInstanceId) {
        logger.info("task instance update: {} ", taskInstanceId);
        TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId);
        if (taskInstance == null) {
            logger.error("can not find task instance, id:{}", taskInstanceId);
            return;
        }
        processService.packageTaskInstance(taskInstance, processInstance);
        taskInstanceMap.put(taskInstance.getId(), taskInstance);

        validTaskMap.remove(Long.toString(taskInstance.getTaskCode()));
        if (Flag.YES == taskInstance.getFlag()) {
            validTaskMap.put(Long.toString(taskInstance.getTaskCode()), taskInstance.getId());
        }
    }

    /**
     * check process instance by state event
     */
    public boolean checkProcessInstance(StateEvent stateEvent) {
        if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) {
            logger.error("mismatch process instance id: {}, state event:{}",
                    this.processInstance.getId(),
                    stateEvent);
            return false;
        }
        return true;
    }

    /**
     * check if task instance exist by state event
     */
    public boolean checkTaskInstanceByStateEvent(StateEvent stateEvent) {
        if (stateEvent.getTaskInstanceId() == 0) {
            logger.error("task instance id null, state event:{}", stateEvent);
            return false;
        }
        if (!taskInstanceMap.containsKey(stateEvent.getTaskInstanceId())) {
            logger.error("mismatch task instance id, event:{}", stateEvent);
            return false;
        }
        return true;
    }

    /**
     * check if task instance exist by task code
     */
    public boolean checkTaskInstanceByCode(long taskCode) {
        if (taskInstanceMap == null || taskInstanceMap.size() == 0) {
            return false;
        }
        for (TaskInstance taskInstance : taskInstanceMap.values()) {
            if (taskInstance.getTaskCode() == taskCode) {
                return true;
            }
        }
        return false;
    }

    /**
     * check if task instance exist by id
     */
    public boolean checkTaskInstanceById(int taskInstanceId) {
        if (taskInstanceMap == null || taskInstanceMap.size() == 0) {
            return false;
        }
        return taskInstanceMap.containsKey(taskInstanceId);
    }

    /**
     * get task instance from memory
     */
    public TaskInstance getTaskInstance(int taskInstanceId) {
        if (taskInstanceMap.containsKey(taskInstanceId)) {
            return taskInstanceMap.get(taskInstanceId);
        }
        return null;
    }

    private boolean processStateChangeHandler(StateEvent stateEvent) {
        try {
            logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus());
            if
(processComplementData()) { return true; } if (stateEvent.getExecutionStatus().typeIsFinished()) { endProcess(); } if (processInstance.getState() == ExecutionStatus.READY_STOP) { killAllTasks(); } return true; } catch (Exception e) { logger.error("process state change error:", e); } return true; } private boolean processComplementData() throws Exception { if (!needComplementProcess()) { return false; } Date scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = complementListDate.get(0); } else if (processInstance.getState().typeIsFinished()) { endProcess(); if (complementListDate.size() <= 0) { logger.info("process complement end. process id:{}", processInstance.getId()); return true; } int index = complementListDate.indexOf(scheduleDate); if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) { logger.info("process complement end. process id:{}", processInstance.getId()); // complement data ends || no success return true; } logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}", processInstance.getId(), processInstance.getScheduleTime(), complementListDate.toString()); scheduleDate = complementListDate.get(index + 1); //the next process complement processInstance.setId(0); } processInstance.setScheduleTime(scheduleDate); Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); this.taskInstanceMap.clear(); startProcess(); return true; } private boolean needComplementProcess() { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { return true; } return false; } private void startProcess() throws Exception { if (this.taskInstanceMap.size() == 0) { isStart = false; buildFlowDag(); initTaskQueue(); submitPostNode(null); isStart = true; } } /** * process end handle */ private void endProcess() { this.stateEvents.clear(); if (processDefinition.getExecutionType().typeIsSerialWait()) { checkSerialProcess(processDefinition); } if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } if (processAlertManager.isNeedToSendWarning(processInstance)) { ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, getValidTaskList(), projectUser); } List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); //release task group processService.releaseAllTaskGroup(processInstance.getId()); } public void checkSerialProcess(ProcessDefinition processDefinition) { int nextInstanceId = 
processInstance.getNextProcessInstanceId(); if (nextInstanceId == 0) { ProcessInstance nextProcessInstance = this.processService.loadNextProcess4Serial(processInstance.getProcessDefinition().getCode(), ExecutionStatus.SERIAL_WAIT.getCode()); if (nextProcessInstance == null) { return; } nextInstanceId = nextProcessInstance.getId(); } ProcessInstance nextProcessInstance = this.processService.findProcessInstanceById(nextInstanceId); if (nextProcessInstance.getState().typeIsFinished() || nextProcessInstance.getState().typeIsRunning()) { return; } Map<String, Object> cmdParam = new HashMap<>(); cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, nextInstanceId); Command command = new Command(); command.setCommandType(CommandType.RECOVER_SERIAL_WAIT); command.setProcessDefinitionCode(processDefinition.getCode()); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); processService.createCommand(command); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { if (this.dag != null) { return; } processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); processInstance.setProcessDefinition(processDefinition); List<TaskInstance> recoverNodeList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList()); forbiddenTaskMap.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskMap.put(Long.toString(taskNode.getCode()), taskNode); } }); // generate process to get DAG info List<String> recoveryNodeCodeList = getRecoveryNodeCodeList(recoverNodeList); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNodeCodeList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskProcessorMaps.clear(); dependFailedTaskMap.clear(); completeTaskMap.clear(); errorTaskMap.clear(); if (ExecutionStatus.SUBMITTED_SUCCESS != processInstance.getState()) { List<TaskInstance> validTaskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : validTaskInstanceList) { validTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); taskInstanceMap.put(task.getId(), task); if (task.isTaskComplete()) { completeTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) { continue; } if (task.getState().typeIsFailure() && !task.taskCanRetry()) { errorTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); } } } if (processInstance.isComplementData() && complementListDate.size() == 0) { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> schedules = 
processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); if (complementListDate.size() == 0 && needComplementProcess()) { complementListDate = CronUtils.getSelfFireDateList(start, end, schedules); logger.info(" process definition code:{} complement data: {}", processInstance.getProcessDefinitionCode(), complementListDate.toString()); if (complementListDate.size() > 0 && Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(complementListDate.get(0)); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processService.updateProcessInstance(processInstance); } } } } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { try { ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType()); if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) { notifyProcessHostUpdate(taskInstance); } TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskGroupId(taskDefinition.getTaskGroupId()); // package task instance before submit processService.packageTaskInstance(taskInstance, processInstance); boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getTaskCommitRetryTimes(), masterConfig.getTaskCommitInterval()); if (!submit) { logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!", processInstance.getId(), processInstance.getName(), taskInstance.getId(), taskInstance.getName()); return null; } taskInstanceMap.put(taskInstance.getId(), taskInstance); activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor); taskProcessor.run(); addTimeoutCheck(taskInstance); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); this.stateEvents.add(stateEvent); } return taskInstance; } catch (Exception e) { logger.error("submit standby task error", e); return null; } } private void notifyProcessHostUpdate(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return; } try { HostUpdateCommand hostUpdateCommand = new HostUpdateCommand(); hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort())); hostUpdateCommand.setTaskInstanceId(taskInstance.getId()); Host host = new Host(taskInstance.getHost()); nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command()); } catch (Exception e) { logger.error("notify process host update", e); } } private void addTimeoutCheck(TaskInstance taskInstance) { if (taskTimeoutCheckList.containsKey(taskInstance.getId())) { return; } TaskDefinition taskDefinition = taskInstance.getTaskDefine(); if (taskDefinition == null) { logger.error("taskDefinition is null, taskId:{}", taskInstance.getId()); return; } if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag() || taskInstance.taskCanRetry()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } else 
{ if (taskInstance.isDependTask() || taskInstance.isSubProcess()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } } } /** * find task instance in db. * in case submit more than one same name task in the same time. * * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> validTaskInstanceList = getValidTaskList(); for (TaskInstance taskInstance : validTaskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance == null) { taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task dry run flag taskInstance.setDryRun(processInstance.getDryRun()); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode(); Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? 
processEnvironmentCode : taskNode.getEnvironmentCode(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); taskInstance.setEnvironmentCode(processEnvironmentCode); } else { taskInstance.setWorkerGroup(taskWorkerGroup); taskInstance.setEnvironmentCode(taskEnvironmentCode); } if (!taskInstance.getEnvironmentCode().equals(-1L)) { Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode()); if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) { taskInstance.setEnvironmentConfig(environment.getConfig()); } } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); } return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String, Property> allProperty = new HashMap<>(); Map<String, TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskCode : preTask) { Integer taskId = completeTaskMap.get(preTaskCode); if (taskId == null) { continue; } TaskInstance preTaskInstance = taskInstanceMap.get(taskId); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. 
thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } /** * get complete task instance map, taskCode as key */ private Map<String, TaskInstance> getCompleteTaskInstanceMap() { Map<String, TaskInstance> completeTaskInstanceMap = new HashMap<>(); for (Integer taskInstanceId : completeTaskMap.values()) { TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId); completeTaskInstanceMap.put(Long.toString(taskInstance.getTaskCode()), taskInstance); } return completeTaskInstanceMap; } /** * get valid task list */ private List<TaskInstance> getValidTaskList() { List<TaskInstance> validTaskInstanceList = new ArrayList<>(); for (Integer taskInstanceId : validTaskMap.values()) { validTaskInstanceList.add(taskInstanceMap.get(taskInstanceId)); } return validTaskInstanceList; } private void submitPostNode(String parentNodeCode) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeMap, dag, getCompleteTaskInstanceMap()); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); if (checkTaskInstanceByCode(taskNodeObject.getCode())) { continue; } TaskInstance task = createTaskInstance(processInstance, taskNodeObject); taskInstances.add(task); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if (completeTaskMap.containsKey(Long.toString(task.getTaskCode()))) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); continue; } addTaskToStandByList(task); } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskCode) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskCode)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskCode); List<String> depCodeList = taskNode.getDepList(); for (String depsNode : depCodeList) { if (!dag.containsNode(depsNode) || forbiddenTaskMap.containsKey(depsNode) || skipTaskNodeMap.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskMap.containsKey(depsNode)) { return 
DependResult.WAITING; } Integer depsTaskId = completeTaskMap.get(depsNode); ExecutionStatus depTaskState = taskInstanceMap.get(depsTaskId).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskCode)) { return DependResult.FAILED; } } logger.info("taskCode: {} completeDependTaskList: {}", taskCode, Arrays.toString(completeTaskMap.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeMap, dag, getCompleteTaskInstanceMap()); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { Integer taskInstanceId = completeTaskMap.get(dependNodeName); ExecutionStatus depTaskState = taskInstanceMap.get(taskInstanceId).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Integer taskInstanceId : completeTaskMap.values()) { TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId); if (taskInstance != null && taskInstance.getState() == state) { resultList.add(taskInstance); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskMap.size() > 0) { return true; } return this.dependFailedTaskMap.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 && activeTaskProcessorMaps.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList 
= getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ExecutionStatus state = getProcessInstanceState(processInstance); if (processInstance.getState() != state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); processInstance.setState(state); if (state.typeIsFinished()) { processInstance.setEndTime(new Date()); } processService.updateProcessInstance(processInstance); StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(Long.toString(taskInstance.getTaskCode())); } /** * add task to standby list * * @param taskInstance task 
instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { if (!readyToSubmitTaskQueue.contains(taskInstance)) { readyToSubmitTaskQueue.put(taskInstance); } } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the on going tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); for (int taskId : activeTaskProcessorMaps.keySet()) { TaskInstance taskInstance = processService.findTaskInstanceById(taskId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId); taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); taskInstanceMap.put(task.getId(), task); submitPostNode(Long.toString(task.getTaskCode())); continue; } } //init varPool only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(Long.toString(task.getTaskCode())); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (task.retryTaskIntervalOverTime()) { int originalId = task.getId(); TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); if 
(taskInstance.getId() != originalId) { activeTaskProcessorMaps.remove(originalId); } } } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (!StringUtils.isNotEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODES)) { startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node code list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node code list */ private List<String> getRecoveryNodeCodeList(List<TaskInstance> recoverNodeList) { List<String> recoveryNodeCodeList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeList)) { for (TaskInstance task : recoverNodeList) { recoveryNodeCodeList.add(Long.toString(task.getTaskCode())); } } return recoveryNodeCodeList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeCodeList recovery node code list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeCodeList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType); } }
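One piece of the file above is worth restating on its own: `setVarPoolValue` resolves collisions when several upstream tasks export a var-pool property with the same name, and its inline comments are easy to misread. Below is a self-contained sketch of the precedence rule as the code implements it; the `Prop` class and `merge` helper are illustrative stand-ins, not DolphinScheduler types.

```java
import java.util.HashMap;
import java.util.Map;

public final class VarPoolMergeExample {

    // Simplified stand-in for an upstream task's exported property (illustrative only).
    static final class Prop {
        final String name;
        final String value;
        final long taskEndTimeMillis; // end time of the upstream task that exported it

        Prop(String name, String value, long taskEndTimeMillis) {
            this.name = name;
            this.value = value;
            this.taskEndTimeMillis = taskEndTimeMillis;
        }
    }

    /**
     * Precedence rule distilled from setVarPoolValue: a non-empty value beats
     * an empty one; when both values are non-empty, the property exported by
     * the task that finished EARLIER wins.
     */
    static void merge(Map<String, Prop> pool, Prop incoming) {
        Prop existing = pool.get(incoming.name);
        if (existing == null) {
            pool.put(incoming.name, incoming);
            return;
        }
        if (incoming.value == null || incoming.value.isEmpty()) {
            return; // keep the existing entry, whether its value is empty or not
        }
        boolean existingEmpty = existing.value == null || existing.value.isEmpty();
        if (existingEmpty || existing.taskEndTimeMillis > incoming.taskEndTimeMillis) {
            pool.put(incoming.name, incoming); // the earlier finisher overrides
        }
    }

    public static void main(String[] args) {
        Map<String, Prop> pool = new HashMap<>();
        merge(pool, new Prop("bizdate", "2021-12-02", 2000L));
        merge(pool, new Prop("bizdate", "2021-12-01", 1000L)); // finished earlier, overrides
        System.out.println(pool.get("bizdate").value); // prints 2021-12-01
    }
}
```

In short: a non-empty value beats an empty one, and between two non-empty values the task that finished earlier wins.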
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,154
[Bug] [API] MultipartFile resource [file] cannot be resolved to absolute file path
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Uploading a file through the resource management function fails with the following exception: ``` java.io.FileNotFoundException: MultipartFile resource [file] cannot be resolved to absolute file path at org.springframework.core.io.AbstractResource.getFile(AbstractResource.java:138) ~[spring-core-5.3.12.jar:5.3.12] at org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(FileUtils.java:48) ~[classes/:na] at org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl.upload(ResourcesServiceImpl.java:599) [classes/:na] at org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl.createResource(ResourcesServiceImpl.java:238) [classes/:na] ``` ### What you expected to happen Files upload normally ### How to reproduce Upload a file through the resource management function ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
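The stack trace pinpoints the cause: `org.apache.dolphinscheduler.api.utils.FileUtils.copyFile` resolves the `MultipartFile` to a `java.io.File` through `AbstractResource.getFile()`, but Spring may buffer small uploads entirely in memory, so there is no backing file to resolve and the call throws `FileNotFoundException`. Below is a minimal sketch of one way to copy the upload by streaming its bytes instead of resolving a filesystem path; the class name is hypothetical and this is not necessarily the exact change made in the linked PR.

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

import org.springframework.web.multipart.MultipartFile;

// Hypothetical helper illustrating the stream-based copy; not the project's FileUtils.
public final class MultipartCopySketch {

    /**
     * Copy an uploaded MultipartFile to destFilename by reading its input stream.
     * getInputStream() works whether the upload is buffered in memory or spooled
     * to a temp file, unlike resolving the resource to a java.io.File.
     */
    public static void copyFile(MultipartFile file, String destFilename) throws IOException {
        Path dest = Paths.get(destFilename);
        if (dest.getParent() != null) {
            Files.createDirectories(dest.getParent());
        }
        try (InputStream in = file.getInputStream()) {
            Files.copy(in, dest, StandardCopyOption.REPLACE_EXISTING);
        }
    }
}
```

`MultipartFile.transferTo(File)` achieves the same result by delegating the copy to the multipart implementation, and avoids the same pitfall.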
https://github.com/apache/dolphinscheduler/issues/7154
https://github.com/apache/dolphinscheduler/pull/7155
3c8d1c6da91b23aa9106353829469f40899a7bed
8f77fbbeb8e2261365ebc6921a8095eb56243afb
"2021-12-03T10:05:33Z"
java
"2021-12-06T06:13:53Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ResourcesServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.ALIAS;
import static org.apache.dolphinscheduler.common.Constants.CONTENT;
import static org.apache.dolphinscheduler.common.Constants.JAR;

import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.RegexUtils;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.spi.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.ResourcesUser;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;

import org.apache.commons.beanutils.BeanMap;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;

import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.google.common.io.Files;

/**
 * resources service impl
 */
@Service
public class ResourcesServiceImpl extends BaseServiceImpl implements ResourcesService {

    private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceImpl.class);

    @Autowired
    private ResourceMapper resourcesMapper;

    @Autowired
    private UdfFuncMapper udfFunctionMapper;

    @Autowired
    private TenantMapper tenantMapper;

    @Autowired
    private UserMapper userMapper;

    @Autowired
    private ResourceUserMapper resourceUserMapper;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    /**
     * create directory
     *
     * @param loginUser login user
     * @param name alias
     * @param description description
     * @param type type
     * @param pid parent id
     * @param currentDir current directory
     * @return create directory result
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public Result<Object> createDirectory(User loginUser,
                                          String name,
                                          String description,
                                          ResourceType type,
                                          int pid,
                                          String currentDir) {
        Result<Object> result = checkResourceUploadStartupState();
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
        result = verifyResource(loginUser, type, fullName, pid);
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        if (checkResourceExists(fullName, type.ordinal())) {
            logger.error("resource directory {} has exist, can't recreate", fullName);
            putMsg(result, Status.RESOURCE_EXIST);
            return result;
        }
        Date now = new Date();
        Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
        try {
            resourcesMapper.insert(resource);
            putMsg(result, Status.SUCCESS);
            Map<Object, Object> dataMap = new BeanMap(resource);
            Map<String, Object> resultMap = new HashMap<>();
            for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
                if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
                    resultMap.put(entry.getKey().toString(), entry.getValue());
                }
            }
            result.setData(resultMap);
        } catch (DuplicateKeyException e) {
            logger.error("resource directory {} has exist, can't recreate", fullName);
            putMsg(result, Status.RESOURCE_EXIST);
            return result;
        } catch (Exception e) {
            logger.error("resource already exists, can't recreate ", e);
            throw new ServiceException("resource already exists, can't recreate");
        }
        //create directory in hdfs
        createDirectory(loginUser,fullName,type,result);
        return result;
    }

    /**
     * create resource
     *
     * @param loginUser login user
     * @param name alias
     * @param desc description
     * @param file file
     * @param type type
     * @param pid parent id
     * @param currentDir current directory
     * @return create result code
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public Result<Object> createResource(User loginUser,
                                         String name,
                                         String desc,
                                         ResourceType type,
                                         MultipartFile file,
                                         int pid,
                                         String currentDir) {
        Result<Object> result = checkResourceUploadStartupState();
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        result = verifyPid(loginUser, pid);
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        result = verifyFile(name, type, file);
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        // check resource name exists
        String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
        if (checkResourceExists(fullName, type.ordinal())) {
            logger.error("resource {} has exist, can't recreate", RegexUtils.escapeNRT(name));
            putMsg(result, Status.RESOURCE_EXIST);
            return result;
        }
        Date now = new Date();
        Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
        try {
            resourcesMapper.insert(resource);
            putMsg(result, Status.SUCCESS);
            Map<Object, Object> dataMap = new BeanMap(resource);
            Map<String, Object> resultMap = new HashMap<>();
            for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
                if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
                    resultMap.put(entry.getKey().toString(), entry.getValue());
                }
            }
            result.setData(resultMap);
        } catch (Exception e) {
            logger.error("resource already exists, can't recreate ", e);
            throw new ServiceException("resource already exists, can't recreate");
        }
        // fail upload
        if (!upload(loginUser, fullName, file, type)) {
            logger.error("upload resource: {} file: {} failed.", RegexUtils.escapeNRT(name), RegexUtils.escapeNRT(file.getOriginalFilename()));
            putMsg(result, Status.HDFS_OPERATION_ERROR);
            throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
        }
        return result;
    }

    /**
     * check resource is exists
     *
     * @param fullName fullName
     * @param type type
     * @return true if resource exists
     */
    private boolean checkResourceExists(String fullName, int type) {
        Boolean existResource = resourcesMapper.existResource(fullName, type);
        return existResource == Boolean.TRUE;
    }

    /**
     * update resource
     * @param loginUser login user
     * @param resourceId resource id
     * @param name name
     * @param desc description
     * @param type resource type
     * @param file resource file
     * @return update result code
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public Result<Object> updateResource(User loginUser,
                                         int resourceId,
                                         String name,
                                         String desc,
                                         ResourceType type,
                                         MultipartFile file) {
        Result<Object> result = checkResourceUploadStartupState();
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        Resource resource = resourcesMapper.selectById(resourceId);
        if (resource == null) {
            putMsg(result, Status.RESOURCE_NOT_EXIST);
            return result;
        }
        if (!hasPerm(loginUser, resource.getUserId())) {
            putMsg(result, Status.USER_NO_OPERATION_PERM);
            return result;
        }
        if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
            putMsg(result, Status.SUCCESS);
            return result;
        }
        //check resource already exists
        String originFullName = resource.getFullName();
        String originResourceName = resource.getAlias();
        String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/") + 1),name);
        if (!originResourceName.equals(name) && checkResourceExists(fullName, type.ordinal())) {
            logger.error("resource {} already exists, can't recreate", name);
            putMsg(result, Status.RESOURCE_EXIST);
            return result;
        }
        result = verifyFile(name, type, file);
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        // query tenant by user id
        String tenantCode = getTenantCode(resource.getUserId(),result);
        if (StringUtils.isEmpty(tenantCode)) {
            return result;
        }
        // verify whether the resource exists in storage
        // get the path of origin file in storage
        String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
        try {
            if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
                logger.error("{} not exist", originHdfsFileName);
                putMsg(result,Status.RESOURCE_NOT_EXIST);
                return result;
            }
        } catch (IOException e) {
            logger.error(e.getMessage(),e);
            throw new ServiceException(Status.HDFS_OPERATION_ERROR);
        }
        if (!resource.isDirectory()) {
            //get the origin file suffix
            String originSuffix = Files.getFileExtension(originFullName);
            String suffix = Files.getFileExtension(fullName);
            boolean suffixIsChanged = false;
            if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
                suffixIsChanged = true;
            }
            if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
                suffixIsChanged = true;
            }
            //verify whether suffix is changed
            if (suffixIsChanged) {
                //need verify whether this resource is authorized to other users
                Map<String, Object> columnMap = new HashMap<>();
                columnMap.put("resources_id", resourceId);
                List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
                if (CollectionUtils.isNotEmpty(resourcesUsers)) {
                    List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
                    List<User> users = userMapper.selectBatchIds(userIds);
                    String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
                    logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
                    putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
                    return result;
                }
            }
        }
        // updateResource data
        Date now = new Date();
        resource.setAlias(name);
        resource.setFileName(name);
        resource.setFullName(fullName);
        resource.setDescription(desc);
        resource.setUpdateTime(now);
        if (file != null) {
            resource.setSize(file.getSize());
        }
        try {
            resourcesMapper.updateById(resource);
            if (resource.isDirectory()) {
                List<Integer> childrenResource = listAllChildren(resource,false);
                if (CollectionUtils.isNotEmpty(childrenResource)) {
                    String matcherFullName = Matcher.quoteReplacement(fullName);
                    List<Resource> childResourceList;
                    Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
                    List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
                    childResourceList = resourceList.stream().map(t -> {
                        t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
                        t.setUpdateTime(now);
                        return t;
                    }).collect(Collectors.toList());
                    resourcesMapper.batchUpdateResource(childResourceList);
                    if (ResourceType.UDF.equals(resource.getType())) {
                        List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
                        if (CollectionUtils.isNotEmpty(udfFuncs)) {
                            udfFuncs = udfFuncs.stream().map(t -> {
                                t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
                                t.setUpdateTime(now);
                                return t;
                            }).collect(Collectors.toList());
                            udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
                        }
                    }
                }
            } else if (ResourceType.UDF.equals(resource.getType())) {
                List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
                if (CollectionUtils.isNotEmpty(udfFuncs)) {
                    udfFuncs = udfFuncs.stream().map(t -> {
                        t.setResourceName(fullName);
                        t.setUpdateTime(now);
                        return t;
                    }).collect(Collectors.toList());
                    udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
                }
            }
            putMsg(result, Status.SUCCESS);
            Map<Object, Object> dataMap = new BeanMap(resource);
            Map<String, Object> resultMap = new HashMap<>();
            for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
                if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
                    resultMap.put(entry.getKey().toString(), entry.getValue());
                }
            }
            result.setData(resultMap);
        } catch (Exception e) {
            logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
            throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
        }
        // if name unchanged, return directly without moving on HDFS
        if (originResourceName.equals(name) && file == null) {
            return result;
        }
        if (file != null) {
            // fail upload
            if (!upload(loginUser, fullName, file, type)) {
                logger.error("upload resource: {} file: {} failed.", name, RegexUtils.escapeNRT(file.getOriginalFilename()));
                putMsg(result, Status.HDFS_OPERATION_ERROR);
                throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
            }
            if (!fullName.equals(originFullName)) {
                try {
                    HadoopUtils.getInstance().delete(originHdfsFileName,false);
                } catch (IOException e) {
                    logger.error(e.getMessage(),e);
                    throw new ServiceException(String.format("delete resource: %s failed.", originFullName));
                }
            }
            return result;
        }
        // get the path of dest file in hdfs
        String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
        try {
            logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
            HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
        } catch (Exception e) {
            logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
            putMsg(result,Status.HDFS_COPY_FAIL);
            throw new ServiceException(Status.HDFS_COPY_FAIL);
        }
        return result;
    }

    private Result<Object> verifyFile(String name, ResourceType type, MultipartFile file) {
        Result<Object> result = new Result<>();
        putMsg(result, Status.SUCCESS);
        if (file != null) {
            // file is empty
            if (file.isEmpty()) {
                logger.error("file is empty: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
                putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
                return result;
            }
            // file suffix
            String fileSuffix = Files.getFileExtension(file.getOriginalFilename());
            String nameSuffix = Files.getFileExtension(name);
            // determine file suffix
            if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
                // rename file suffix and original suffix must be consistent
                logger.error("rename file suffix and original suffix must be consistent: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
                putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
                return result;
            }
            //If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
            if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
                logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
                putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
                return result;
            }
            if (file.getSize() > Constants.MAX_FILE_SIZE) {
                logger.error("file size is too large: {}", RegexUtils.escapeNRT(file.getOriginalFilename()));
                putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
                return result;
            }
        }
        return result;
    }

    /**
     * query resources list paging
     *
     * @param loginUser login user
     * @param type resource type
     * @param searchVal search value
     * @param pageNo page number
     * @param pageSize page size
     * @return resource list page
     */
    @Override
    public Result queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
        Result result = new Result();
        Page<Resource> page = new Page<>(pageNo, pageSize);
        int userId = loginUser.getId();
        if (isAdmin(loginUser)) {
            userId = 0;
        }
        if (directoryId != -1) {
            Resource directory = resourcesMapper.selectById(directoryId);
            if (directory == null) {
                putMsg(result, Status.RESOURCE_NOT_EXIST);
                return result;
            }
        }
        List<Integer> resourcesIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 0);
        IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page, userId, directoryId, type.ordinal(), searchVal,resourcesIds);
        PageInfo<Resource> pageInfo = new PageInfo<>(pageNo, pageSize);
        pageInfo.setTotal((int)resourceIPage.getTotal());
        pageInfo.setTotalList(resourceIPage.getRecords());
        result.setData(pageInfo);
        putMsg(result,Status.SUCCESS);
        return result;
    }

    /**
     * create directory
     * @param loginUser login user
     * @param fullName full name
     * @param type resource type
     * @param result Result
     */
    private void createDirectory(User loginUser,String fullName,ResourceType type,Result<Object> result) {
        String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
        String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
        String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
        try {
            if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
                createTenantDirIfNotExists(tenantCode);
            }
            if (!HadoopUtils.getInstance().mkdir(directoryName)) {
                logger.error("create resource directory {} of hdfs failed",directoryName);
                putMsg(result,Status.HDFS_OPERATION_ERROR);
                throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
            }
        } catch (Exception e) {
            logger.error("create resource directory {} of hdfs failed",directoryName);
            putMsg(result,Status.HDFS_OPERATION_ERROR);
            throw new ServiceException(String.format("create resource directory: %s failed.", directoryName));
        }
    }

    /**
     * upload file to hdfs
     *
     * @param loginUser login user
     * @param fullName full name
     * @param file file
     */
    private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
        // save to local
        String fileSuffix = Files.getFileExtension(file.getOriginalFilename());
        String nameSuffix = Files.getFileExtension(fullName);
        // determine file suffix
        if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
            return false;
        }
        // query tenant
        String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
        // random file name
        String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
        // save file to hdfs, and delete original file
        String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
        String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
        try {
            // if tenant dir not exists
            if (!HadoopUtils.getInstance().exists(resourcePath)) {
                createTenantDirIfNotExists(tenantCode);
            }
            org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
            HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
        } catch (Exception e) {
            FileUtils.deleteFile(localFilename);
            logger.error(e.getMessage(), e);
            return false;
        }
        return true;
    }

    /**
     * query resource list
     *
     * @param loginUser login user
     * @param type resource type
     * @return resource list
     */
    @Override
    public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
        Map<String, Object> result = new HashMap<>();
        List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type);
        Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
        result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query resource list by program type
     *
     * @param loginUser login user
     * @param type resource type
     * @return resource list
     */
    @Override
    public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) {
        Map<String, Object> result = new HashMap<>();
        List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type);
        String suffix = ".jar";
        if (programType != null) {
            switch (programType) {
                case JAVA:
                case SCALA:
                    break;
                case PYTHON:
                    suffix = ".py";
                    break;
                default:
            }
        }
        List<Resource> resources = new ResourceFilter(suffix, new ArrayList<>(allResourceList)).filter();
        Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
        result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * delete resource
     *
     * @param loginUser login user
     * @param resourceId resource id
     * @return delete result code
     * @throws IOException exception
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public Result<Object> delete(User loginUser, int resourceId) throws IOException {
        Result<Object> result = checkResourceUploadStartupState();
        if (!result.getCode().equals(Status.SUCCESS.getCode())) {
            return result;
        }
        // get resource by id
        Resource resource = resourcesMapper.selectById(resourceId);
        if (resource == null) {
            putMsg(result, Status.RESOURCE_NOT_EXIST);
            return result;
        }
        if (!hasPerm(loginUser, resource.getUserId())) {
            putMsg(result, Status.USER_NO_OPERATION_PERM);
            return result;
        }
        String tenantCode = getTenantCode(resource.getUserId(),result);
        if (StringUtils.isEmpty(tenantCode)) {
            return result;
        }
        // get all resource id of process definitions those is released
        List<Map<String, Object>> list = processDefinitionMapper.listResources();
        Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
        Set<Integer> resourceIdSet = resourceProcessMap.keySet();
        // get all children of the resource
        List<Integer> allChildren = listAllChildren(resource,true);
        Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
        //if resource type is UDF,need check whether it is bound by UDF function
        if (resource.getType() == (ResourceType.UDF)) {
            List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
            if (CollectionUtils.isNotEmpty(udfFuncs)) {
                logger.error("can't be deleted,because it is bound by UDF functions:{}", udfFuncs);
                putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
                return result;
            }
        }
        if (resourceIdSet.contains(resource.getPid())) {
            logger.error("can't be deleted,because it is used of process definition");
            putMsg(result, Status.RESOURCE_IS_USED);
            return result;
        }
        resourceIdSet.retainAll(allChildren);
        if (CollectionUtils.isNotEmpty(resourceIdSet)) {
            logger.error("can't be deleted,because it is used of process definition");
            for (Integer resId : resourceIdSet) {
                logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
            }
            putMsg(result, Status.RESOURCE_IS_USED);
            return result;
        }
        // get hdfs file by type
        String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
        //delete data in database
        resourcesMapper.deleteIds(needDeleteResourceIdArray);
        resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
        //delete file on hdfs
        HadoopUtils.getInstance().delete(hdfsFilename, true);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * verify resource by name
and type * @param loginUser login user * @param fullName resource full name * @param type resource type * @return true if the resource name not exists, otherwise return false */ @Override public Result<Object> verifyResourceName(String fullName, ResourceType type, User loginUser) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (checkResourceExists(fullName, type.ordinal())) { logger.error("resource type:{} name:{} has exist, can't create again.", type, RegexUtils.escapeNRT(fullName)); putMsg(result, Status.RESOURCE_EXIST); } else { // query tenant Tenant tenant = tenantMapper.queryById(loginUser.getTenantId()); if (tenant != null) { String tenantCode = tenant.getTenantCode(); try { String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); if (HadoopUtils.getInstance().exists(hdfsFilename)) { logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, RegexUtils.escapeNRT(fullName), hdfsFilename); putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename); } } catch (Exception e) { logger.error(e.getMessage(),e); putMsg(result,Status.HDFS_OPERATION_ERROR); } } else { putMsg(result,Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST); } } return result; } /** * verify resource by full name or pid and type * @param fullName resource full name * @param id resource id * @param type resource type * @return true if the resource full name or pid not exists, otherwise return false */ @Override public Result<Object> queryResource(String fullName, Integer id, ResourceType type) { Result<Object> result = new Result<>(); if (StringUtils.isBlank(fullName) && id == null) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR); return result; } if (StringUtils.isNotBlank(fullName)) { List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } putMsg(result, Status.SUCCESS); result.setData(resourceList.get(0)); } else { Resource resource = resourcesMapper.selectById(id); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Resource parentResource = resourcesMapper.selectById(resource.getPid()); if (parentResource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } putMsg(result, Status.SUCCESS); result.setData(parentResource); } return result; } /** * view resource file online * * @param resourceId resource id * @param skipLineNum skip line number * @param limit limit * @return resource content */ @Override public Result<Object> readResource(int resourceId, int skipLineNum, int limit) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // get resource by id Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } //check preview or not by file suffix String nameSuffix = Files.getFileExtension(resource.getAlias()); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) 
{ return result; } // hdfs path String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName()); logger.info("resource hdfs path is {}", hdfsFileName); try { if (HadoopUtils.getInstance().exists(hdfsFileName)) { List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit); putMsg(result, Status.SUCCESS); Map<String, Object> map = new HashMap<>(); map.put(ALIAS, resource.getAlias()); map.put(CONTENT, String.join("\n", content)); result.setData(map); } else { logger.error("read file {} not exist in hdfs", hdfsFileName); putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName); } } catch (Exception e) { logger.error("Resource {} read failed", hdfsFileName, e); putMsg(result, Status.HDFS_OPERATION_ERROR); } return result; } /** * create resource file online * * @param loginUser login user * @param type resource type * @param fileName file name * @param fileSuffix file suffix * @param desc description * @param content content * @param pid pid * @param currentDir current directory * @return create result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } //check file suffix String nameSuffix = fileSuffix.trim(); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support create", nameSuffix); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String name = fileName.trim() + "." + nameSuffix; String fullName = currentDir.equals("/") ? 
String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); result = verifyResource(loginUser, type, fullName, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // save data Date now = new Date(); Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now); resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); result = uploadContentToHdfs(fullName, tenantCode, content); if (!result.getCode().equals(Status.SUCCESS.getCode())) { throw new ServiceException(result.getMsg()); } return result; } private Result<Object> checkResourceUploadStartupState() { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); // if resource upload startup if (!PropertyUtils.getResUploadStartupState()) { logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); putMsg(result, Status.HDFS_NOT_STARTUP); return result; } return result; } private Result<Object> verifyResource(User loginUser, ResourceType type, String fullName, int pid) { Result<Object> result = verifyResourceName(fullName, type, loginUser); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } return verifyPid(loginUser, pid); } private Result<Object> verifyPid(User loginUser, int pid) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (pid != -1) { Resource parentResource = resourcesMapper.selectById(pid); if (parentResource == null) { putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, parentResource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } } return result; } /** * updateProcessInstance resource * * @param resourceId resource id * @param content content * @return update result cod */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> updateResourceContent(int resourceId, String content) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("read file not exist, resource id {}", resourceId); putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } //check can edit by file suffix String nameSuffix = Files.getFileExtension(resource.getAlias()); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } resource.setSize(content.getBytes().length); resource.setUpdateTime(new Date()); resourcesMapper.updateById(resource); result = uploadContentToHdfs(resource.getFullName(), tenantCode, 
content); if (!result.getCode().equals(Status.SUCCESS.getCode())) { throw new ServiceException(result.getMsg()); } return result; } /** * @param resourceName resource name * @param tenantCode tenant code * @param content content * @return result */ private Result<Object> uploadContentToHdfs(String resourceName, String tenantCode, String content) { Result<Object> result = new Result<>(); String localFilename = ""; String hdfsFileName = ""; try { localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString()); if (!FileUtils.writeContent2File(content, localFilename)) { // write file fail logger.error("file {} fail, content is {}", localFilename, RegexUtils.escapeNRT(content)); putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } // get resource file hdfs path hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName); String resourcePath = HadoopUtils.getHdfsResDir(tenantCode); logger.info("resource hdfs path is {}, resource dir is {}", hdfsFileName, resourcePath); HadoopUtils hadoopUtils = HadoopUtils.getInstance(); if (!hadoopUtils.exists(resourcePath)) { // create if tenant dir not exists createTenantDirIfNotExists(tenantCode); } if (hadoopUtils.exists(hdfsFileName)) { hadoopUtils.delete(hdfsFileName, false); } hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true); } catch (Exception e) { logger.error(e.getMessage(), e); result.setCode(Status.HDFS_OPERATION_ERROR.getCode()); result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName)); return result; } putMsg(result, Status.SUCCESS); return result; } /** * download file * * @param resourceId resource id * @return resource content * @throws IOException exception */ @Override public org.springframework.core.io.Resource downloadResource(int resourceId) throws IOException { // if resource upload startup if (!PropertyUtils.getResUploadStartupState()) { logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); throw new ServiceException("hdfs not startup"); } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("download file not exist, resource id {}", resourceId); return null; } if (resource.isDirectory()) { logger.error("resource id {} is directory,can't download it", resourceId); throw new ServiceException("can't download directory"); } int userId = resource.getUserId(); User user = userMapper.selectById(userId); if (user == null) { logger.error("user id {} not exists", userId); throw new ServiceException(String.format("resource owner id %d not exist",userId)); } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (tenant == null) { logger.error("tenant id {} not exists", user.getTenantId()); throw new ServiceException(String.format("The tenant id %d of resource owner not exist",user.getTenantId())); } String tenantCode = tenant.getTenantCode(); String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName()); String localFileName = FileUtils.getDownloadFilename(resource.getAlias()); logger.info("resource hdfs path is {}, download local filename is {}", hdfsFileName, localFileName); HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true); return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName); } /** * list all file * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> authorizeResourceTree(User 
loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId); List<ResourceComponent> list; if (CollectionUtils.isNotEmpty(resourceList)) { Visitor visitor = new ResourceTreeVisitor(resourceList); list = visitor.visit().getChildren(); } else { list = new ArrayList<>(0); } result.put(Constants.DATA_LIST, list); putMsg(result, Status.SUCCESS); return result; } /** * unauthorized file * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId); List<Resource> list; if (resourceList != null && !resourceList.isEmpty()) { Set<Resource> resourceSet = new HashSet<>(resourceList); List<Resource> authedResourceList = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM); getAuthorizedResourceList(resourceSet, authedResourceList); list = new ArrayList<>(resourceSet); } else { list = new ArrayList<>(0); } Visitor visitor = new ResourceTreeVisitor(list); result.put(Constants.DATA_LIST, visitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return result; } /** * unauthorized udf function * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (isNotAdmin(loginUser, result)) { return result; } List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId); List<UdfFunc> resultList = new ArrayList<>(); Set<UdfFunc> udfFuncSet; if (CollectionUtils.isNotEmpty(udfFuncList)) { udfFuncSet = new HashSet<>(udfFuncList); List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId); getAuthorizedResourceList(udfFuncSet, authedUDFFuncList); resultList = new ArrayList<>(udfFuncSet); } result.put(Constants.DATA_LIST, resultList); putMsg(result, Status.SUCCESS); return result; } /** * authorized udf function * * @param loginUser login user * @param userId user id * @return authorized result code */ @Override public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId); result.put(Constants.DATA_LIST, udfFuncs); putMsg(result, Status.SUCCESS); return result; } /** * authorized file * * @param loginUser login user * @param userId user id * @return authorized result */ @Override public Map<String, Object> authorizedFile(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<Resource> authedResources = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM); Visitor visitor = new ResourceTreeVisitor(authedResources); String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS); logger.info(visit); String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS); logger.info(jsonTreeStr); result.put(Constants.DATA_LIST, visitor.visit().getChildren()); 
putMsg(result,Status.SUCCESS); return result; } /** * get authorized resource list * * @param resourceSet resource set * @param authedResourceList authorized resource list */ private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) { Set<?> authedResourceSet; if (CollectionUtils.isNotEmpty(authedResourceList)) { authedResourceSet = new HashSet<>(authedResourceList); resourceSet.removeAll(authedResourceSet); } } /** * get tenantCode by UserId * * @param userId user id * @param result return result * @return tenant code */ private String getTenantCode(int userId,Result<Object> result) { User user = userMapper.selectById(userId); if (user == null) { logger.error("user {} not exists", userId); putMsg(result, Status.USER_NOT_EXIST,userId); return null; } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (tenant == null) { logger.error("tenant not exists"); putMsg(result, Status.CURRENT_LOGIN_USER_TENANT_NOT_EXIST); return null; } return tenant.getTenantCode(); } /** * list all children id * @param resource resource * @param containSelf whether add self to children list * @return all children id */ List<Integer> listAllChildren(Resource resource,boolean containSelf) { List<Integer> childList = new ArrayList<>(); if (resource.getId() != -1 && containSelf) { childList.add(resource.getId()); } if (resource.isDirectory()) { listAllChildren(resource.getId(),childList); } return childList; } /** * list all children id * @param resourceId resource id * @param childList child list */ void listAllChildren(int resourceId,List<Integer> childList) { List<Integer> children = resourcesMapper.listChildren(resourceId); for (int childId : children) { childList.add(childId); listAllChildren(childId, childList); } } /** * query authored resource list (own and authorized) * @param loginUser login user * @param type ResourceType * @return all authored resource list */ private List<Resource> queryAuthoredResourceList(User loginUser, ResourceType type) { List<Resource> relationResources; int userId = loginUser.getId(); if (isAdmin(loginUser)) { userId = 0; relationResources = new ArrayList<>(); } else { // query resource relation relationResources = queryResourceList(userId, 0); } List<Resource> ownResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal()); ownResourceList.addAll(relationResources); return ownResourceList; } /** * query resource list by userId and perm * @param userId userId * @param perm perm * @return resource list */ private List<Resource> queryResourceList(Integer userId, int perm) { List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, perm); return CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourcesMapper.queryResourceListById(resIds); } }
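The delete() method above decides whether a resource subtree may be removed by recursively collecting every descendant id (listAllChildren) and intersecting that set with the ids referenced by released process definitions (retainAll). Below is a minimal, self-contained sketch of that pattern; the class and the data are hypothetical stand-ins, not the project's actual mappers.

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Hypothetical stand-in for the recursive child collection and the
// "is any descendant still referenced?" check used by delete().
public class SubtreeUsageCheckSketch {

    // parentId -> child ids; a stand-in for resourcesMapper.listChildren(id)
    static final Map<Integer, List<Integer>> CHILDREN = Map.of(
            1, List.of(2, 3),
            3, List.of(4));

    static void listAllChildren(int resourceId, List<Integer> childList) {
        for (int childId : CHILDREN.getOrDefault(resourceId, List.of())) {
            childList.add(childId);
            listAllChildren(childId, childList); // depth-first recursion
        }
    }

    public static void main(String[] args) {
        List<Integer> allChildren = new ArrayList<>();
        allChildren.add(1); // containSelf == true
        listAllChildren(1, allChildren); // -> [1, 2, 3, 4]

        // ids referenced by released process definitions (made-up data)
        Set<Integer> referencedIds = new HashSet<>(Set.of(4, 99));
        referencedIds.retainAll(allChildren); // intersection, as in delete()

        // a non-empty intersection means the subtree is still in use
        System.out.println("deletion blocked by: " + referencedIds); // prints [4]
    }
}
```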
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,154
[Bug] [API] MultipartFile resource [file] cannot be resolved to absolute file path
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Uploading a file through the resource management function fails: ``` java.io.FileNotFoundException: MultipartFile resource [file] cannot be resolved to absolute file path at org.springframework.core.io.AbstractResource.getFile(AbstractResource.java:138) ~[spring-core-5.3.12.jar:5.3.12] at org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(FileUtils.java:48) ~[classes/:na] at org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl.upload(ResourcesServiceImpl.java:599) [classes/:na] at org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl.createResource(ResourcesServiceImpl.java:238) [classes/:na] ``` ### What you expected to happen Files upload normally ### How to reproduce Upload a file through the resource management function ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7154
https://github.com/apache/dolphinscheduler/pull/7155
3c8d1c6da91b23aa9106353829469f40899a7bed
8f77fbbeb8e2261365ebc6921a8095eb56243afb
"2021-12-03T10:05:33Z"
java
"2021-12-06T06:13:53Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/FileUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.utils;

import org.apache.commons.io.IOUtils;

import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.io.Resource;
import org.springframework.core.io.UrlResource;
import org.springframework.web.multipart.MultipartFile;

/**
 * file utils
 */
public class FileUtils {
    private static final Logger logger = LoggerFactory.getLogger(FileUtils.class);

    /**
     * copy source file to target file
     *
     * @param file file
     * @param destFilename destination file name
     */
    public static void copyFile(MultipartFile file, String destFilename) {
        try {
            org.apache.commons.io.FileUtils.copyFile(file.getResource().getFile(), new File(destFilename));
        } catch (IOException e) {
            logger.error("failed to copy file , {} is empty file", file.getOriginalFilename(), e);
        }
    }

    /**
     * file to resource
     *
     * @param filename file name
     * @return resource
     * @throws MalformedURLException io exceptions
     */
    public static Resource file2Resource(String filename) throws MalformedURLException {
        Path file = Paths.get(filename);

        Resource resource = new UrlResource(file.toUri());
        if (resource.exists() || resource.isReadable()) {
            return resource;
        } else {
            logger.error("file can not read : {}", filename);
        }
        return null;
    }

    /**
     * file convert String
     * @param file MultipartFile file
     * @return file content string
     */
    public static String file2String(MultipartFile file) {
        try {
            return IOUtils.toString(file.getInputStream(), StandardCharsets.UTF_8);
        } catch (IOException e) {
            logger.error("file convert to string failed: {}", file.getName());
        }

        return "";
    }
}
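The copyFile method above is the pre-fix version this issue reports: MultipartFile.getResource().getFile() asks Spring to resolve the upload to an absolute file path, which fails with the FileNotFoundException shown in the report when the multipart upload is held in memory rather than backed by a file on disk. Below is a minimal sketch of a stream-based copy that sidesteps the path resolution; it illustrates the idea only and is not necessarily the exact change merged in PR #7155.

```java
import java.io.File;
import java.io.IOException;
import org.springframework.web.multipart.MultipartFile;

public class StreamCopySketch {

    /**
     * Copy a MultipartFile by reading its InputStream instead of asking
     * Spring to resolve the upload to an absolute file path.
     */
    public static void copyFile(MultipartFile file, String destFilename) throws IOException {
        // copyInputStreamToFile closes the stream and creates parent directories
        org.apache.commons.io.FileUtils.copyInputStreamToFile(
                file.getInputStream(), new File(destFilename));
    }
}
```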
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,154
[Bug] [API] MultipartFile resource [file] cannot be resolved to absolute file path
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Uploading a file through the resource management function fails: ``` java.io.FileNotFoundException: MultipartFile resource [file] cannot be resolved to absolute file path at org.springframework.core.io.AbstractResource.getFile(AbstractResource.java:138) ~[spring-core-5.3.12.jar:5.3.12] at org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(FileUtils.java:48) ~[classes/:na] at org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl.upload(ResourcesServiceImpl.java:599) [classes/:na] at org.apache.dolphinscheduler.api.service.impl.ResourcesServiceImpl.createResource(ResourcesServiceImpl.java:238) [classes/:na] ``` ### What you expected to happen Files upload normally ### How to reproduce Upload a file through the resource management function ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7154
https://github.com/apache/dolphinscheduler/pull/7155
3c8d1c6da91b23aa9106353829469f40899a7bed
8f77fbbeb8e2261365ebc6921a8095eb56243afb
"2021-12-03T10:05:33Z"
java
"2021-12-06T06:13:53Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/FileUtilsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.utils;

import org.apache.http.entity.ContentType;
import org.junit.*;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.io.Resource;
import org.springframework.mock.web.MockMultipartFile;
import org.springframework.web.multipart.MultipartFile;

import java.io.*;

import static org.junit.Assert.*;

public class FileUtilsTest {

    private static final Logger logger = LoggerFactory.getLogger(FileUtilsTest.class);

    @Rule
    public TemporaryFolder folder = null;

    private String rootPath = null;

    @Before
    public void setUp() throws Exception {
        folder = new TemporaryFolder();
        folder.create();
        rootPath = folder.getRoot().getAbsolutePath();
    }

    @After
    public void tearDown() throws Exception {
        folder.delete();
    }

    /**
     * Use mock to test copyFile
     * @throws IOException
     */
    @Test
    public void testCopyFile() throws IOException {

        //Define dest file path
        String src = rootPath + System.getProperty("file.separator") + "src.txt";
        String destFilename = rootPath + System.getProperty("file.separator") + "data.txt";
        logger.info("destFilename: " + destFilename);

        //Define InputStream for MultipartFile
        String data = "data text";
        org.apache.commons.io.FileUtils.writeStringToFile(new File(src), data);

        //Use Mockito to mock MultipartFile
        MultipartFile file = Mockito.mock(MultipartFile.class, Mockito.RETURNS_DEEP_STUBS);
        Mockito.when(file.getResource().getFile()).thenReturn(new File(src));

        //Invoke copyFile
        FileUtils.copyFile(file, destFilename);

        //Test file exists
        File destFile = new File(destFilename);
        assertTrue(destFile.exists());
    }

    @Test
    public void testFile2Resource() throws IOException {

        //Define dest file path
        String destFilename = rootPath + System.getProperty("file.separator") + "data.txt";
        logger.info("destFilename: " + destFilename);

        //Define test resource
        File file = folder.newFile("resource.txt");

        //Invoke file2Resource and test not null
        Resource resource = FileUtils.file2Resource(file.getAbsolutePath());
        assertNotNull(resource);

        //Invoke file2Resource and test null
        Resource resource1 = FileUtils.file2Resource(file.getAbsolutePath() + "abc");
        assertNull(resource1);
    }

    @Test
    public void testFile2String() throws IOException {
        String content = "123";
        org.apache.commons.io.FileUtils.writeStringToFile(new File("/tmp/task.json"), content);

        File file = new File("/tmp/task.json");
        FileInputStream fileInputStream = new FileInputStream("/tmp/task.json");
        MultipartFile multipartFile = new MockMultipartFile(file.getName(), file.getName(),
                ContentType.APPLICATION_OCTET_STREAM.toString(), fileInputStream);

        String resultStr = FileUtils.file2String(multipartFile);

        Assert.assertEquals(content, resultStr);

        boolean delete = file.delete();
        Assert.assertTrue(delete);
    }
}
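testCopyFile above depends on Mockito deep stubs so that the chained call file.getResource().getFile() can be stubbed in a single line; with a plain mock, file.getResource() would return null and the when(...) call would throw a NullPointerException. A minimal isolated sketch of the pattern, using hypothetical interfaces rather than Spring's types:

```java
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class DeepStubSketch {

    interface Inner { String value(); }

    interface Outer { Inner inner(); }

    public static void main(String[] args) {
        // RETURNS_DEEP_STUBS auto-mocks every intermediate object in a call chain
        Outer outer = mock(Outer.class, RETURNS_DEEP_STUBS);
        when(outer.inner().value()).thenReturn("stubbed");
        System.out.println(outer.inner().value()); // prints "stubbed"
    }
}
```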
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,195
[Bug] [DAO] The wrong separator is used in the file path of 'sql/upgrade/2.0.1-schema'
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I initialized the database through the class of UpgradeDolphinScheduler, I encountered an error. The details is as follows: ``` java.lang.IllegalStateException: Failed to execute CommandLineRunner at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:794) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:775) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.SpringApplication.run(SpringApplication.java:345) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.builder.SpringApplicationBuilder.run(SpringApplicationBuilder.java:143) [spring-boot-2.5.6.jar:2.5.6] at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler.main(CreateDolphinScheduler.java:40) [dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] Caused by: java.lang.RuntimeException: java.lang.NumberFormatException: For input string: "1-schema" at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.lambda$getAllSchemaList$0(SchemaUtils.java:71) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at java.util.TimSort.binarySort(TimSort.java:296) ~[na:1.8.0_312] at java.util.TimSort.sort(TimSort.java:221) ~[na:1.8.0_312] at java.util.Arrays.sort(Arrays.java:1512) ~[na:1.8.0_312] at java.util.stream.SortedOps$SizedRefSortingSink.end(SortedOps.java:353) ~[na:1.8.0_312] at java.util.stream.Sink$ChainedReference.end(Sink.java:258) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:483) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) ~[na:1.8.0_312] at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[na:1.8.0_312] at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) ~[na:1.8.0_312] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.getAllSchemaList(SchemaUtils.java:73) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.DolphinSchedulerManager.upgradeDolphinScheduler(DolphinSchedulerManager.java:75) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler$CreateRunner.run(CreateDolphinScheduler.java:58) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:791) ~[spring-boot-2.5.6.jar:2.5.6] ... 4 common frames omitted Caused by: java.lang.NumberFormatException: For input string: "1-schema" at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65) ~[na:1.8.0_312] at java.lang.Integer.parseInt(Integer.java:580) ~[na:1.8.0_312] at java.lang.Integer.parseInt(Integer.java:615) ~[na:1.8.0_312] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.isAGreatVersion(SchemaUtils.java:92) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.lambda$getAllSchemaList$0(SchemaUtils.java:64) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] ... 18 common frames omitted ``` ### What you expected to happen I expect that the process of initializing database can be executed exactly and correctly. 
### How to reproduce I've found the root cause: the wrong separator is used in the file path of 'sql/upgrade/2.0.1-schema'. ![image](https://user-images.githubusercontent.com/4928204/144785147-502b4b28-1ee3-41eb-966e-8f44883903cd.png) ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7195
https://github.com/apache/dolphinscheduler/pull/7200
c7b06b007922c387f7edcf4936a2568a04d35573
705ba74f529a50dbf700e528701b8c19bd65f028
"2021-12-06T04:00:37Z"
java
"2021-12-06T09:29:11Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.1_schema/mysql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,195
[Bug] [DAO] The wrong separator is used in the file path of 'sql/upgrade/2.0.1-schema'
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I initialized the database through the class of UpgradeDolphinScheduler, I encountered an error. The details is as follows: ``` java.lang.IllegalStateException: Failed to execute CommandLineRunner at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:794) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:775) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.SpringApplication.run(SpringApplication.java:345) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.builder.SpringApplicationBuilder.run(SpringApplicationBuilder.java:143) [spring-boot-2.5.6.jar:2.5.6] at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler.main(CreateDolphinScheduler.java:40) [dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] Caused by: java.lang.RuntimeException: java.lang.NumberFormatException: For input string: "1-schema" at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.lambda$getAllSchemaList$0(SchemaUtils.java:71) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at java.util.TimSort.binarySort(TimSort.java:296) ~[na:1.8.0_312] at java.util.TimSort.sort(TimSort.java:221) ~[na:1.8.0_312] at java.util.Arrays.sort(Arrays.java:1512) ~[na:1.8.0_312] at java.util.stream.SortedOps$SizedRefSortingSink.end(SortedOps.java:353) ~[na:1.8.0_312] at java.util.stream.Sink$ChainedReference.end(Sink.java:258) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:483) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) ~[na:1.8.0_312] at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[na:1.8.0_312] at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) ~[na:1.8.0_312] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.getAllSchemaList(SchemaUtils.java:73) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.DolphinSchedulerManager.upgradeDolphinScheduler(DolphinSchedulerManager.java:75) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler$CreateRunner.run(CreateDolphinScheduler.java:58) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:791) ~[spring-boot-2.5.6.jar:2.5.6] ... 4 common frames omitted Caused by: java.lang.NumberFormatException: For input string: "1-schema" at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65) ~[na:1.8.0_312] at java.lang.Integer.parseInt(Integer.java:580) ~[na:1.8.0_312] at java.lang.Integer.parseInt(Integer.java:615) ~[na:1.8.0_312] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.isAGreatVersion(SchemaUtils.java:92) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.lambda$getAllSchemaList$0(SchemaUtils.java:64) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] ... 18 common frames omitted ``` ### What you expected to happen I expect that the process of initializing database can be executed exactly and correctly. 
### How to reproduce I've found the root cause: the wrong separator is used in the file path of 'sql/upgrade/2.0.1-schema'. ![image](https://user-images.githubusercontent.com/4928204/144785147-502b4b28-1ee3-41eb-966e-8f44883903cd.png) ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7195
https://github.com/apache/dolphinscheduler/pull/7200
c7b06b007922c387f7edcf4936a2568a04d35573
705ba74f529a50dbf700e528701b8c19bd65f028
"2021-12-06T04:00:37Z"
java
"2021-12-06T09:29:11Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.1_schema/postgresql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,195
[Bug] [DAO] The wrong separator is used in the file path of 'sql/upgrade/2.0.1-schema'
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I initialized the database through the class of UpgradeDolphinScheduler, I encountered an error. The details is as follows: ``` java.lang.IllegalStateException: Failed to execute CommandLineRunner at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:794) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:775) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.SpringApplication.run(SpringApplication.java:345) ~[spring-boot-2.5.6.jar:2.5.6] at org.springframework.boot.builder.SpringApplicationBuilder.run(SpringApplicationBuilder.java:143) [spring-boot-2.5.6.jar:2.5.6] at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler.main(CreateDolphinScheduler.java:40) [dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] Caused by: java.lang.RuntimeException: java.lang.NumberFormatException: For input string: "1-schema" at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.lambda$getAllSchemaList$0(SchemaUtils.java:71) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at java.util.TimSort.binarySort(TimSort.java:296) ~[na:1.8.0_312] at java.util.TimSort.sort(TimSort.java:221) ~[na:1.8.0_312] at java.util.Arrays.sort(Arrays.java:1512) ~[na:1.8.0_312] at java.util.stream.SortedOps$SizedRefSortingSink.end(SortedOps.java:353) ~[na:1.8.0_312] at java.util.stream.Sink$ChainedReference.end(Sink.java:258) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:483) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:472) ~[na:1.8.0_312] at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) ~[na:1.8.0_312] at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[na:1.8.0_312] at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:566) ~[na:1.8.0_312] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.getAllSchemaList(SchemaUtils.java:73) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.DolphinSchedulerManager.upgradeDolphinScheduler(DolphinSchedulerManager.java:75) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler$CreateRunner.run(CreateDolphinScheduler.java:58) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:791) ~[spring-boot-2.5.6.jar:2.5.6] ... 4 common frames omitted Caused by: java.lang.NumberFormatException: For input string: "1-schema" at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65) ~[na:1.8.0_312] at java.lang.Integer.parseInt(Integer.java:580) ~[na:1.8.0_312] at java.lang.Integer.parseInt(Integer.java:615) ~[na:1.8.0_312] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.isAGreatVersion(SchemaUtils.java:92) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] at org.apache.dolphinscheduler.dao.upgrade.SchemaUtils.lambda$getAllSchemaList$0(SchemaUtils.java:64) ~[dolphinscheduler-dao-2.0.0-SNAPSHOT.jar:2.0.0-SNAPSHOT] ... 18 common frames omitted ``` ### What you expected to happen I expect that the process of initializing database can be executed exactly and correctly. 
### How to reproduce I've found the root cause: the wrong separator is used in the file path of 'sql/upgrade/2.0.1-schema'. ![image](https://user-images.githubusercontent.com/4928204/144785147-502b4b28-1ee3-41eb-966e-8f44883903cd.png) ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7195
https://github.com/apache/dolphinscheduler/pull/7200
c7b06b007922c387f7edcf4936a2568a04d35573
705ba74f529a50dbf700e528701b8c19bd65f028
"2021-12-06T04:00:37Z"
java
"2021-12-06T09:29:11Z"
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/upgrade/SchemaUtilsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.dao.upgrade;

import org.junit.Assert;
import org.junit.Test;

public class SchemaUtilsTest {

    @Test
    public void testIsAGreatVersion() {
        // param is null
        try {
            SchemaUtils.isAGreatVersion(null, null);
        } catch (RuntimeException e) {
            Assert.assertEquals("schemaVersion or version is empty", e.getMessage());
        }

        // param is ""
        try {
            SchemaUtils.isAGreatVersion("", "");
        } catch (RuntimeException e) {
            Assert.assertEquals("schemaVersion or version is empty", e.getMessage());
        }
        Assert.assertFalse(SchemaUtils.isAGreatVersion("1", "1"));
        Assert.assertTrue(SchemaUtils.isAGreatVersion("2", "1"));
        Assert.assertTrue(SchemaUtils.isAGreatVersion("1.1", "1"));
        Assert.assertTrue(SchemaUtils.isAGreatVersion("1.1", "1.0.1"));
        Assert.assertFalse(SchemaUtils.isAGreatVersion("1.1", "1.2"));
        Assert.assertTrue(SchemaUtils.isAGreatVersion("1.1.1", "1.1"));
        Assert.assertTrue(SchemaUtils.isAGreatVersion("10.1.1", "1.01.100"));
        try {
            SchemaUtils.isAGreatVersion("10.1.1", ".1");
            Assert.fail("Should fail");
        } catch (Exception ignored) {
            // This is expected
        }
        try {
            SchemaUtils.isAGreatVersion("a.1.1", "b.1");
            Assert.fail("Should fail");
        } catch (Exception ignored) {
            // This is expected
        }
    }
}
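To make the failure mode concrete: schema directories are expected to be named '<version>_schema' (for example '2.0.1_schema'), so the version prefix is taken by splitting the directory name on '_'; with the wrong separator '-', the whole name survives the split and Integer.parseInt eventually receives '1-schema', exactly as in the stack trace above. A standalone reproduction sketch follows; it is a simplified stand-in for the parsing inside SchemaUtils, not the class itself.

```java
public class SchemaDirParseSketch {

    public static void main(String[] args) {
        // expected layout: "<version>_schema"
        String good = "2.0.1_schema".split("_")[0]; // -> "2.0.1"
        String bad = "2.0.1-schema".split("_")[0];  // -> "2.0.1-schema" (no '_' to split on)

        for (String part : good.split("\\.")) {
            Integer.parseInt(part); // 2, 0, 1: all parse fine
        }
        for (String part : bad.split("\\.")) {
            // third part is "1-schema" -> NumberFormatException,
            // matching "For input string: \"1-schema\"" in the report
            Integer.parseInt(part);
        }
    }
}
```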
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,206
[Bug] [MasterServer] processInstance always running when task was timeout and failed.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened when a task timed out and failed, the processInstance kept running without retrying or finishing. ### What you expected to happen The processInstance can go to retry and finish. ### How to reproduce ![image](https://user-images.githubusercontent.com/11962619/144804188-bae18b81-4e9d-437e-b7cc-d71bba251e93.png) ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7206
https://github.com/apache/dolphinscheduler/pull/7207
705ba74f529a50dbf700e528701b8c19bd65f028
ff9bc806ac04d07ff0581e7a88514f943310d2c9
"2021-12-06T07:20:55Z"
java
"2021-12-06T10:20:56Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerService.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.runner;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.config.NettyClientConfig;
import org.apache.dolphinscheduler.server.master.cache.ProcessInstanceExecCacheManager;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient;
import org.apache.dolphinscheduler.server.master.registry.ServerNodeManager;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;

import org.apache.commons.collections4.CollectionUtils;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * master scheduler thread
 */
@Service
public class MasterSchedulerService extends Thread {

    /**
     * logger of MasterSchedulerService
     */
    private static final Logger logger = LoggerFactory.getLogger(MasterSchedulerService.class);

    /**
     * dolphinscheduler database interface
     */
    @Autowired
    private ProcessService processService;

    @Autowired
    private TaskProcessorFactory taskProcessorFactory;

    /**
     * zookeeper master client
     */
    @Autowired
    private MasterRegistryClient masterRegistryClient;

    /**
     * master config
     */
    @Autowired
    private MasterConfig masterConfig;

    /**
     * alert manager
     */
    @Autowired
    private ProcessAlertManager processAlertManager;

    /**
     * netty remoting client
     */
    private NettyRemotingClient nettyRemotingClient;

    @Autowired
    NettyExecutorManager nettyExecutorManager;

    /**
     * master prepare exec service
     */
    private ThreadPoolExecutor masterPrepareExecService;

    /**
     * master exec service
     */
    private ThreadPoolExecutor masterExecService;

    @Autowired
    private ProcessInstanceExecCacheManager processInstanceExecCacheManager;

    /**
     * process timeout check list
     */
    ConcurrentHashMap<Integer, ProcessInstance> processTimeoutCheckList = new ConcurrentHashMap<>();

    /**
     * task time out checkout list
     */
    ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList = new ConcurrentHashMap<>();

    private StateWheelExecuteThread stateWheelExecuteThread;

    /**
     * constructor of MasterSchedulerService
     */
    public void init() {
        this.masterPrepareExecService = (ThreadPoolExecutor) ThreadUtils.newDaemonFixedThreadExecutor("Master-Pre-Exec-Thread", masterConfig.getPreExecThreads());
        this.masterExecService = (ThreadPoolExecutor) ThreadUtils.newDaemonFixedThreadExecutor("Master-Exec-Thread", masterConfig.getExecThreads());
        NettyClientConfig clientConfig = new NettyClientConfig();
        this.nettyRemotingClient = new NettyRemotingClient(clientConfig);

        stateWheelExecuteThread = new StateWheelExecuteThread(processTimeoutCheckList,
                taskTimeoutCheckList,
                this.processInstanceExecCacheManager,
                masterConfig.getStateWheelInterval() * Constants.SLEEP_TIME_MILLIS);
    }

    @Override
    public synchronized void start() {
        super.setName("MasterSchedulerService");
        super.start();
        this.stateWheelExecuteThread.start();
    }

    public void close() {
        masterExecService.shutdown();
        boolean terminated = false;
        try {
            terminated = masterExecService.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException ignore) {
            Thread.currentThread().interrupt();
        }
        if (!terminated) {
            logger.warn("masterExecService shutdown without terminated, increase await time");
        }
        nettyRemotingClient.close();
        logger.info("master schedule service stopped...");
    }

    /**
     * run of MasterSchedulerService
     */
    @Override
    public void run() {
        logger.info("master scheduler started");
        while (Stopper.isRunning()) {
            try {
                boolean runCheckFlag = OSUtils.checkResource(masterConfig.getMaxCpuLoadAvg(), masterConfig.getReservedMemory());
                if (!runCheckFlag) {
                    Thread.sleep(Constants.SLEEP_TIME_MILLIS);
                    continue;
                }
                scheduleProcess();
            } catch (Exception e) {
                logger.error("master scheduler thread error", e);
            }
        }
    }

    /**
     * 1. get command by slot
     * 2. do not handle command if slot is empty
     */
    private void scheduleProcess() throws Exception {
        List<Command> commands = findCommands();
        if (CollectionUtils.isEmpty(commands)) {
            // indicates that there is no command, sleep for 1s
            Thread.sleep(Constants.SLEEP_TIME_MILLIS);
            return;
        }

        List<ProcessInstance> processInstances = command2ProcessInstance(commands);
        if (CollectionUtils.isEmpty(processInstances)) {
            return;
        }

        for (ProcessInstance processInstance : processInstances) {
            if (processInstance == null) {
                continue;
            }

            WorkflowExecuteThread workflowExecuteThread = new WorkflowExecuteThread(
                    processInstance
                    , processService
                    , nettyExecutorManager
                    , processAlertManager
                    , masterConfig
                    , taskTimeoutCheckList
                    , taskProcessorFactory);

            this.processInstanceExecCacheManager.cache(processInstance.getId(), workflowExecuteThread);
            if (processInstance.getTimeout() > 0) {
                this.processTimeoutCheckList.put(processInstance.getId(), processInstance);
            }
            masterExecService.execute(workflowExecuteThread);
        }
    }

    private List<ProcessInstance> command2ProcessInstance(List<Command> commands) {
        if (CollectionUtils.isEmpty(commands)) {
            return null;
        }

        ProcessInstance[] processInstances = new ProcessInstance[commands.size()];
        CountDownLatch latch = new CountDownLatch(commands.size());
        for (int i = 0; i < commands.size(); i++) {
            int index = i;
            this.masterPrepareExecService.execute(() -> {
                Command command = commands.get(index);
                // slot check again
                if (!slotCheck(command)) {
                    latch.countDown();
                    return;
                }

                try {
                    ProcessInstance processInstance = processService.handleCommand(logger, getLocalAddress(), command);
                    if (processInstance != null) {
                        processInstances[index] = processInstance;
                        logger.info("handle command {} end, create process instance {}", command.getId(), processInstance.getId());
                    }
                } catch (Exception e) {
                    logger.error("scan command error ", e);
                    processService.moveToErrorCommand(command, e.toString());
                } finally {
                    latch.countDown();
                }
            });
        }

        try {
            // make sure to finish handling command each time before next scan
            latch.await();
        } catch (InterruptedException e) {
            logger.error("countDownLatch await error ", e);
        }

        return Arrays.asList(processInstances);
    }

    private List<Command> findCommands() {
        int pageNumber = 0;
        int pageSize = masterConfig.getFetchCommandNum();
        List<Command> result = new ArrayList<>();
        while (Stopper.isRunning()) {
            if (ServerNodeManager.MASTER_SIZE == 0) {
                return result;
            }
            List<Command> commandList = processService.findCommandPage(pageSize, pageNumber);
            if (commandList.size() == 0) {
                return result;
            }
            for (Command command : commandList) {
                if (slotCheck(command)) {
                    result.add(command);
                }
            }
            if (CollectionUtils.isNotEmpty(result)) {
                logger.info("find {} commands, slot:{}", result.size(), ServerNodeManager.getSlot());
                break;
            }
            pageNumber += 1;
        }
        return result;
    }

    private boolean slotCheck(Command command) {
        int slot = ServerNodeManager.getSlot();
        if (ServerNodeManager.MASTER_SIZE != 0 && command.getId() % ServerNodeManager.MASTER_SIZE == slot) {
            return true;
        }
        return false;
    }

    private String getLocalAddress() {
        return NetUtils.getAddr(masterConfig.getListenPort());
    }
}
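The findCommands/slotCheck pair in the file above shards the command table across the registered masters: a master only claims a command whose id, modulo ServerNodeManager.MASTER_SIZE, equals its own registry slot, and command2ProcessInstance re-checks the slot before handling because the master set (and therefore the slot assignment) can change between scan and handling. Below is a minimal, self-contained sketch of that distribution rule; SlotShardingSketch and its driver are illustrative stand-ins, not DolphinScheduler APIs — only the `commandId % masterSize == slot` rule is taken from the file above.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class SlotShardingSketch {

    /** Returns true when the master owning this slot should claim the command id. */
    static boolean ownsCommand(int commandId, int masterSize, int slot) {
        // Each command maps to exactly one live master: the one whose slot
        // equals the command id modulo the number of registered masters.
        return masterSize != 0 && commandId % masterSize == slot;
    }

    public static void main(String[] args) {
        int masterSize = 3; // three masters registered, slots 0..2
        for (int slot = 0; slot < masterSize; slot++) {
            final int s = slot; // effectively final copy for the lambda
            List<Integer> owned = IntStream.rangeClosed(1, 10)
                    .filter(id -> ownsCommand(id, masterSize, s))
                    .boxed()
                    .collect(Collectors.toList());
            System.out.println("slot " + s + " handles commands " + owned);
        }
    }
}

With three masters, command ids 3, 6 and 9 land on slot 0, ids 1, 4, 7 and 10 on slot 1, and ids 2, 5 and 8 on slot 2, so every command is claimed by exactly one master without any coordination beyond the shared slot assignment.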
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,206
[Bug] [MasterServer] processInstance always running when task was timeout and failed.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

When a task timed out and failed, the processInstance kept running instead of retrying the task and finishing.

### What you expected to happen

The processInstance should retry the failed task and then finish.

### How to reproduce

![image](https://user-images.githubusercontent.com/11962619/144804188-bae18b81-4e9d-437e-b7cc-d71bba251e93.png)

### Anything else

_No response_

### Version

2.0.0

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7206
https://github.com/apache/dolphinscheduler/pull/7207
705ba74f529a50dbf700e528701b8c19bd65f028
ff9bc806ac04d07ff0581e7a88514f943310d2c9
"2021-12-06T07:20:55Z"
java
"2021-12-06T10:20:56Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/StateWheelExecuteThread.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.runner;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.master.cache.ProcessInstanceExecCacheManager;

import org.apache.hadoop.util.ThreadUtil;

import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * 1. timeout check wheel
 * 2. dependent task check wheel
 */
public class StateWheelExecuteThread extends Thread {

    private static final Logger logger = LoggerFactory.getLogger(StateWheelExecuteThread.class);

    ConcurrentHashMap<Integer, ProcessInstance> processInstanceCheckList;
    ConcurrentHashMap<Integer, TaskInstance> taskInstanceCheckList;
    private ProcessInstanceExecCacheManager processInstanceExecCacheManager;

    private int stateCheckIntervalSecs;

    public StateWheelExecuteThread(ConcurrentHashMap<Integer, ProcessInstance> processInstances,
                                   ConcurrentHashMap<Integer, TaskInstance> taskInstances,
                                   ProcessInstanceExecCacheManager processInstanceExecCacheManager,
                                   int stateCheckIntervalSecs) {
        this.processInstanceCheckList = processInstances;
        this.taskInstanceCheckList = taskInstances;
        this.processInstanceExecCacheManager = processInstanceExecCacheManager;
        this.stateCheckIntervalSecs = stateCheckIntervalSecs;
    }

    @Override
    public void run() {
        logger.info("state wheel thread start");
        while (Stopper.isRunning()) {
            try {
                checkProcess();
                checkTask();
            } catch (Exception e) {
                logger.error("state wheel thread check error:", e);
            }
            ThreadUtil.sleepAtLeastIgnoreInterrupts(stateCheckIntervalSecs);
        }
    }

    public boolean addProcess(ProcessInstance processInstance) {
        this.processInstanceCheckList.put(processInstance.getId(), processInstance);
        return true;
    }

    public boolean addTask(TaskInstance taskInstance) {
        this.taskInstanceCheckList.put(taskInstance.getId(), taskInstance);
        return true;
    }

    private void checkTask() {
        if (taskInstanceCheckList.isEmpty()) {
            return;
        }

        for (TaskInstance taskInstance : this.taskInstanceCheckList.values()) {
            if (TimeoutFlag.OPEN == taskInstance.getTaskDefine().getTimeoutFlag()) {
                long timeRemain = DateUtils.getRemainTime(taskInstance.getStartTime(), taskInstance.getTaskDefine().getTimeout() * Constants.SEC_2_MINUTES_TIME_UNIT);
                if (0 >= timeRemain && processTimeout(taskInstance)) {
                    taskInstanceCheckList.remove(taskInstance.getId());
                }
            }
            if (taskInstance.taskCanRetry() && taskInstance.retryTaskIntervalOverTime()) {
                processDependCheck(taskInstance);
                taskInstanceCheckList.remove(taskInstance.getId());
            }
            if (taskInstance.isSubProcess() || taskInstance.isDependTask()) {
                processDependCheck(taskInstance);
            }
        }
    }

    private void checkProcess() {
        if (processInstanceCheckList.isEmpty()) {
            return;
        }

        for (ProcessInstance processInstance : this.processInstanceCheckList.values()) {
            long timeRemain = DateUtils.getRemainTime(processInstance.getStartTime(), processInstance.getTimeout() * Constants.SEC_2_MINUTES_TIME_UNIT);
            if (0 <= timeRemain && processTimeout(processInstance)) {
                processInstanceCheckList.remove(processInstance.getId());
            }
        }
    }

    private void putEvent(StateEvent stateEvent) {
        if (!processInstanceExecCacheManager.contains(stateEvent.getProcessInstanceId())) {
            return;
        }
        WorkflowExecuteThread workflowExecuteThread = this.processInstanceExecCacheManager.getByProcessInstanceId(stateEvent.getProcessInstanceId());
        workflowExecuteThread.addStateEvent(stateEvent);
    }

    private boolean processDependCheck(TaskInstance taskInstance) {
        StateEvent stateEvent = new StateEvent();
        stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
        stateEvent.setProcessInstanceId(taskInstance.getProcessInstanceId());
        stateEvent.setTaskInstanceId(taskInstance.getId());
        stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
        putEvent(stateEvent);
        return true;
    }

    private boolean processTimeout(TaskInstance taskInstance) {
        StateEvent stateEvent = new StateEvent();
        stateEvent.setType(StateEventType.TASK_TIMEOUT);
        stateEvent.setProcessInstanceId(taskInstance.getProcessInstanceId());
        stateEvent.setTaskInstanceId(taskInstance.getId());
        putEvent(stateEvent);
        return true;
    }

    private boolean processTimeout(ProcessInstance processInstance) {
        StateEvent stateEvent = new StateEvent();
        stateEvent.setType(StateEventType.PROCESS_TIMEOUT);
        stateEvent.setProcessInstanceId(processInstance.getId());
        putEvent(stateEvent);
        return true;
    }
}
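In checkTask above, the timeout branch drops a timed-out task from taskInstanceCheckList as soon as the TASK_TIMEOUT event is emitted, so a task that times out and subsequently fails is no longer on the wheel when its retry interval elapses — consistent with the symptom this issue reports, where the process instance neither retries nor finishes. (Note also that checkProcess fires while time still remains, `0 <= timeRemain`, the opposite comparison of the task check; whether that is intentional is not clear from this snapshot.) The toy model below only illustrates the ordering concern — decide the retry before dropping a task from the wheel. It is not the change made in PR #7207, and Task and the emit* methods are hypothetical stand-ins for TaskInstance and putEvent.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Toy model of one state-wheel pass over a hypothetical Task type.
public class TimeoutWheelSketch {

    static class Task {
        int id;
        boolean timedOut;   // timeout deadline has passed
        boolean canRetry;   // failed with retries left
        boolean retryDue;   // retry interval has elapsed
        Task(int id, boolean timedOut, boolean canRetry, boolean retryDue) {
            this.id = id; this.timedOut = timedOut;
            this.canRetry = canRetry; this.retryDue = retryDue;
        }
    }

    static final Map<Integer, Task> checkList = new ConcurrentHashMap<>();

    static void emitTimeoutEvent(Task t) {
        System.out.println("task " + t.id + ": TASK_TIMEOUT event");
    }

    static void emitRetryEvent(Task t) {
        System.out.println("task " + t.id + ": TASK_STATE_CHANGE event (retry)");
    }

    static void checkTaskOnce() {
        for (Task t : checkList.values()) {
            if (t.timedOut) {
                emitTimeoutEvent(t);
            }
            // Decide the retry *before* dropping the task from the wheel, so a
            // timed-out-then-failed task is not lost between two passes.
            if (t.canRetry && t.retryDue) {
                emitRetryEvent(t);
                checkList.remove(t.id);
            } else if (t.timedOut && !t.canRetry) {
                checkList.remove(t.id);
            }
        }
    }

    public static void main(String[] args) {
        checkList.put(1, new Task(1, true, true, true));
        checkTaskOnce();
        System.out.println("still on wheel: " + checkList.keySet());
    }
}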
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,206
[Bug] [MasterServer] processInstance always running when task was timeout and failed.
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

When a task timed out and failed, the processInstance kept running instead of retrying the task and finishing.

### What you expected to happen

The processInstance should retry the failed task and then finish.

### How to reproduce

![image](https://user-images.githubusercontent.com/11962619/144804188-bae18b81-4e9d-437e-b7cc-d71bba251e93.png)

### Anything else

_No response_

### Version

2.0.0

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7206
https://github.com/apache/dolphinscheduler/pull/7207
705ba74f529a50dbf700e528701b8c19bd65f028
ff9bc806ac04d07ff0581e7a88514f943310d2c9
"2021-12-06T07:20:55Z"
java
"2021-12-06T10:20:56Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TaskGroupQueueStatus; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskGroupQueue; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.command.HostUpdateCommand; import org.apache.dolphinscheduler.remote.utils.Host; import 
org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor; import org.apache.dolphinscheduler.server.master.runner.task.TaskAction; import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; /** * master exec thread,split dag */ public class WorkflowExecuteThread implements Runnable { /** * logger of WorkflowExecuteThread */ private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class); /** * master config */ private MasterConfig masterConfig; /** * process service */ private ProcessService processService; /** * alert manager */ private ProcessAlertManager processAlertManager; /** * netty executor manager */ private NettyExecutorManager nettyExecutorManager; /** * process instance */ private ProcessInstance processInstance; /** * process definition */ private ProcessDefinition processDefinition; /** * task processor */ private TaskProcessorFactory taskProcessorFactory; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * key of workflow */ private String key; /** * start flag, true: start nodes submit completely */ private boolean isStart = false; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * task instance hash map, taskId as key */ private Map<Integer, TaskInstance> taskInstanceMap = new ConcurrentHashMap<>(); /** * running TaskNode, taskId as key */ private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>(); /** * valid task map, taskCode as key, taskId as value */ private Map<String, Integer> validTaskMap = new ConcurrentHashMap<>(); /** * error task map, taskCode as key, taskId as value */ private Map<String, Integer> errorTaskMap = new ConcurrentHashMap<>(); /** * complete task map, taskCode as key, taskId as value */ private Map<String, Integer> completeTaskMap = new ConcurrentHashMap<>(); /** * depend failed task map, taskCode as key, taskId as value */ private Map<String, Integer> dependFailedTaskMap = new ConcurrentHashMap<>(); /** * forbidden task map, code as key */ private Map<String, TaskNode> forbiddenTaskMap = new ConcurrentHashMap<>(); /** * skip task map, code as key */ private Map<String, TaskNode> skipTaskNodeMap = new ConcurrentHashMap<>(); /** * complement date list */ private List<Date> complementListDate = Lists.newLinkedList(); /** * task timeout check list */ private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList; /** * state event queue */ private ConcurrentLinkedQueue<StateEvent> stateEvents = new 
ConcurrentLinkedQueue<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * constructor of WorkflowExecuteThread * * @param processInstance processInstance * @param processService processService * @param nettyExecutorManager nettyExecutorManager * @param processAlertManager processAlertManager * @param masterConfig masterConfig * @param taskTimeoutCheckList taskTimeoutCheckList * @param taskProcessorFactory taskProcessorFactory */ public WorkflowExecuteThread(ProcessInstance processInstance , ProcessService processService , NettyExecutorManager nettyExecutorManager , ProcessAlertManager processAlertManager , MasterConfig masterConfig , ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList , TaskProcessorFactory taskProcessorFactory) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; this.nettyExecutorManager = nettyExecutorManager; this.processAlertManager = processAlertManager; this.taskTimeoutCheckList = taskTimeoutCheckList; this.taskProcessorFactory = taskProcessorFactory; } @Override public void run() { try { if (!this.isStart()) { startProcess(); } else { handleEvents(); } } catch (Exception e) { logger.error("handler error:", e); } } /** * the process start nodes are submitted completely. */ public boolean isStart() { return this.isStart; } private void handleEvents() { while (!this.stateEvents.isEmpty()) { try { StateEvent stateEvent = this.stateEvents.peek(); if (stateEventHandler(stateEvent)) { this.stateEvents.remove(stateEvent); } } catch (Exception e) { logger.error("state handle error:", e); } } } public String getKey() { if (StringUtils.isNotEmpty(key) || this.processDefinition == null) { return key; } key = String.format("%d_%d_%d", this.processDefinition.getCode(), this.processDefinition.getVersion(), this.processInstance.getId()); return key; } public boolean addStateEvent(StateEvent stateEvent) { if (processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.info("state event would be abounded :{}", stateEvent.toString()); return false; } this.stateEvents.add(stateEvent); return true; } public int eventSize() { return this.stateEvents.size(); } public ProcessInstance getProcessInstance() { return this.processInstance; } private boolean stateEventHandler(StateEvent stateEvent) { logger.info("process event: {}", stateEvent.toString()); if (!checkProcessInstance(stateEvent)) { return false; } boolean result = false; switch (stateEvent.getType()) { case PROCESS_STATE_CHANGE: result = processStateChangeHandler(stateEvent); break; case TASK_STATE_CHANGE: result = taskStateChangeHandler(stateEvent); break; case PROCESS_TIMEOUT: result = processTimeout(); break; case TASK_TIMEOUT: result = taskTimeout(stateEvent); break; case WAIT_TASK_GROUP: result = checkForceStartAndWakeUp(stateEvent); break; default: break; } if (result) { this.stateEvents.remove(stateEvent); } return result; } private boolean checkForceStartAndWakeUp(StateEvent stateEvent) { TaskGroupQueue taskGroupQueue = this.processService.loadTaskGroupQueue(stateEvent.getTaskInstanceId()); if (taskGroupQueue.getForceStart() == Flag.YES.getCode()) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); ProcessInstance processInstance = 
this.processService.findProcessInstanceById(taskInstance.getProcessInstanceId()); taskProcessor.dispatch(taskInstance, processInstance); this.processService.updateTaskGroupQueueStatus(taskGroupQueue.getId(), TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()); return true; } if (taskGroupQueue.getInQueue() == Flag.YES.getCode()) { boolean acquireTaskGroup = processService.acquireTaskGroupAgain(taskGroupQueue); if (acquireTaskGroup) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); ProcessInstance processInstance = this.processService.findProcessInstanceById(taskInstance.getProcessInstanceId()); taskProcessor.dispatch(taskInstance, processInstance); return true; } } return false; } private boolean taskTimeout(StateEvent stateEvent) { if (!checkTaskInstanceByStateEvent(stateEvent)) { return true; } TaskInstance taskInstance = taskInstanceMap.get(stateEvent.getTaskInstanceId()); if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) { return true; } TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy(); if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); taskProcessor.action(TaskAction.TIMEOUT); return false; } else { processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine()); return true; } } private boolean processTimeout() { this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition); return true; } private boolean taskStateChangeHandler(StateEvent stateEvent) { if (!checkTaskInstanceByStateEvent(stateEvent)) { return true; } TaskInstance task = getTaskInstance(stateEvent.getTaskInstanceId()); if (task.getState() == null) { logger.error("task state is null, state handler error: {}", stateEvent); return true; } if (task.getState().typeIsFinished() && !completeTaskMap.containsKey(Long.toString(task.getTaskCode()))) { taskFinished(task); if (task.getTaskGroupId() > 0) { //release task group TaskInstance nextTaskInstance = this.processService.releaseTaskGroup(task); if (nextTaskInstance != null) { if (nextTaskInstance.getProcessInstanceId() == task.getProcessInstanceId()) { StateEvent nextEvent = new StateEvent(); nextEvent.setProcessInstanceId(this.processInstance.getId()); nextEvent.setTaskInstanceId(nextTaskInstance.getId()); nextEvent.setType(StateEventType.WAIT_TASK_GROUP); this.stateEvents.add(nextEvent); } else { ProcessInstance processInstance = this.processService.findProcessInstanceById(nextTaskInstance.getProcessInstanceId()); this.processService.sendStartTask2Master(processInstance,nextTaskInstance.getId(), org.apache.dolphinscheduler.remote.command.CommandType.TASK_WAKEUP_EVENT_REQUEST); } } } } else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) { ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); iTaskProcessor.run(); if (iTaskProcessor.taskState().typeIsFinished()) { task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); taskFinished(task); } } else { logger.error("state handler error: {}", stateEvent); } return true; } private void taskFinished(TaskInstance task) { logger.info("work flow {} task {} state:{} ", processInstance.getId(), task.getId(), task.getState()); if (task.taskCanRetry()) { 
addTaskToStandByList(task); if (!task.retryTaskIntervalOverTime()) { logger.info("failure task will be submitted: process id: {}, task instance id: {} state:{} retry times:{} / {}, interval:{}", processInstance.getId(), task.getId(), task.getState(), task.getRetryTimes(), task.getMaxRetryTimes(), task.getRetryInterval()); this.addTimeoutCheck(task); } else { submitStandByTask(); } return; } completeTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); activeTaskProcessorMaps.remove(task.getId()); taskTimeoutCheckList.remove(task.getId()); if (task.getState().typeIsSuccess()) { processInstance.setVarPool(task.getVarPool()); processService.saveProcessInstance(processInstance); submitPostNode(Long.toString(task.getTaskCode())); } else if (task.getState().typeIsFailure()) { if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) { submitPostNode(Long.toString(task.getTaskCode())); } else { errorTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); if (processInstance.getFailureStrategy() == FailureStrategy.END) { killAllTasks(); } } } this.updateProcessInstanceState(); } /** * update process instance */ public void refreshProcessInstance(int processInstanceId) { logger.info("process instance update: {}", processInstanceId); processInstance = processService.findProcessInstanceById(processInstanceId); processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); processInstance.setProcessDefinition(processDefinition); } /** * update task instance */ public void refreshTaskInstance(int taskInstanceId) { logger.info("task instance update: {} ", taskInstanceId); TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId); if (taskInstance == null) { logger.error("can not find task instance, id:{}", taskInstanceId); return; } processService.packageTaskInstance(taskInstance, processInstance); taskInstanceMap.put(taskInstance.getId(), taskInstance); validTaskMap.remove(Long.toString(taskInstance.getTaskCode())); if (Flag.YES == taskInstance.getFlag()) { validTaskMap.put(Long.toString(taskInstance.getTaskCode()), taskInstance.getId()); } } /** * check process instance by state event */ public boolean checkProcessInstance(StateEvent stateEvent) { if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.error("mismatch process instance id: {}, state event:{}", this.processInstance.getId(), stateEvent); return false; } return true; } /** * check if task instance exist by state event */ public boolean checkTaskInstanceByStateEvent(StateEvent stateEvent) { if (stateEvent.getTaskInstanceId() == 0) { logger.error("task instance id null, state event:{}", stateEvent); return false; } if (!taskInstanceMap.containsKey(stateEvent.getTaskInstanceId())) { logger.error("mismatch task instance id, event:{}", stateEvent); return false; } return true; } /** * check if task instance exist by task code */ public boolean checkTaskInstanceByCode(long taskCode) { if (taskInstanceMap == null || taskInstanceMap.size() == 0) { return false; } for (TaskInstance taskInstance : taskInstanceMap.values()) { if (taskInstance.getTaskCode() == taskCode) { return true; } } return false; } /** * check if task instance exist by id */ public boolean checkTaskInstanceById(int taskInstanceId) { if (taskInstanceMap == null || taskInstanceMap.size() == 0) { return false; } return taskInstanceMap.containsKey(taskInstanceId); } /** * get task instance 
from memory */ public TaskInstance getTaskInstance(int taskInstanceId) { if (taskInstanceMap.containsKey(taskInstanceId)) { return taskInstanceMap.get(taskInstanceId); } return null; } private boolean processStateChangeHandler(StateEvent stateEvent) { try { logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus()); if (processComplementData()) { return true; } if (stateEvent.getExecutionStatus().typeIsFinished()) { endProcess(); } if (processInstance.getState() == ExecutionStatus.READY_STOP) { killAllTasks(); } return true; } catch (Exception e) { logger.error("process state change error:", e); } return true; } private boolean processComplementData() throws Exception { if (!needComplementProcess()) { return false; } if (processInstance.getState() == ExecutionStatus.READY_STOP) { return false; } Date scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = complementListDate.get(0); } else if (processInstance.getState().typeIsFinished()) { endProcess(); if (complementListDate.size() <= 0) { logger.info("process complement end. process id:{}", processInstance.getId()); return true; } int index = complementListDate.indexOf(scheduleDate); if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) { logger.info("process complement end. process id:{}", processInstance.getId()); // complement data ends || no success return true; } logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}", processInstance.getId(), processInstance.getScheduleTime(), complementListDate.toString()); scheduleDate = complementListDate.get(index + 1); //the next process complement processInstance.setId(0); } processInstance.setScheduleTime(scheduleDate); Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); this.taskInstanceMap.clear(); startProcess(); return true; } private boolean needComplementProcess() { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { return true; } return false; } private void startProcess() throws Exception { if (this.taskInstanceMap.size() == 0) { isStart = false; buildFlowDag(); initTaskQueue(); submitPostNode(null); isStart = true; } } /** * process end handle */ private void endProcess() { this.stateEvents.clear(); if (processDefinition.getExecutionType().typeIsSerialWait()) { checkSerialProcess(processDefinition); } if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } if (processAlertManager.isNeedToSendWarning(processInstance)) { ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, getValidTaskList(), projectUser); } List<TaskInstance> taskInstances = 
processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); //release task group processService.releaseAllTaskGroup(processInstance.getId()); } public void checkSerialProcess(ProcessDefinition processDefinition) { int nextInstanceId = processInstance.getNextProcessInstanceId(); if (nextInstanceId == 0) { ProcessInstance nextProcessInstance = this.processService.loadNextProcess4Serial(processInstance.getProcessDefinition().getCode(), ExecutionStatus.SERIAL_WAIT.getCode()); if (nextProcessInstance == null) { return; } nextInstanceId = nextProcessInstance.getId(); } ProcessInstance nextProcessInstance = this.processService.findProcessInstanceById(nextInstanceId); if (nextProcessInstance.getState().typeIsFinished() || nextProcessInstance.getState().typeIsRunning()) { return; } Map<String, Object> cmdParam = new HashMap<>(); cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, nextInstanceId); Command command = new Command(); command.setCommandType(CommandType.RECOVER_SERIAL_WAIT); command.setProcessDefinitionCode(processDefinition.getCode()); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); processService.createCommand(command); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { if (this.dag != null) { return; } processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); processInstance.setProcessDefinition(processDefinition); List<TaskInstance> recoverNodeList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList()); forbiddenTaskMap.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskMap.put(Long.toString(taskNode.getCode()), taskNode); } }); // generate process to get DAG info List<String> recoveryNodeCodeList = getRecoveryNodeCodeList(recoverNodeList); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNodeCodeList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskProcessorMaps.clear(); dependFailedTaskMap.clear(); completeTaskMap.clear(); errorTaskMap.clear(); if (ExecutionStatus.SUBMITTED_SUCCESS != processInstance.getState()) { List<TaskInstance> validTaskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : validTaskInstanceList) { validTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); taskInstanceMap.put(task.getId(), task); if (task.isTaskComplete()) { completeTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()), dag)) { continue; } if (task.getState().typeIsFailure() && !task.taskCanRetry()) { errorTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); } } } if 
(processInstance.isComplementData() && complementListDate.size() == 0) { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { Date start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); if (complementListDate.size() == 0 && needComplementProcess()) { complementListDate = CronUtils.getSelfFireDateList(start, end, schedules); logger.info(" process definition code:{} complement data: {}", processInstance.getProcessDefinitionCode(), complementListDate.toString()); if (complementListDate.size() > 0 && Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(complementListDate.get(0)); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processService.updateProcessInstance(processInstance); } } } } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { try { ITaskProcessor taskProcessor = taskProcessorFactory.getTaskProcessor(taskInstance.getTaskType()); if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) { notifyProcessHostUpdate(taskInstance); } TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskGroupId(taskDefinition.getTaskGroupId()); // package task instance before submit processService.packageTaskInstance(taskInstance, processInstance); boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getTaskCommitRetryTimes(), masterConfig.getTaskCommitInterval()); if (!submit) { logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!", processInstance.getId(), processInstance.getName(), taskInstance.getId(), taskInstance.getName()); return null; } taskInstanceMap.put(taskInstance.getId(), taskInstance); activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor); taskProcessor.run(); addTimeoutCheck(taskInstance); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); this.stateEvents.add(stateEvent); } return taskInstance; } catch (Exception e) { logger.error("submit standby task error", e); return null; } } private void notifyProcessHostUpdate(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return; } try { HostUpdateCommand hostUpdateCommand = new HostUpdateCommand(); hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort())); hostUpdateCommand.setTaskInstanceId(taskInstance.getId()); Host host = new Host(taskInstance.getHost()); nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command()); } catch (Exception e) { logger.error("notify process host update", e); } } private void 
addTimeoutCheck(TaskInstance taskInstance) { if (taskTimeoutCheckList.containsKey(taskInstance.getId())) { return; } TaskDefinition taskDefinition = taskInstance.getTaskDefine(); if (taskDefinition == null) { logger.error("taskDefinition is null, taskId:{}", taskInstance.getId()); return; } if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag() || taskInstance.taskCanRetry()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } else { if (taskInstance.isDependTask() || taskInstance.isSubProcess()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } } } /** * find task instance in db. * in case submit more than one same name task in the same time. * * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> validTaskInstanceList = getValidTaskList(); for (TaskInstance taskInstance : validTaskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance == null) { taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task dry run flag taskInstance.setDryRun(processInstance.getDryRun()); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode(); Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? 
processEnvironmentCode : taskNode.getEnvironmentCode(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); taskInstance.setEnvironmentCode(processEnvironmentCode); } else { taskInstance.setWorkerGroup(taskWorkerGroup); taskInstance.setEnvironmentCode(taskEnvironmentCode); } if (!taskInstance.getEnvironmentCode().equals(-1L)) { Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode()); if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) { taskInstance.setEnvironmentConfig(environment.getConfig()); } } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); } return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String, Property> allProperty = new HashMap<>(); Map<String, TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskCode : preTask) { Integer taskId = completeTaskMap.get(preTaskCode); if (taskId == null) { continue; } TaskInstance preTaskInstance = taskInstanceMap.get(taskId); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. 
thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } /** * get complete task instance map, taskCode as key */ private Map<String, TaskInstance> getCompleteTaskInstanceMap() { Map<String, TaskInstance> completeTaskInstanceMap = new HashMap<>(); for (Integer taskInstanceId : completeTaskMap.values()) { TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId); completeTaskInstanceMap.put(Long.toString(taskInstance.getTaskCode()), taskInstance); } return completeTaskInstanceMap; } /** * get valid task list */ private List<TaskInstance> getValidTaskList() { List<TaskInstance> validTaskInstanceList = new ArrayList<>(); for (Integer taskInstanceId : validTaskMap.values()) { validTaskInstanceList.add(taskInstanceMap.get(taskInstanceId)); } return validTaskInstanceList; } private void submitPostNode(String parentNodeCode) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeMap, dag, getCompleteTaskInstanceMap()); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); if (checkTaskInstanceByCode(taskNodeObject.getCode())) { continue; } TaskInstance task = createTaskInstance(processInstance, taskNodeObject); taskInstances.add(task); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if (completeTaskMap.containsKey(Long.toString(task.getTaskCode()))) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); continue; } addTaskToStandByList(task); } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskCode) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskCode)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskCode); List<String> depCodeList = taskNode.getDepList(); for (String depsNode : depCodeList) { if (!dag.containsNode(depsNode) || forbiddenTaskMap.containsKey(depsNode) || skipTaskNodeMap.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskMap.containsKey(depsNode)) { return 
DependResult.WAITING; } Integer depsTaskId = completeTaskMap.get(depsNode); ExecutionStatus depTaskState = taskInstanceMap.get(depsTaskId).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskCode)) { return DependResult.FAILED; } } logger.info("taskCode: {} completeDependTaskList: {}", taskCode, Arrays.toString(completeTaskMap.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeMap, dag, getCompleteTaskInstanceMap()); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { Integer taskInstanceId = completeTaskMap.get(dependNodeName); ExecutionStatus depTaskState = taskInstanceMap.get(taskInstanceId).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Integer taskInstanceId : completeTaskMap.values()) { TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId); if (taskInstance != null && taskInstance.getState() == state) { resultList.add(taskInstance); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskMap.size() > 0) { return true; } return this.dependFailedTaskMap.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 && activeTaskProcessorMaps.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList 
= getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ExecutionStatus state = getProcessInstanceState(processInstance); if (processInstance.getState() != state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); processInstance.setState(state); if (state.typeIsFinished()) { processInstance.setEndTime(new Date()); } processService.updateProcessInstance(processInstance); StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(Long.toString(taskInstance.getTaskCode())); } /** * add task to standby list * * @param taskInstance task 
instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { if (!readyToSubmitTaskQueue.contains(taskInstance)) { readyToSubmitTaskQueue.put(taskInstance); } } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the on going tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); for (int taskId : activeTaskProcessorMaps.keySet()) { TaskInstance taskInstance = processService.findTaskInstanceById(taskId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId); taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); taskInstanceMap.put(task.getId(), task); submitPostNode(Long.toString(task.getTaskCode())); continue; } } //init varPool only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(Long.toString(task.getTaskCode())); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (task.retryTaskIntervalOverTime()) { int originalId = task.getId(); TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); if 
(taskInstance.getId() != originalId) { activeTaskProcessorMaps.remove(originalId); } } } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTaskMap.put(Long.toString(task.getTaskCode()), task.getId()); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (!StringUtils.isNotEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODES)) { startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node code list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node code list */ private List<String> getRecoveryNodeCodeList(List<TaskInstance> recoverNodeList) { List<String> recoveryNodeCodeList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeList)) { for (TaskInstance task : recoverNodeList) { recoveryNodeCodeList.add(Long.toString(task.getTaskCode())); } } return recoveryNodeCodeList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeCodeList recovery node code list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeCodeList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeCodeList, depNodeType); } }
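The submitStandByTask() logic above resubmits a failed task only when task.taskCanRetry() holds and task.retryTaskIntervalOverTime() reports that the configured wait has elapsed; otherwise the task stays in the standby queue. Below is a minimal, self-contained sketch of that retry gate; the class and member names (SimpleTask, canRetry, retryIntervalOverTime) are illustrative stand-ins, not DolphinScheduler APIs.

```java
// Hedged sketch of the retry gate used by submitStandByTask():
// a failed task is resubmitted only while retries remain and only
// after its retry interval has elapsed. All names are hypothetical.
import java.time.Duration;
import java.time.Instant;

public class RetryGateSketch {

    static class SimpleTask {
        int retryTimes;          // retries already attempted
        int maxRetryTimes;       // configured maximum retries
        Instant lastFailureTime; // when the task last failed
        Duration retryInterval;  // configured wait between retries

        boolean canRetry() {
            return retryTimes < maxRetryTimes;
        }

        boolean retryIntervalOverTime(Instant now) {
            return now.isAfter(lastFailureTime.plus(retryInterval));
        }
    }

    public static void main(String[] args) {
        SimpleTask task = new SimpleTask();
        task.retryTimes = 1;
        task.maxRetryTimes = 3;
        task.lastFailureTime = Instant.now().minusSeconds(120);
        task.retryInterval = Duration.ofSeconds(60);

        if (task.canRetry() && task.retryIntervalOverTime(Instant.now())) {
            System.out.println("resubmit task"); // analogous to submitTaskExec(task)
        } else {
            System.out.println("keep task in standby queue");
        }
    }
}
```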
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,206
[Bug] [MasterServer] processInstance keeps running when a task times out and fails.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened when a task timed out and failed, the processInstance kept running without retrying the task or finishing. ### What you expected to happen the processInstance should retry the timed-out task and then finish. ### How to reproduce ![image](https://user-images.githubusercontent.com/11962619/144804188-bae18b81-4e9d-437e-b7cc-d71bba251e93.png) ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7206
https://github.com/apache/dolphinscheduler/pull/7207
705ba74f529a50dbf700e528701b8c19bd65f028
ff9bc806ac04d07ff0581e7a88514f943310d2c9
"2021-12-06T07:20:55Z"
java
"2021-12-06T10:20:56Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/WorkflowExecuteThreadTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODES; import static org.powermock.api.mockito.PowerMockito.mock; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ProcessExecutionTypeEnum; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread; import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory; import org.apache.dolphinscheduler.service.process.ProcessService; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.text.ParseException; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; import org.springframework.context.ApplicationContext; /** * test for WorkflowExecuteThread */ @RunWith(PowerMockRunner.class) @PrepareForTest({WorkflowExecuteThread.class}) public class WorkflowExecuteThreadTest { private WorkflowExecuteThread workflowExecuteThread; private ProcessInstance processInstance; private ProcessService processService; private final int processDefinitionId = 1; private MasterConfig config; private ApplicationContext applicationContext; private TaskProcessorFactory taskProcessorFactory; @Before public void init() throws Exception { processService = mock(ProcessService.class); taskProcessorFactory = mock(TaskProcessorFactory.class); 
applicationContext = mock(ApplicationContext.class); config = new MasterConfig(); Mockito.when(applicationContext.getBean(MasterConfig.class)).thenReturn(config); processInstance = mock(ProcessInstance.class); Mockito.when(processInstance.getState()).thenReturn(ExecutionStatus.SUCCESS); Mockito.when(processInstance.getHistoryCmd()).thenReturn(CommandType.COMPLEMENT_DATA.toString()); Mockito.when(processInstance.getIsSubProcess()).thenReturn(Flag.NO); Mockito.when(processInstance.getScheduleTime()).thenReturn(DateUtils.stringToDate("2020-01-01 00:00:00")); Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, "2020-01-01 00:00:00"); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, "2020-01-20 23:00:00"); Mockito.when(processInstance.getCommandParam()).thenReturn(JSONUtils.toJsonString(cmdParam)); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setGlobalParamMap(Collections.emptyMap()); processDefinition.setGlobalParamList(Collections.emptyList()); Mockito.when(processInstance.getProcessDefinition()).thenReturn(processDefinition); ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList = new ConcurrentHashMap<>(); workflowExecuteThread = PowerMockito.spy(new WorkflowExecuteThread(processInstance, processService, null, null, config, taskTimeoutCheckList, taskProcessorFactory)); // prepareProcess init dag Field dag = WorkflowExecuteThread.class.getDeclaredField("dag"); dag.setAccessible(true); dag.set(workflowExecuteThread, new DAG()); PowerMockito.doNothing().when(workflowExecuteThread, "endProcess"); } @Test public void testParseStartNodeName() throws ParseException { try { Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(CMD_PARAM_START_NODES, "1,2,3"); Mockito.when(processInstance.getCommandParam()).thenReturn(JSONUtils.toJsonString(cmdParam)); Class<WorkflowExecuteThread> masterExecThreadClass = WorkflowExecuteThread.class; Method method = masterExecThreadClass.getDeclaredMethod("parseStartNodeName", String.class); method.setAccessible(true); List<String> nodeNames = (List<String>) method.invoke(workflowExecuteThread, JSONUtils.toJsonString(cmdParam)); Assert.assertEquals(3, nodeNames.size()); } catch (Exception e) { Assert.fail(); } } @Test public void testGetStartTaskInstanceList() { try { TaskInstance taskInstance1 = new TaskInstance(); taskInstance1.setId(1); TaskInstance taskInstance2 = new TaskInstance(); taskInstance2.setId(2); TaskInstance taskInstance3 = new TaskInstance(); taskInstance3.setId(3); TaskInstance taskInstance4 = new TaskInstance(); taskInstance4.setId(4); Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(CMD_PARAM_RECOVERY_START_NODE_STRING, "1,2,3,4"); Mockito.when(processService.findTaskInstanceById(1)).thenReturn(taskInstance1); Mockito.when(processService.findTaskInstanceById(2)).thenReturn(taskInstance2); Mockito.when(processService.findTaskInstanceById(3)).thenReturn(taskInstance3); Mockito.when(processService.findTaskInstanceById(4)).thenReturn(taskInstance4); Class<WorkflowExecuteThread> masterExecThreadClass = WorkflowExecuteThread.class; Method method = masterExecThreadClass.getDeclaredMethod("getStartTaskInstanceList", String.class); method.setAccessible(true); List<TaskInstance> taskInstances = (List<TaskInstance>) method.invoke(workflowExecuteThread, JSONUtils.toJsonString(cmdParam)); Assert.assertEquals(4, taskInstances.size()); } catch (Exception e) { Assert.fail(); } } @Test public void testGetPreVarPool() { try { Set<String> preTaskName = new 
HashSet<>(); preTaskName.add(Long.toString(1)); preTaskName.add(Long.toString(2)); TaskInstance taskInstance = new TaskInstance(); TaskInstance taskInstance1 = new TaskInstance(); taskInstance1.setId(1); taskInstance1.setTaskCode(1); taskInstance1.setVarPool("[{\"direct\":\"OUT\",\"prop\":\"test1\",\"type\":\"VARCHAR\",\"value\":\"1\"}]"); taskInstance1.setEndTime(new Date()); TaskInstance taskInstance2 = new TaskInstance(); taskInstance2.setId(2); taskInstance2.setTaskCode(2); taskInstance2.setVarPool("[{\"direct\":\"OUT\",\"prop\":\"test2\",\"type\":\"VARCHAR\",\"value\":\"2\"}]"); taskInstance2.setEndTime(new Date()); Map<Integer, TaskInstance> taskInstanceMap = new ConcurrentHashMap<>(); taskInstanceMap.put(taskInstance1.getId(), taskInstance1); taskInstanceMap.put(taskInstance2.getId(), taskInstance2); Map<String, Integer> completeTaskList = new ConcurrentHashMap<>(); completeTaskList.put(Long.toString(taskInstance1.getTaskCode()), taskInstance1.getId()); completeTaskList.put(Long.toString(taskInstance1.getTaskCode()), taskInstance2.getId()); Class<WorkflowExecuteThread> masterExecThreadClass = WorkflowExecuteThread.class; Field completeTaskMapField = masterExecThreadClass.getDeclaredField("completeTaskMap"); completeTaskMapField.setAccessible(true); completeTaskMapField.set(workflowExecuteThread, completeTaskList); Field taskInstanceMapField = masterExecThreadClass.getDeclaredField("taskInstanceMap"); taskInstanceMapField.setAccessible(true); taskInstanceMapField.set(workflowExecuteThread, taskInstanceMap); workflowExecuteThread.getPreVarPool(taskInstance, preTaskName); Assert.assertNotNull(taskInstance.getVarPool()); taskInstance2.setVarPool("[{\"direct\":\"OUT\",\"prop\":\"test1\",\"type\":\"VARCHAR\",\"value\":\"2\"}]"); completeTaskList.put(Long.toString(taskInstance2.getTaskCode()), taskInstance2.getId()); completeTaskMapField.setAccessible(true); completeTaskMapField.set(workflowExecuteThread, completeTaskList); taskInstanceMapField.setAccessible(true); taskInstanceMapField.set(workflowExecuteThread, taskInstanceMap); workflowExecuteThread.getPreVarPool(taskInstance, preTaskName); Assert.assertNotNull(taskInstance.getVarPool()); } catch (Exception e) { Assert.fail(); } } @Test public void testCheckSerialProcess() { try { ProcessDefinition processDefinition1 = new ProcessDefinition(); processDefinition1.setId(123); processDefinition1.setName("test"); processDefinition1.setVersion(1); processDefinition1.setCode(11L); processDefinition1.setExecutionType(ProcessExecutionTypeEnum.SERIAL_WAIT); Mockito.when(processInstance.getId()).thenReturn(225); Mockito.when(processService.findProcessInstanceById(225)).thenReturn(processInstance); workflowExecuteThread.checkSerialProcess(processDefinition1); Mockito.when(processInstance.getId()).thenReturn(225); Mockito.when(processInstance.getNextProcessInstanceId()).thenReturn(222); ProcessInstance processInstance9 = new ProcessInstance(); processInstance9.setId(222); processInstance9.setProcessDefinitionCode(11L); processInstance9.setProcessDefinitionVersion(1); processInstance9.setState(ExecutionStatus.SERIAL_WAIT); Mockito.when(processService.findProcessInstanceById(225)).thenReturn(processInstance); Mockito.when(processService.findProcessInstanceById(222)).thenReturn(processInstance9); workflowExecuteThread.checkSerialProcess(processDefinition1); } catch (Exception e) { Assert.fail(); } } private List<Schedule> zeroSchedulerList() { return Collections.emptyList(); } private List<Schedule> oneSchedulerList() { List<Schedule> schedulerList = 
new LinkedList<>(); Schedule schedule = new Schedule(); schedule.setCrontab("0 0 0 1/2 * ?"); schedulerList.add(schedule); return schedulerList; } }
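testGetPreVarPool above asserts that a downstream task inherits the OUT properties of both upstream tasks, and that a later value for the same property overrides an earlier one. A hedged sketch of that merge behavior, using a plain map instead of the real var-pool JSON:

```java
// Illustrative var-pool merge: upstream OUT properties are folded into
// one map, with later entries overriding earlier ones on key collision.
// This mirrors the assertions in testGetPreVarPool, not the real API.
import java.util.LinkedHashMap;
import java.util.Map;

public class VarPoolMergeSketch {
    public static void main(String[] args) {
        Map<String, String> pool = new LinkedHashMap<>();
        pool.put("test1", "1"); // from upstream task 1 (finished first)
        pool.put("test2", "2"); // from upstream task 2
        pool.put("test1", "2"); // task 2 also emits test1 later: override
        System.out.println(pool); // {test1=2, test2=2}
    }
}
```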
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,217
[Bug] [DAO] Missing the file dolphinscheduler_dml.sql in both 2.0.1_schema/mysql and 2.0.1_schema/postgresql.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I initialized the database through the CreateDolphinScheduler class, it threw an exception about the missing file '2.0.1_schema/postgresql/dolphinscheduler_dml.sql'. ### What you expected to happen I expect the database initialization process to execute correctly. ### How to reproduce Execute 'create-dolphinscheduler.sh' and you will reproduce it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7217
https://github.com/apache/dolphinscheduler/pull/7223
ff9bc806ac04d07ff0581e7a88514f943310d2c9
4743e8f891387934c50a4ac5b6f8be76e5e6bbcb
"2021-12-06T09:57:23Z"
java
"2021-12-06T14:55:28Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.1_schema/mysql/dolphinscheduler_dml.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,217
[Bug] [DAO] Missing the file dolphinscheduler_dml.sql in both 2.0.1_schema/mysql and 2.0.1_schema/postgresql.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I initialized the database through the CreateDolphinScheduler class, it threw an exception about the missing file '2.0.1_schema/postgresql/dolphinscheduler_dml.sql'. ### What you expected to happen I expect the database initialization process to execute correctly. ### How to reproduce Execute 'create-dolphinscheduler.sh' and you will reproduce it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7217
https://github.com/apache/dolphinscheduler/pull/7223
ff9bc806ac04d07ff0581e7a88514f943310d2c9
4743e8f891387934c50a4ac5b6f8be76e5e6bbcb
"2021-12-06T09:57:23Z"
java
"2021-12-06T14:55:28Z"
dolphinscheduler-dao/src/main/resources/sql/upgrade/2.0.1_schema/postgresql/dolphinscheduler_dml.sql
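Both records above share one root cause: the upgrade runner expects a dolphinscheduler_dml.sql per database type under the 2.0.1_schema directory, and neither file existed. The sketch below reproduces that failure mode in isolation; only the path layout is taken from the updated_file fields, and the lookup code itself is hypothetical, not the actual CreateDolphinScheduler implementation.

```java
// Hypothetical resolver for versioned upgrade scripts: it fails fast
// when the expected classpath resource is absent, which matches the
// reported symptom of create-dolphinscheduler.sh.
import java.io.InputStream;

public class UpgradeScriptLookupSketch {

    static InputStream locate(String version, String dbType) {
        String path = String.format("sql/upgrade/%s_schema/%s/dolphinscheduler_dml.sql", version, dbType);
        InputStream in = UpgradeScriptLookupSketch.class.getClassLoader().getResourceAsStream(path);
        if (in == null) {
            throw new IllegalStateException("missing upgrade script: " + path);
        }
        return in;
    }

    public static void main(String[] args) {
        try {
            locate("2.0.1", "postgresql");
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // the failure mode the issue reports
        }
    }
}
```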
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,245
[Bug] [API] SpelEvaluationException: EL1008E: Property or field 'all' cannot be found
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2021-12-07 19:02:27.766 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - Failed to query worker groups org.springframework.expression.spel.SpelEvaluationException: EL1008E: Property or field 'all' cannot be found on object of type 'org.springframework.cache.interceptor.CacheExpressionRootObject' - maybe not public or not valid? at org.springframework.expression.spel.ast.PropertyOrFieldReference.readProperty(PropertyOrFieldReference.java:217) at org.springframework.expression.spel.ast.PropertyOrFieldReference.getValueInternal(PropertyOrFieldReference.java:104) at org.springframework.expression.spel.ast.PropertyOrFieldReference.getValueInternal(PropertyOrFieldReference.java:91) at org.springframework.expression.spel.ast.SpelNodeImpl.getValue(SpelNodeImpl.java:112) at org.springframework.expression.spel.standard.SpelExpression.getValue(SpelExpression.java:272) at org.springframework.cache.interceptor.CacheOperationExpressionEvaluator.key(CacheOperationExpressionEvaluator.java:104) at org.springframework.cache.interceptor.CacheAspectSupport$CacheOperationContext.generateKey(CacheAspectSupport.java:795) at org.springframework.cache.interceptor.CacheAspectSupport.generateKey(CacheAspectSupport.java:592) at org.springframework.cache.interceptor.CacheAspectSupport.execute(CacheAspectSupport.java:379) at org.springframework.cache.interceptor.CacheAspectSupport.execute(CacheAspectSupport.java:345) at org.springframework.cache.interceptor.CacheInterceptor.invoke(CacheInterceptor.java:64) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:97) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.JdkDynamicAopProxy.invoke(JdkDynamicAopProxy.java:215) at com.sun.proxy.$Proxy152.queryAllWorkerGroup(Unknown Source) at org.apache.dolphinscheduler.api.service.impl.WorkerGroupServiceImpl.getWorkerGroups(WorkerGroupServiceImpl.java:255) at org.apache.dolphinscheduler.api.service.impl.WorkerGroupServiceImpl.queryAllGroup(WorkerGroupServiceImpl.java:233) ``` ### What you expected to happen The query should succeed ### How to reproduce Query the list of workers ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7245
https://github.com/apache/dolphinscheduler/pull/7246
6a144d69bf7c3fec37fa3bdcb1fcd6f01e4848c4
4e6265df11ce0b4dcf5ef13b596f1f7d25d33380
"2021-12-07T11:28:36Z"
java
"2021-12-08T01:00:00Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.SystemUtils; import java.util.regex.Pattern; /** * Constants */ public final class Constants { private Constants() { throw new UnsupportedOperationException("Construct Constants"); } /** * common properties path */ public static final String COMMON_PROPERTIES_PATH = "/common.properties"; /** * alert properties */ public static final String ALERT_RPC_PORT = "alert.rpc.port"; /** * registry properties */ public static final String REGISTRY_DOLPHINSCHEDULER_MASTERS = "/nodes/master"; public static final String REGISTRY_DOLPHINSCHEDULER_WORKERS = "/nodes/worker"; public static final String REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers"; public static final String REGISTRY_DOLPHINSCHEDULER_NODE = "/nodes"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters"; /** * fs.defaultFS */ public static final String FS_DEFAULTFS = "fs.defaultFS"; /** * fs s3a endpoint */ public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint"; /** * fs s3a access key */ public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key"; /** * fs s3a secret key */ public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key"; /** * hadoop configuration */ public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE"; public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port"; /** * yarn.resourcemanager.ha.rm.ids */ public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids"; /** * yarn.application.status.address */ public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address"; /** * yarn.job.history.status.address */ public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address"; /** * hdfs configuration * hdfs.root.user */ public static final String HDFS_ROOT_USER = "hdfs.root.user"; /** * hdfs/s3 configuration * resource.upload.path */ public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path"; /** * data basedir path */ public static final String DATA_BASEDIR_PATH = "data.basedir.path"; /** * dolphinscheduler.env.path */ public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path"; /** * environment properties default path 
*/ public static final String ENV_PATH = "env/dolphinscheduler_env.sh"; /** * resource.view.suffixs */ public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs"; public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js"; /** * development.state */ public static final String DEVELOPMENT_STATE = "development.state"; /** * sudo enable */ public static final String SUDO_ENABLE = "sudo.enable"; /** * string true */ public static final String STRING_TRUE = "true"; /** * resource storage type */ public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type"; /** * comma , */ public static final String COMMA = ","; /** * COLON : */ public static final String COLON = ":"; /** * SINGLE_SLASH / */ public static final String SINGLE_SLASH = "/"; /** * DOUBLE_SLASH // */ public static final String DOUBLE_SLASH = "//"; /** * EQUAL SIGN */ public static final String EQUAL_SIGN = "="; /** * date format of yyyy-MM-dd HH:mm:ss */ public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss"; /** * date format of yyyyMMdd */ public static final String YYYYMMDD = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss"; /** * date format of yyyyMMddHHmmssSSS */ public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS"; /** * http connect time out */ public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000; /** * http connect request time out */ public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000; /** * httpclient socket time out */ public static final int SOCKET_TIMEOUT = 60 * 1000; /** * http header */ public static final String HTTP_HEADER_UNKNOWN = "unKnown"; /** * http X-Forwarded-For */ public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For"; /** * http X-Real-IP */ public static final String HTTP_X_REAL_IP = "X-Real-IP"; /** * UTF-8 */ public static final String UTF_8 = "UTF-8"; /** * user name regex */ public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$"); /** * email regex */ public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$"); /** * read permission */ public static final int READ_PERMISSION = 2; /** * write permission */ public static final int WRITE_PERMISSION = 2 * 2; /** * execute permission */ public static final int EXECUTE_PERMISSION = 1; /** * default admin permission */ public static final int DEFAULT_ADMIN_PERMISSION = 7; /** * all permissions */ public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION; /** * max task timeout */ public static final int MAX_TASK_TIMEOUT = 24 * 3600; /** * worker host weight */ public static final int DEFAULT_WORKER_HOST_WEIGHT = 100; /** * time unit seconds to minutes */ public static final int SEC_2_MINUTES_TIME_UNIT = 60; /*** * * rpc port */ public static final String RPC_PORT = "rpc.port"; /** * forbid running task */ public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN"; /** * normal running task */ public static final String FLOWNODE_RUN_FLAG_NORMAL = "NORMAL"; public static final String COMMON_TASK_TYPE = "common"; public static final String DEFAULT = "default"; public static final String PASSWORD = "password"; public static final String XXXXXX = "******"; public static final String NULL = "NULL"; public static final String THREAD_NAME_MASTER_SERVER =
"Master-Server"; public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server"; /** * command parameter keys */ public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId"; public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList"; public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId"; public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId"; public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0"; public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId"; public static final String CMD_PARAM_SUB_PROCESS_DEFINE_CODE = "processDefinitionCode"; public static final String CMD_PARAM_START_NODES = "StartNodeList"; public static final String CMD_PARAM_START_PARAMS = "StartParams"; public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams"; /** * complement data start date */ public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate"; /** * complement data end date */ public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate"; /** * complement date default cron string */ public static final String DEFAULT_CRON_STRING = "0 0 0 * * ? *"; public static final int SLEEP_TIME_MILLIS = 1000; /** * one second mils */ public static final int SECOND_TIME_MILLIS = 1000; /** * master task instance cache-database refresh interval */ public static final int CACHE_REFRESH_TIME_MILLIS = 20 * 1000; /** * heartbeat for zk info length */ public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 13; /** * jar */ public static final String JAR = "jar"; /** * hadoop */ public static final String HADOOP = "hadoop"; /** * -D <property>=<value> */ public static final String D = "-D"; /** * exit code success */ public static final int EXIT_CODE_SUCCESS = 0; /** * exit code failure */ public static final int EXIT_CODE_FAILURE = -1; /** * process or task definition failure */ public static final int DEFINITION_FAILURE = -1; /** * process or task definition first version */ public static final int VERSION_FIRST = 1; /** * date format of yyyyMMdd */ public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss"; /** * system date(yyyyMMddHHmmss) */ public static final String PARAMETER_DATETIME = "system.datetime"; /** * system date(yyyymmdd) today */ public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate"; /** * system date(yyyymmdd) yesterday */ public static final String PARAMETER_BUSINESS_DATE = "system.biz.date"; /** * ACCEPTED */ public static final String ACCEPTED = "ACCEPTED"; /** * SUCCEEDED */ public static final String SUCCEEDED = "SUCCEEDED"; /** * ENDED */ public static final String ENDED = "ENDED"; /** * NEW */ public static final String NEW = "NEW"; /** * NEW_SAVING */ public static final String NEW_SAVING = "NEW_SAVING"; /** * SUBMITTED */ public static final String SUBMITTED = "SUBMITTED"; /** * FAILED */ public static final String FAILED = "FAILED"; /** * KILLED */ public static final String KILLED = "KILLED"; /** * RUNNING */ public static final String RUNNING = "RUNNING"; /** * underline "_" */ public static final String UNDERLINE = "_"; /** * quartz job prifix */ public static final String QUARTZ_JOB_PRIFIX = "job"; /** * quartz job group prifix */ public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup"; /** * projectId */ public static final String PROJECT_ID = 
"projectId"; /** * processId */ public static final String SCHEDULE_ID = "scheduleId"; /** * schedule */ public static final String SCHEDULE = "schedule"; /** * application regex */ public static final String APPLICATION_REGEX = "application_\\d+_\\d+"; public static final String PID = SystemUtils.IS_OS_WINDOWS ? "handle" : "pid"; /** * month_begin */ public static final String MONTH_BEGIN = "month_begin"; /** * add_months */ public static final String ADD_MONTHS = "add_months"; /** * month_end */ public static final String MONTH_END = "month_end"; /** * week_begin */ public static final String WEEK_BEGIN = "week_begin"; /** * week_end */ public static final String WEEK_END = "week_end"; /** * timestamp */ public static final String TIMESTAMP = "timestamp"; public static final char SUBTRACT_CHAR = '-'; public static final char ADD_CHAR = '+'; public static final char MULTIPLY_CHAR = '*'; public static final char DIVISION_CHAR = '/'; public static final char LEFT_BRACE_CHAR = '('; public static final char RIGHT_BRACE_CHAR = ')'; public static final String ADD_STRING = "+"; public static final String STAR = "*"; public static final String DIVISION_STRING = "/"; public static final String LEFT_BRACE_STRING = "("; public static final char P = 'P'; public static final char N = 'N'; public static final String SUBTRACT_STRING = "-"; public static final String GLOBAL_PARAMS = "globalParams"; public static final String LOCAL_PARAMS = "localParams"; public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId"; public static final String PROCESS_INSTANCE_STATE = "processInstanceState"; public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance"; public static final String CONDITION_RESULT = "conditionResult"; public static final String SWITCH_RESULT = "switchResult"; public static final String WAIT_START_TIMEOUT = "waitStartTimeout"; public static final String DEPENDENCE = "dependence"; public static final String TASK_LIST = "taskList"; public static final String QUEUE = "queue"; public static final String QUEUE_NAME = "queueName"; public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0; public static final int LOG_QUERY_LIMIT = 4096; /** * master/worker server use for zk */ public static final String MASTER_TYPE = "master"; public static final String WORKER_TYPE = "worker"; public static final String DELETE_OP = "delete"; public static final String ADD_OP = "add"; public static final String ALIAS = "alias"; public static final String CONTENT = "content"; public static final String DEPENDENT_SPLIT = ":||"; public static final long DEPENDENT_ALL_TASK_CODE = 0; /** * preview schedule execute count */ public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5; /** * kerberos */ public static final String KERBEROS = "kerberos"; /** * kerberos expire time */ public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time"; /** * java.security.krb5.conf */ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf"; /** * java.security.krb5.conf.path */ public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state"; /** * com.amazonaws.services.s3.enableV4 */ public static final String AWS_S3_V4 = 
"com.amazonaws.services.s3.enableV4"; /** * loginUserFromKeytab user */ public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username"; /** * loginUserFromKeytab path */ public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path"; /** * task log info format */ public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s"; public static final int[] NOT_TERMINATED_STATES = new int[] { ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal(), ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(), ExecutionStatus.WAITING_THREAD.ordinal(), ExecutionStatus.WAITING_DEPEND.ordinal() }; public static final int[] RUNNING_PROCESS_STATE = new int[] { ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.SERIAL_WAIT.ordinal() }; /** * status */ public static final String STATUS = "status"; /** * message */ public static final String MSG = "msg"; /** * data total */ public static final String COUNT = "count"; /** * page size */ public static final String PAGE_SIZE = "pageSize"; /** * current page no */ public static final String PAGE_NUMBER = "pageNo"; /** * */ public static final String DATA_LIST = "data"; public static final String TOTAL_LIST = "totalList"; public static final String CURRENT_PAGE = "currentPage"; public static final String TOTAL_PAGE = "totalPage"; public static final String TOTAL = "total"; /** * workflow */ public static final String WORKFLOW_LIST = "workFlowList"; public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList"; /** * session user */ public static final String SESSION_USER = "session.user"; public static final String SESSION_ID = "sessionId"; /** * locale */ public static final String LOCALE_LANGUAGE = "language"; /** * database type */ public static final String MYSQL = "MYSQL"; public static final String HIVE = "HIVE"; public static final String ADDRESS = "address"; public static final String DATABASE = "database"; public static final String OTHER = "other"; /** * session timeout */ public static final int SESSION_TIME_OUT = 7200; public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024; public static final String UDF = "UDF"; public static final String CLASS = "class"; /** * dataSource sensitive param */ public static final String DATASOURCE_PASSWORD_REGEX = "(?<=((?i)password((\\\\\":\\\\\")|(=')))).*?(?=((\\\\\")|(')))"; /** * default worker group */ public static final String DEFAULT_WORKER_GROUP = "default"; /** * authorize writable perm */ public static final int AUTHORIZE_WRITABLE_PERM = 7; /** * authorize readable perm */ public static final int AUTHORIZE_READABLE_PERM = 4; public static final int NORMAL_NODE_STATUS = 0; public static final int ABNORMAL_NODE_STATUS = 1; public static final int BUSY_NODE_STATUE = 2; public static final String START_TIME = "start time"; public static final String END_TIME = "end time"; public static final String START_END_DATE = "startDate,endDate"; /** * system line separator */ public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator"); /** * datasource encryption salt */ public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*"; public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable"; public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt"; /** * network interface 
preferred */ public static final String DOLPHIN_SCHEDULER_NETWORK_INTERFACE_PREFERRED = "dolphin.scheduler.network.interface.preferred"; /** * network IP gets priority, default inner outer */ public static final String DOLPHIN_SCHEDULER_NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy"; /** * exec shell scripts */ public static final String SH = "sh"; /** * pstree, get pid and sub pids */ public static final String PSTREE = "pstree"; public static final Boolean KUBERNETES_MODE = !StringUtils.isEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && !StringUtils.isEmpty(System.getenv("KUBERNETES_SERVICE_PORT")); /** * dry run flag */ public static final int DRY_RUN_FLAG_NO = 0; public static final int DRY_RUN_FLAG_YES = 1; }
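The permission constants in the file above are plain bit flags: READ_PERMISSION (2), WRITE_PERMISSION (4) and EXECUTE_PERMISSION (1) OR together into ALL_PERMISSIONS (7), and membership is checked with bitwise AND. A minimal sketch:

```java
// Bit-flag permissions as defined in Constants: combine with |, test with &.
public class PermissionBitsSketch {
    static final int READ_PERMISSION = 2;
    static final int WRITE_PERMISSION = 2 * 2;
    static final int EXECUTE_PERMISSION = 1;
    static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;

    public static void main(String[] args) {
        int granted = READ_PERMISSION | EXECUTE_PERMISSION;    // 3
        System.out.println((granted & WRITE_PERMISSION) != 0); // false: no write
        System.out.println((granted & READ_PERMISSION) != 0);  // true: can read
        System.out.println(ALL_PERMISSIONS);                   // 7, matches DEFAULT_ADMIN_PERMISSION
    }
}
```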
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,245
[Bug] [API] SpelEvaluationException: EL1008E: Property or field 'all' cannot be found
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2021-12-07 19:02:27.766 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - Failed to query worker groups org.springframework.expression.spel.SpelEvaluationException: EL1008E: Property or field 'all' cannot be found on object of type 'org.springframework.cache.interceptor.CacheExpressionRootObject' - maybe not public or not valid? at org.springframework.expression.spel.ast.PropertyOrFieldReference.readProperty(PropertyOrFieldReference.java:217) at org.springframework.expression.spel.ast.PropertyOrFieldReference.getValueInternal(PropertyOrFieldReference.java:104) at org.springframework.expression.spel.ast.PropertyOrFieldReference.getValueInternal(PropertyOrFieldReference.java:91) at org.springframework.expression.spel.ast.SpelNodeImpl.getValue(SpelNodeImpl.java:112) at org.springframework.expression.spel.standard.SpelExpression.getValue(SpelExpression.java:272) at org.springframework.cache.interceptor.CacheOperationExpressionEvaluator.key(CacheOperationExpressionEvaluator.java:104) at org.springframework.cache.interceptor.CacheAspectSupport$CacheOperationContext.generateKey(CacheAspectSupport.java:795) at org.springframework.cache.interceptor.CacheAspectSupport.generateKey(CacheAspectSupport.java:592) at org.springframework.cache.interceptor.CacheAspectSupport.execute(CacheAspectSupport.java:379) at org.springframework.cache.interceptor.CacheAspectSupport.execute(CacheAspectSupport.java:345) at org.springframework.cache.interceptor.CacheInterceptor.invoke(CacheInterceptor.java:64) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:97) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.JdkDynamicAopProxy.invoke(JdkDynamicAopProxy.java:215) at com.sun.proxy.$Proxy152.queryAllWorkerGroup(Unknown Source) at org.apache.dolphinscheduler.api.service.impl.WorkerGroupServiceImpl.getWorkerGroups(WorkerGroupServiceImpl.java:255) at org.apache.dolphinscheduler.api.service.impl.WorkerGroupServiceImpl.queryAllGroup(WorkerGroupServiceImpl.java:233) ``` ### What you expected to happen The query should succeed ### How to reproduce Query the list of workers ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7245
https://github.com/apache/dolphinscheduler/pull/7246
6a144d69bf7c3fec37fa3bdcb1fcd6f01e4848c4
4e6265df11ce0b4dcf5ef13b596f1f7d25d33380
"2021-12-07T11:28:36Z"
java
"2021-12-08T01:00:00Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/WorkerGroupMapper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.dao.entity.WorkerGroup; import org.apache.ibatis.annotations.Param; import java.util.List; import org.springframework.cache.annotation.CacheConfig; import org.springframework.cache.annotation.CacheEvict; import org.springframework.cache.annotation.Cacheable; import com.baomidou.mybatisplus.core.mapper.BaseMapper; /** * worker group mapper interface */ @CacheConfig(cacheNames = "workerGroup", keyGenerator = "cacheKeyGenerator") public interface WorkerGroupMapper extends BaseMapper<WorkerGroup> { /** * query all worker group * * @return worker group list */ @Cacheable(sync = true, key = "all") List<WorkerGroup> queryAllWorkerGroup(); @CacheEvict(key = "all") int deleteById(Integer id); @CacheEvict(key = "all") int insert(WorkerGroup entity); @CacheEvict(key = "all") int updateById(@Param("et") WorkerGroup entity); /** * query worker group by name * * @param name name * @return worker group list */ List<WorkerGroup> queryWorkerGroupByName(@Param("name") String name); }
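The EL1008E in this record occurs because Spring parses the @Cacheable key as a SpEL expression: the bare key = "all" is read as a property lookup on CacheExpressionRootObject, which has no such property. The conventional remedy is to quote the key so SpEL sees the string literal 'all'. The sketch below shows that change in isolation; it is not necessarily the exact diff of PR #7246.

```java
// Sketch of the usual EL1008E fix: quote constant SpEL cache keys.
import java.util.List;

import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.Cacheable;

@CacheConfig(cacheNames = "workerGroup")
public interface WorkerGroupMapperFixSketch {

    // 'all' in single quotes is a SpEL String literal; the unquoted form
    // is a property reference and throws EL1008E at evaluation time.
    @Cacheable(sync = true, key = "'all'")
    List<Object> queryAllWorkerGroup();

    @CacheEvict(key = "'all'")
    int deleteById(Integer id);
}
```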
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,235
[BUG][dolphinscheduler-api] unable to configure multiple hosts separated by ',' in datasource configuration
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened We are trying to upgrade to 2.0.0. The Impala datasource configuration is: ```cdh-datanode07,cdh-datanode08``` It cannot connect successfully while testing, yet the same configuration works fine in DS version 1.3.2. The same problem occurs in every other multi-host datasource containing ','; only a single host without ',', such as ```cdh-datanode07```, works. ![image](https://user-images.githubusercontent.com/13417113/144982934-5bacd665-5bc6-4d11-a9fd-6559ef2cbbba.png) ``` [INFO] 2021-12-07 14:53:03.287 org.apache.dolphinscheduler.api.aspect.AccessLogAspect:[76] - REQUEST TRANCE_ID:faf39a91-da0d-4d25-9789-7af804ee56a7, LOGIN_USER:admin, URI:/dolphinscheduler/datasources/connect, METHOD:POST, HANDLER:org.apache.dolphinscheduler.api.controller.DataSourceController.connectDataSource, ARGS:{dataSourceParam=HiveDataSourceParamDTO{host='cdh-datanode04,cdh-datanode05,cdh-datanode06', port=21050, database='ods_cbd', principal='null', userName='cdchive', password='rdr7zIaM', other='null', javaSecurityKrb5Conf='null', loginUserKeytabUsername='null', loginUserKeytabPath='null'}} [ERROR] 2021-12-07 14:53:03.288 org.apache.dolphinscheduler.api.exceptions.ApiExceptionHandler:[46] - Failed to establish the datasource connection java.lang.IllegalArgumentException: datasource host illegal at org.apache.dolphinscheduler.common.datasource.AbstractDatasourceProcessor.checkHost(AbstractDatasourceProcessor.java:49) at org.apache.dolphinscheduler.common.datasource.AbstractDatasourceProcessor.checkDatasourceParam(AbstractDatasourceProcessor.java:37) at org.apache.dolphinscheduler.common.datasource.DatasourceUtil.checkDatasourceParam(DatasourceUtil.java:59) at org.apache.dolphinscheduler.api.controller.DataSourceController.connectDataSource(DataSourceController.java:213) at org.apache.dolphinscheduler.api.controller.DataSourceController$$FastClassBySpringCGLIB$$835fdd04.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88) at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:87) at sun.reflect.GeneratedMethodAccessor161.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644) at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633) at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175) at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691) at 
org.apache.dolphinscheduler.api.controller.DataSourceController$$EnhancerBySpringCGLIB$$fc8c1309.connectDataSource(<generated>) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190) at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138) at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105) at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892) at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797) at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87) at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040) at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943) at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006) at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909) at javax.servlet.http.HttpServlet.service(HttpServlet.java:707) at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883) at javax.servlet.http.HttpServlet.service(HttpServlet.java:790) at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763) at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633) at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at 
org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561) at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143) at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602) at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127) at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235) at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612) at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233) at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434) at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188) at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501) at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582) at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186) at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349) at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141) at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766) at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127) at org.eclipse.jetty.server.Server.handle(Server.java:516) at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383) at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556) at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375) at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273) at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311) at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105) at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905) at java.lang.Thread.run(Thread.java:748) ``` ### What you expected to happen It should support configuring multiple hosts, such as ```cdh-datanode07,cdh-datanode08``` The new version should be compatible with the old behavior ### How to reproduce Configure the hosts as
```cdh-datanode07,cdh-datanode08```, then test the connection ### Anything else _No response_ ### Version 2.0.0 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7235
https://github.com/apache/dolphinscheduler/pull/7253
4e6265df11ce0b4dcf5ef13b596f1f7d25d33380
deefc8e98af6246e89ef3aeecf97ea3a69674bca
"2021-12-07T07:39:11Z"
java
"2021-12-08T01:21:54Z"
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/main/java/org/apache/dolphinscheduler/plugin/datasource/api/datasource/AbstractDatasourceProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.datasource.api.datasource; import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.spi.datasource.ConnectionParam; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.commons.collections4.MapUtils; import java.text.MessageFormat; import java.util.Map; import java.util.regex.Pattern; public abstract class AbstractDatasourceProcessor implements DatasourceProcessor { private static final Pattern IPV4_PATTERN = Pattern.compile("^[a-zA-Z0-9\\_\\-\\.]+$"); private static final Pattern IPV6_PATTERN = Pattern.compile("^[a-zA-Z0-9\\_\\-\\.\\:\\[\\]]+$"); private static final Pattern DATABASE_PATTER = Pattern.compile("^[a-zA-Z0-9\\_\\-\\.]+$"); private static final Pattern PARAMS_PATTER = Pattern.compile("^[a-zA-Z0-9\\-\\_\\/]+$"); @Override public void checkDatasourceParam(BaseDataSourceParamDTO baseDataSourceParamDTO) { checkHost(baseDataSourceParamDTO.getHost()); checkDatasourcePatter(baseDataSourceParamDTO.getDatabase()); checkOther(baseDataSourceParamDTO.getOther()); } /** * Check the host is valid * * @param host datasource host */ protected void checkHost(String host) { if (!IPV4_PATTERN.matcher(host).matches() || !IPV6_PATTERN.matcher(host).matches()) { throw new IllegalArgumentException("datasource host illegal"); } } /** * check database name is valid * * @param database database name */ protected void checkDatasourcePatter(String database) { if (!DATABASE_PATTER.matcher(database).matches()) { throw new IllegalArgumentException("datasource name illegal"); } } /** * check other is valid * * @param other other */ protected void checkOther(Map<String, String> other) { if (MapUtils.isEmpty(other)) { return; } boolean paramsCheck = other.entrySet().stream().allMatch(p -> PARAMS_PATTER.matcher(p.getValue()).matches()); if (!paramsCheck) { throw new IllegalArgumentException("datasource other params illegal"); } } @Override public String getDatasourceUniqueId(ConnectionParam connectionParam, DbType dbType) { BaseConnectionParam baseConnectionParam = (BaseConnectionParam) connectionParam; return MessageFormat.format("{0}@{1}@{2}", dbType.getDescp(), baseConnectionParam.getUser(), baseConnectionParam.getJdbcUrl()); } }
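checkHost above rejects any comma-separated host list because neither pattern admits ','; note also that the condition !ipv4.matches() || !ipv6.matches() requires a host to satisfy both patterns at once. One plausible comma-tolerant rewrite, sketched independently of whatever PR #7253 actually changed, splits the value on commas and accepts each host that matches either pattern:

```java
// Hedged sketch: validate each comma-separated host individually and
// accept it if it matches either the IPv4-style or IPv6-style pattern.
import java.util.regex.Pattern;

public class MultiHostCheckSketch {

    private static final Pattern IPV4_PATTERN = Pattern.compile("^[a-zA-Z0-9._-]+$");
    private static final Pattern IPV6_PATTERN = Pattern.compile("^[a-zA-Z0-9._\\-:\\[\\]]+$");

    static void checkHost(String host) {
        for (String h : host.split(",")) {
            if (!IPV4_PATTERN.matcher(h).matches() && !IPV6_PATTERN.matcher(h).matches()) {
                throw new IllegalArgumentException("datasource host illegal: " + h);
            }
        }
    }

    public static void main(String[] args) {
        checkHost("cdh-datanode07,cdh-datanode08"); // passes with the split-based check
        System.out.println("hosts ok");
    }
}
```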
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,241
[Bug] [dolphinscheduler-ui] When editing the timing, the alarm group displays 0
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/95271106/145004359-c6d4333c-3085-4738-8a84-7a1bd08332fb.png) ### Version 2.0.1-prepare ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7241
https://github.com/apache/dolphinscheduler/pull/7262
deefc8e98af6246e89ef3aeecf97ea3a69674bca
3ca2ee9ac8d09ced1b3f4e893b9a00891456fee6
"2021-12-07T09:37:01Z"
java
"2021-12-08T06:24:53Z"
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/timing.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="timing-process-model"> <div class="clearfix list"> <div class="text"> {{$t('Start and stop time')}} </div> <div class="cont"> <el-date-picker style="width: 360px" v-model="scheduleTime" size="small" @change="_datepicker" type="datetimerange" range-separator="-" :start-placeholder="$t('startDate')" :end-placeholder="$t('endDate')" value-format="yyyy-MM-dd HH:mm:ss"> </el-date-picker> </div> </div> <div class="clearfix list"> <el-button type="primary" style="margin-left:20px" size="small" round :loading="spinnerLoading" @click="preview()">{{$t('Execute time')}}</el-button> <div class="text"> {{$t('Timing')}} </div> <div class="cont"> <template> <el-popover placement="bottom-start" trigger="click"> <template slot="reference"> <el-input style="width: 360px;" type="text" size="small" readonly :value="crontab"> </el-input> </template> <div class="crontab-box"> <v-crontab v-model="crontab" :locale="i18n"></v-crontab> </div> </el-popover> </template> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Timezone')}} </div> <div class="cont"> <el-select v-model=timezoneId filterable placeholder="Timezone"> <el-option v-for="item in availableTimezoneIDList" :key="item" :label="item" :value="item"> </el-option> </el-select> </div> </div> <div class="clearfix list"> <div style = "padding-left: 150px;">{{$t('Next five execution times')}}</div> <ul style = "padding-left: 150px;"> <li v-for="(time,i) in previewTimes" :key='i'>{{time}}</li> </ul> </div> <div class="clearfix list"> <div class="text"> {{$t('Failure Strategy')}} </div> <div class="cont"> <el-radio-group v-model="failureStrategy" style="margin-top: 7px;" size="small"> <el-radio :label="'CONTINUE'">{{$t('Continue')}}</el-radio> <el-radio :label="'END'">{{$t('End')}}</el-radio> </el-radio-group> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Notification strategy')}} </div> <div class="cont"> <el-select style="width: 200px;" size="small" v-model="warningType"> <el-option v-for="city in warningTypeList" :key="city.id" :value="city.id" :label="city.code"> </el-option> </el-select> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Process priority')}} </div> <div class="cont"> <m-priority v-model="processInstancePriority"></m-priority> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Worker group')}} </div> <div class="cont"> <m-worker-groups v-model="workerGroup"></m-worker-groups> </div> </div> <div class="clearfix list"> <div class="text"> {{$t('Environment Name')}} </div> <div class="cont"> <m-related-environment v-model="environmentCode" :workerGroup="workerGroup" v-on:environmentCodeEvent="_onUpdateEnvironmentCode"></m-related-environment> </div> </div> <div class="clearfix list"> <div 
class="text"> {{$t('Alarm group')}} </div> <div class="cont"> <el-select style="width: 200px;" clearable size="small" :disabled="!notifyGroupList.length" v-model="warningGroupId"> <el-input slot="trigger" readonly slot-scope="{ selectedModel }" :value="selectedModel ? selectedModel.label : ''" style="width: 200px;" @on-click-icon.stop="warningGroupId = {}"> <em slot="suffix" class="el-icon-error" style="font-size: 15px;cursor: pointer;" v-show="warningGroupId.id"></em> <em slot="suffix" class="el-icon-bottom" style="font-size: 12px;" v-show="!warningGroupId.id"></em> </el-input> <el-option v-for="city in notifyGroupList" :key="city.id" :value="city.id" :label="city.code"> </el-option> </el-select> </div> </div> <div class="submit"> <el-button type="text" size="small" @click="close()"> {{$t('Cancel')}} </el-button> <el-button type="primary" size="small" round :loading="spinnerLoading" @click="ok()">{{spinnerLoading ? $t('Loading...') : (timingData.item.crontab ? $t('Edit') : $t('Create'))}} </el-button> </div> </div> </template> <script> import moment from 'moment-timezone' import i18n from '@/module/i18n' import store from '@/conf/home/store' import { warningTypeList } from './util' import { vCrontab } from '@/module/components/crontab/index' import { formatDate } from '@/module/filter/filter' import mPriority from '@/module/components/priority/priority' import mWorkerGroups from '@/conf/home/pages/dag/_source/formModel/_source/workerGroups' import mRelatedEnvironment from '@/conf/home/pages/dag/_source/formModel/_source/relatedEnvironment' export default { name: 'timing-process', data () { return { store, processDefinitionId: 0, failureStrategy: 'CONTINUE', warningTypeList: warningTypeList, availableTimezoneIDList: moment.tz.names(), warningType: 'NONE', notifyGroupList: [], warningGroupId: '', spinnerLoading: false, scheduleTime: '', crontab: '0 0 * * * ? *', timezoneId: moment.tz.guess(), cronPopover: false, i18n: i18n.globalScope.LOCALE, processInstancePriority: 'MEDIUM', workerGroup: '', environmentCode: '', previewTimes: [] } }, props: { timingData: Object }, methods: { _datepicker (val) { this.scheduleTime = val }, _verification () { if (!this.scheduleTime) { this.$message.warning(`${i18n.$t('Please select time')}`) return false } if (this.scheduleTime[0] === this.scheduleTime[1]) { this.$message.warning(`${i18n.$t('The start time must not be the same as the end')}`) return false } if (!this.crontab) { this.$message.warning(`${i18n.$t('Please enter crontab')}`) return false } return true }, _onUpdateEnvironmentCode (o) { this.environmentCode = o }, _timing () { if (this._verification()) { let api = '' let searchParams = { schedule: JSON.stringify({ startTime: this.scheduleTime[0], endTime: this.scheduleTime[1], crontab: this.crontab, timezoneId: this.timezoneId }), failureStrategy: this.failureStrategy, warningType: this.warningType, processInstancePriority: this.processInstancePriority, warningGroupId: this.warningGroupId === '' ? 
0 : this.warningGroupId, workerGroup: this.workerGroup, environmentCode: this.environmentCode } let msg = '' // edit if (this.timingData.item.crontab) { api = 'dag/updateSchedule' searchParams.id = this.timingData.item.id msg = `${i18n.$t('Edit')}${i18n.$t('Success')},${i18n.$t('Please go online')}` } else { api = 'dag/createSchedule' searchParams.processDefinitionCode = this.timingData.item.code msg = `${i18n.$t('Create')}${i18n.$t('Success')}` } this.store.dispatch(api, searchParams).then(res => { this.$message.success(msg) this.$emit('onUpdateTiming') }).catch(e => { this.$message.error(e.msg || '') }) } }, _preview () { if (this._verification()) { let api = 'dag/previewSchedule' let searchParams = { schedule: JSON.stringify({ startTime: this.scheduleTime[0], endTime: this.scheduleTime[1], crontab: this.crontab }) } this.store.dispatch(api, searchParams).then(res => { if (res.length) { this.previewTimes = res } else { this.$message.warning(`${i18n.$t('There is no data for this period of time')}`) } }) } }, _getNotifyGroupList () { return new Promise((resolve, reject) => { this.store.dispatch('dag/getNotifyGroupList').then(res => { this.notifyGroupList = res if (this.notifyGroupList.length) { resolve() } else { reject(new Error(0)) } }) }) }, ok () { this._timing() }, close () { this.$emit('closeTiming') }, preview () { this._preview() } }, watch: { }, created () { if (this.timingData.item.workerGroup === undefined) { let stateWorkerGroupsList = this.store.state.security.workerGroupsListAll || [] if (stateWorkerGroupsList.length) { this.workerGroup = stateWorkerGroupsList[0].id } else { this.store.dispatch('security/getWorkerGroupsAll').then(res => { this.$nextTick(() => { this.workerGroup = res[0].id }) }) } } else { this.workerGroup = this.timingData.item.workerGroup } if (this.timingData.item.crontab !== null) { this.crontab = this.timingData.item.crontab } if (this.timingData.type === 'timing') { let date = new Date() let year = date.getFullYear() let month = date.getMonth() + 1 let day = date.getDate() if (month < 10) { month = '0' + month } if (day < 10) { day = '0' + day } let startDate = year + '-' + month + '-' + day + ' ' + '00:00:00' let endDate = (year + 100) + '-' + month + '-' + day + ' ' + '00:00:00' let times = [] times[0] = startDate times[1] = endDate this.crontab = '0 0 * * * ? *' this.scheduleTime = times } }, mounted () { let item = this.timingData.item // Determine whether to echo if (this.timingData.item.crontab) { this.crontab = item.crontab this.scheduleTime = [formatDate(item.startTime), formatDate(item.endTime)] this.timezoneId = item.timezoneId === null ? 
moment.tz.guess() : item.timezoneId this.failureStrategy = item.failureStrategy this.warningType = item.warningType this.processInstancePriority = item.processInstancePriority this._getNotifyGroupList().then(() => { this.$nextTick(() => { // let list = _.filter(this.notifyGroupList, v => v.id === item.warningGroupId) this.warningGroupId = item.warningGroupId }) }).catch(() => { this.warningGroupId = '' }) } else { this._getNotifyGroupList().then(() => { this.$nextTick(() => { this.warningGroupId = '' }) }).catch(() => { this.warningGroupId = '' }) } }, components: { vCrontab, mPriority, mWorkerGroups, mRelatedEnvironment } } </script> <style lang="scss" rel="stylesheet/scss"> .timing-process-model { width: 860px; min-height: 300px; background: #fff; border-radius: 3px; margin-top: 0; .crontab-box { margin: -6px; .v-crontab { } } .form-model { padding-top: 0; } .title-box { margin-bottom: 18px; span { padding-left: 30px; font-size: 16px; padding-top: 29px; display: block; } } .list { margin-bottom: 14px; >.text { width: 140px; float: left; text-align: right; line-height: 32px; padding-right: 8px; } .cont { width: 350px; float: left; } } .submit { text-align: right; padding-right: 30px; padding-top: 10px; padding-bottom: 30px; } } .v-crontab-form-model { .list-box { padding: 0; } } .x-date-packer-panel .x-date-packer-day .lattice label.bg-hover { background: #00BFFF!important; margin-top: -4px; } .x-date-packer-panel .x-date-packer-day .lattice em:hover { background: #0098e1!important; } </style>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,274
[Bug] [MasterServer] invalid failover worker logic when failover master
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When a master fails over, it also tries to fail over workers, but that logic is invalid. ### What you expected to happen When a master fails over, it should fail over the task instances associated with it. ### How to reproduce Bring one master server down. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7274
https://github.com/apache/dolphinscheduler/pull/7275
f21eb8f9fefdb2f9f6fc917e2af838b927cce68a
3a351da7525bcc95c7ae71ed82610d2bf785fd42
"2021-12-08T13:19:16Z"
java
"2021-12-08T13:35:18Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.registry; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_NODE; import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.model.Server; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.registry.api.ConnectionState; import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory; import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder; import org.apache.dolphinscheduler.server.master.cache.ProcessInstanceExecCacheManager; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread; import org.apache.dolphinscheduler.server.registry.HeartBeatTask; import org.apache.dolphinscheduler.server.utils.ProcessUtils; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext; import org.apache.dolphinscheduler.service.registry.RegistryClient; import org.apache.commons.lang.StringUtils; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import com.google.common.collect.Sets; /** * zookeeper master client * <p> * single instance */ @Component public class MasterRegistryClient { /** * logger */ private static final Logger logger = LoggerFactory.getLogger(MasterRegistryClient.class); /** * process service */ @Autowired private ProcessService processService; @Autowired private RegistryClient registryClient; /** * master config */ @Autowired private MasterConfig masterConfig; /** * heartbeat executor */ private ScheduledExecutorService heartBeatExecutor; @Autowired private ProcessInstanceExecCacheManager 
processInstanceExecCacheManager; /** * master startup time, ms */ private long startupTime; private String localNodePath; public void init() { this.startupTime = System.currentTimeMillis(); this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor")); } public void start() { String nodeLock = Constants.REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS; try { // create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/startup-masters registryClient.getLock(nodeLock); // master registry registry(); String registryPath = getMasterPath(); registryClient.handleDeadServer(Collections.singleton(registryPath), NodeType.MASTER, Constants.DELETE_OP); // init system node while (!registryClient.checkNodeExists(NetUtils.getHost(), NodeType.MASTER)) { ThreadUtils.sleep(SLEEP_TIME_MILLIS); } // self tolerant if (registryClient.getActiveMasterNum() == 1) { removeNodePath(null, NodeType.MASTER, true); removeNodePath(null, NodeType.WORKER, true); } registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_NODE, new MasterRegistryDataListener()); } catch (Exception e) { logger.error("master start up exception", e); } finally { registryClient.releaseLock(nodeLock); } } public void setRegistryStoppable(IStoppable stoppable) { registryClient.setStoppable(stoppable); } public void closeRegistry() { // TODO unsubscribe MasterRegistryDataListener deregister(); } /** * remove zookeeper node path * * @param path zookeeper node path * @param nodeType zookeeper node type * @param failover is failover */ public void removeNodePath(String path, NodeType nodeType, boolean failover) { logger.info("{} node deleted : {}", nodeType, path); String failoverPath = getFailoverLockPath(nodeType); try { registryClient.getLock(failoverPath); String serverHost = null; if (!StringUtils.isEmpty(path)) { serverHost = registryClient.getHostByEventDataPath(path); if (StringUtils.isEmpty(serverHost)) { logger.error("server down error: unknown path: {}", path); return; } // handle dead server registryClient.handleDeadServer(Collections.singleton(path), nodeType, Constants.ADD_OP); } //failover server if (failover) { failoverServerWhenDown(serverHost, nodeType); } } catch (Exception e) { logger.error("{} server failover failed.", nodeType); logger.error("failover exception ", e); } finally { registryClient.releaseLock(failoverPath); } } /** * failover server when server down * * @param serverHost server host * @param nodeType zookeeper node type */ private void failoverServerWhenDown(String serverHost, NodeType nodeType) { switch (nodeType) { case MASTER: failoverMaster(serverHost); break; case WORKER: failoverWorker(serverHost, true, true); break; default: break; } } /** * get failover lock path * * @param nodeType zookeeper node type * @return fail over lock path */ private String getFailoverLockPath(NodeType nodeType) { switch (nodeType) { case MASTER: return Constants.REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS; case WORKER: return Constants.REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS; default: return ""; } } /** * task needs failover if task start before worker starts * * @param taskInstance task instance * @return true if task instance need fail over */ private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) { boolean taskNeedFailover = true; //now no host will execute this task instance,so no need to failover the task if (taskInstance.getHost() == null) { return false; } // if the worker node exists in zookeeper, we 
must check the task starts after the worker if (registryClient.checkNodeExists(taskInstance.getHost(), NodeType.WORKER)) { //if task start after worker starts, there is no need to failover the task. if (checkTaskAfterWorkerStart(taskInstance)) { taskNeedFailover = false; } } return taskNeedFailover; } /** * check task start after the worker server starts. * * @param taskInstance task instance * @return true if task instance start time after worker server start date */ private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return false; } Date workerServerStartDate = null; List<Server> workerServers = registryClient.getServerList(NodeType.WORKER); for (Server workerServer : workerServers) { if (taskInstance.getHost().equals(workerServer.getHost() + Constants.COLON + workerServer.getPort())) { workerServerStartDate = workerServer.getCreateTime(); break; } } if (workerServerStartDate != null) { return taskInstance.getStartTime().after(workerServerStartDate); } return false; } /** * failover worker tasks * <p> * 1. kill yarn job if there are yarn jobs in tasks. * 2. change task state from running to need failover. * 3. failover all tasks when workerHost is null * * @param workerHost worker host * @param needCheckWorkerAlive need check worker alive * @param checkOwner need check process instance owner */ private void failoverWorker(String workerHost, boolean needCheckWorkerAlive, boolean checkOwner) { logger.info("start worker[{}] failover ...", workerHost); List<TaskInstance> needFailoverTaskInstanceList = processService.queryNeedFailoverTaskInstances(workerHost); for (TaskInstance taskInstance : needFailoverTaskInstanceList) { if (needCheckWorkerAlive) { if (!checkTaskInstanceNeedFailover(taskInstance)) { continue; } } ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); if (workerHost == null || !checkOwner || processInstance.getHost().equalsIgnoreCase(getLocalAddress())) { // only failover the task owned myself if worker down. 
if (processInstance == null) { logger.error("failover error, the process {} of task {} do not exists.", taskInstance.getProcessInstanceId(), taskInstance.getId()); continue; } taskInstance.setProcessInstance(processInstance); TaskExecutionContext taskExecutionContext = TaskExecutionContextBuilder.get() .buildTaskInstanceRelatedInfo(taskInstance) .buildProcessInstanceRelatedInfo(processInstance) .create(); // only kill yarn job if exists , the local thread has exited ProcessUtils.killYarnJob(taskExecutionContext); taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE); processService.saveTaskInstance(taskInstance); if (!processInstanceExecCacheManager.contains(processInstance.getId())) { continue; } WorkflowExecuteThread workflowExecuteThreadNotify = processInstanceExecCacheManager.getByProcessInstanceId(processInstance.getId()); StateEvent stateEvent = new StateEvent(); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(processInstance.getId()); stateEvent.setExecutionStatus(taskInstance.getState()); workflowExecuteThreadNotify.addStateEvent(stateEvent); } } logger.info("end worker[{}] failover ...", workerHost); } /** * failover master tasks * * @param masterHost master host */ private void failoverMaster(String masterHost) { logger.info("start master failover ..."); List<ProcessInstance> needFailoverProcessInstanceList = processService.queryNeedFailoverProcessInstances(masterHost); logger.info("failover process list size:{} ", needFailoverProcessInstanceList.size()); //updateProcessInstance host is null and insert into command for (ProcessInstance processInstance : needFailoverProcessInstanceList) { logger.info("failover process instance id: {} host:{}", processInstance.getId(), processInstance.getHost()); if (Constants.NULL.equals(processInstance.getHost())) { continue; } processService.processNeedFailoverProcessInstances(processInstance); } failoverWorker(masterHost, true, false); logger.info("master failover end"); } /** * registry */ public void registry() { String address = NetUtils.getAddr(masterConfig.getListenPort()); localNodePath = getMasterPath(); int masterHeartbeatInterval = masterConfig.getHeartbeatInterval(); HeartBeatTask heartBeatTask = new HeartBeatTask(startupTime, masterConfig.getMaxCpuLoadAvg(), masterConfig.getReservedMemory(), Sets.newHashSet(getMasterPath()), Constants.MASTER_TYPE, registryClient); registryClient.persistEphemeral(localNodePath, heartBeatTask.getHeartBeatInfo()); registryClient.addConnectionStateListener(this::handleConnectionState); this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, masterHeartbeatInterval, masterHeartbeatInterval, TimeUnit.SECONDS); logger.info("master node : {} registry to ZK successfully with heartBeatInterval : {}s", address, masterHeartbeatInterval); } public void handleConnectionState(ConnectionState state) { switch (state) { case CONNECTED: logger.debug("registry connection state is {}", state); break; case SUSPENDED: logger.warn("registry connection state is {}, ready to stop myself", state); registryClient.getStoppable().stop("registry connection state is SUSPENDED, stop myself"); break; case RECONNECTED: logger.debug("registry connection state is {}, clean the node info", state); registryClient.persistEphemeral(localNodePath, ""); break; case DISCONNECTED: logger.warn("registry connection state is {}, ready to stop myself", state); registryClient.getStoppable().stop("registry connection state is DISCONNECTED, stop myself"); 
break; default: } } public void deregister() { try { String address = getLocalAddress(); String localNodePath = getMasterPath(); registryClient.remove(localNodePath); logger.info("master node : {} unRegistry to register center.", address); heartBeatExecutor.shutdown(); logger.info("heartbeat executor shutdown"); registryClient.close(); } catch (Exception e) { logger.error("remove registry path exception ", e); } } /** * get master path */ public String getMasterPath() { String address = getLocalAddress(); return REGISTRY_DOLPHINSCHEDULER_MASTERS + "/" + address; } /** * get local address */ private String getLocalAddress() { return NetUtils.getAddr(masterConfig.getListenPort()); } }
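Editor's note: separate from the failover-scope problem the issue reports, the `failoverWorker` loop in this snapshot dereferences `processInstance.getHost()` before its `processInstance == null` guard, so a missing process instance raises an NPE instead of hitting the intended `continue`. A minimal reordering sketch (the actual change in PR #7275 may differ):

```java
// Sketch: null-check the process instance before using it.
ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if (processInstance == null) {
    logger.error("failover error, the process {} of task {} does not exist.",
            taskInstance.getProcessInstanceId(), taskInstance.getId());
    continue;
}
// only failover the tasks owned by this master when the worker is down
if (workerHost == null || !checkOwner
        || processInstance.getHost().equalsIgnoreCase(getLocalAddress())) {
    // ... existing yarn-kill / NEED_FAULT_TOLERANCE handling from the snapshot
}
```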
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,277
[Feature][datasource] Support Kerberos auto renewal
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Support Kerberos auto renewal ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7277
https://github.com/apache/dolphinscheduler/pull/7278
2a7d6b468f2942d075430478ffe08375f98df007
8d68cf48dde3765df1e02449af5edd8b18dac52d
"2021-12-08T14:05:35Z"
java
"2021-12-09T01:27:57Z"
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-hive/src/main/java/org/apache/dolphinscheduler/plugin/datasource/hive/HiveDataSourceClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.datasource.hive; import static org.apache.dolphinscheduler.spi.task.TaskConstants.JAVA_SECURITY_KRB5_CONF; import static org.apache.dolphinscheduler.spi.task.TaskConstants.JAVA_SECURITY_KRB5_CONF_PATH; import org.apache.dolphinscheduler.plugin.datasource.api.client.CommonDataSourceClient; import org.apache.dolphinscheduler.plugin.datasource.api.provider.JdbcDataSourceProvider; import org.apache.dolphinscheduler.plugin.datasource.utils.CommonUtil; import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.spi.utils.Constants; import org.apache.dolphinscheduler.spi.utils.PropertyUtils; import org.apache.dolphinscheduler.spi.utils.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.jdbc.core.JdbcTemplate; import com.zaxxer.hikari.HikariDataSource; public class HiveDataSourceClient extends CommonDataSourceClient { private static final Logger logger = LoggerFactory.getLogger(HiveDataSourceClient.class); protected HikariDataSource oneSessionDataSource; private UserGroupInformation ugi; public HiveDataSourceClient(BaseConnectionParam baseConnectionParam) { super(baseConnectionParam); } @Override protected void initClient(BaseConnectionParam baseConnectionParam) { logger.info("Create UserGroupInformation."); this.ugi = createUserGroupInformation(baseConnectionParam.getUser()); logger.info("Create ugi success."); super.initClient(baseConnectionParam); this.oneSessionDataSource = JdbcDataSourceProvider.createOneSessionJdbcDataSource(baseConnectionParam); logger.info("Init {} success.", getClass().getName()); } @Override protected void checkEnv(BaseConnectionParam baseConnectionParam) { super.checkEnv(baseConnectionParam); checkKerberosEnv(); } private void checkKerberosEnv() { String krb5File = PropertyUtils.getString(JAVA_SECURITY_KRB5_CONF_PATH); if (StringUtils.isNotBlank(krb5File)) { System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5File); } } private UserGroupInformation createUserGroupInformation(String username) { String krb5File = PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH); String keytab = PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH); String principal = PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME); try { return CommonUtil.createUGI(getHadoopConf(), principal, keytab, krb5File, username); } catch (IOException e) { throw new RuntimeException("createUserGroupInformation fail. 
", e); } } protected Configuration getHadoopConf() { return new Configuration(); } @Override public Connection getConnection() { try { return oneSessionDataSource.getConnection(); } catch (SQLException e) { logger.error("get oneSessionDataSource Connection fail SQLException: {}", e.getMessage(), e); return null; } } @Override public void close() { super.close(); logger.info("close HiveDataSourceClient."); this.oneSessionDataSource.close(); this.oneSessionDataSource = null; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,284
[Bug] [dolphinscheduler-api] In the process alarm message, projectid is 0
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/95271106/145352078-31533d62-de6f-4bb2-9ef5-a18fcaf6bca0.png) The projectid should be the project id associated with the process ### Version 2.0.1-prepare ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7284
https://github.com/apache/dolphinscheduler/pull/7298
67cc260d52c1f2e5aa7db76aa8621cdd0f8c4ee0
3fadfca085610ac812f24adc1ec2d25f71c579d0
"2021-12-09T07:25:22Z"
java
"2021-12-10T04:25:35Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao; import org.apache.dolphinscheduler.common.enums.AlertEvent; import org.apache.dolphinscheduler.common.enums.AlertStatus; import org.apache.dolphinscheduler.common.enums.AlertWarnLevel; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.Alert; import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance; import org.apache.dolphinscheduler.dao.entity.ProcessAlertContent; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ServerAlertContent; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.AlertMapper; import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.List; import java.util.stream.Collectors; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import com.google.common.collect.Lists; @Component public class AlertDao { @Autowired private AlertMapper alertMapper; @Autowired private AlertPluginInstanceMapper alertPluginInstanceMapper; @Autowired private AlertGroupMapper alertGroupMapper; /** * insert alert * * @param alert alert * @return add alert result */ public int addAlert(Alert alert) { return alertMapper.insert(alert); } /** * update alert * * @param alertStatus alertStatus * @param log log * @param id id * @return update alert result */ public int updateAlert(AlertStatus alertStatus, String log, int id) { Alert alert = alertMapper.selectById(id); alert.setAlertStatus(alertStatus); alert.setUpdateTime(new Date()); alert.setLog(log); return alertMapper.updateById(alert); } /** * MasterServer or WorkerServer stoped * * @param alertGroupId alertGroupId * @param host host * @param serverType serverType */ public void sendServerStopedAlert(int alertGroupId, String host, String serverType) { ServerAlertContent serverStopAlertContent = ServerAlertContent.newBuilder(). type(serverType) .host(host) .event(AlertEvent.SERVER_DOWN) .warningLevel(AlertWarnLevel.SERIOUS). 
build(); String content = JSONUtils.toJsonString(Lists.newArrayList(serverStopAlertContent)); Alert alert = new Alert(); alert.setTitle("Fault tolerance warning"); alert.setAlertStatus(AlertStatus.WAIT_EXECUTION); alert.setContent(content); alert.setAlertGroupId(alertGroupId); alert.setCreateTime(new Date()); alert.setUpdateTime(new Date()); // we use this method to avoid insert duplicate alert(issue #5525) alertMapper.insertAlertWhenServerCrash(alert); } /** * process time out alert * * @param processInstance processInstance * @param processDefinition processDefinition */ public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition) { int alertGroupId = processInstance.getWarningGroupId(); Alert alert = new Alert(); List<ProcessAlertContent> processAlertContentList = new ArrayList<>(1); ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder() .processId(processInstance.getId()) .processName(processInstance.getName()) .event(AlertEvent.TIME_OUT) .warningLevel(AlertWarnLevel.MIDDLE) .build(); processAlertContentList.add(processAlertContent); String content = JSONUtils.toJsonString(processAlertContentList); alert.setTitle("Process Timeout Warn"); saveTaskTimeoutAlert(alert, content, alertGroupId); } private void saveTaskTimeoutAlert(Alert alert, String content, int alertGroupId) { alert.setAlertGroupId(alertGroupId); alert.setContent(content); alert.setCreateTime(new Date()); alert.setUpdateTime(new Date()); alertMapper.insert(alert); } /** * task timeout warn * * @param alertGroupId alertGroupId * @param processInstanceId processInstanceId * @param processInstanceName processInstanceName * @param taskId taskId * @param taskName taskName */ public void sendTaskTimeoutAlert(int alertGroupId, int processInstanceId, String processInstanceName, int taskId, String taskName) { Alert alert = new Alert(); List<ProcessAlertContent> processAlertContentList = new ArrayList<>(1); ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder() .processId(processInstanceId) .processName(processInstanceName) .taskId(taskId) .taskName(taskName) .event(AlertEvent.TIME_OUT) .warningLevel(AlertWarnLevel.MIDDLE) .build(); processAlertContentList.add(processAlertContent); String content = JSONUtils.toJsonString(processAlertContentList); alert.setTitle("Task Timeout Warn"); saveTaskTimeoutAlert(alert, content, alertGroupId); } /** * List alerts that are pending for execution */ public List<Alert> listPendingAlerts() { return alertMapper.listAlertByStatus(AlertStatus.WAIT_EXECUTION); } /** * for test * * @return AlertMapper */ public AlertMapper getAlertMapper() { return alertMapper; } /** * list all alert plugin instance by alert group id * * @param alertGroupId alert group id * @return AlertPluginInstance list */ public List<AlertPluginInstance> listInstanceByAlertGroupId(int alertGroupId) { String alertInstanceIdsParam = alertGroupMapper.queryAlertGroupInstanceIdsById(alertGroupId); if (StringUtils.isNotBlank(alertInstanceIdsParam)) { String[] idsArray = alertInstanceIdsParam.split(","); List<Integer> ids = Arrays.stream(idsArray) .map(s -> Integer.parseInt(s.trim())) .collect(Collectors.toList()); return alertPluginInstanceMapper.queryByIds(ids); } return null; } public AlertPluginInstanceMapper getAlertPluginInstanceMapper() { return alertPluginInstanceMapper; } public void setAlertPluginInstanceMapper(AlertPluginInstanceMapper alertPluginInstanceMapper) { this.alertPluginInstanceMapper = alertPluginInstanceMapper; } public 
AlertGroupMapper getAlertGroupMapper() { return alertGroupMapper; } public void setAlertGroupMapper(AlertGroupMapper alertGroupMapper) { this.alertGroupMapper = alertGroupMapper; } }
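Editor's note: `sendProcessTimeoutAlert` in this snapshot builds its `ProcessAlertContent` without any project information, which matches the issue's symptom of `projectid` being 0; note also that its `processDefinition` parameter is never read. A sketch of threading `ProjectUser` through that path, modeled on how `ProcessAlertManager.getContentProcessInstance` (next record) already populates those fields — the signature change is an assumption, not the confirmed fix:

```java
// Sketch: accept a ProjectUser so projectId/projectName are populated in the alert content.
public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProjectUser projectUser) {
    int alertGroupId = processInstance.getWarningGroupId();
    Alert alert = new Alert();
    ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder()
            .projectId(projectUser.getProjectId())
            .projectName(projectUser.getProjectName())
            .processId(processInstance.getId())
            .processName(processInstance.getName())
            .event(AlertEvent.TIME_OUT)
            .warningLevel(AlertWarnLevel.MIDDLE)
            .build();
    String content = JSONUtils.toJsonString(Lists.newArrayList(processAlertContent));
    alert.setTitle("Process Timeout Warn");
    saveTaskTimeoutAlert(alert, content, alertGroupId);
}
```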
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,284
[Bug] [dolphinscheduler-api] In the process alarm message, projectid is 0
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/95271106/145352078-31533d62-de6f-4bb2-9ef5-a18fcaf6bca0.png) The projectid should be the project id associated with the process ### Version 2.0.1-prepare ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7284
https://github.com/apache/dolphinscheduler/pull/7298
67cc260d52c1f2e5aa7db76aa8621cdd0f8c4ee0
3fadfca085610ac812f24adc1ec2d25f71c579d0
"2021-12-09T07:25:22Z"
java
"2021-12-10T04:25:35Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/alert/ProcessAlertManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.alert; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.entity.Alert; import org.apache.dolphinscheduler.dao.entity.ProcessAlertContent; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import java.util.ArrayList; import java.util.Date; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; /** * process alert manager */ @Component public class ProcessAlertManager { /** * logger of AlertManager */ private static final Logger logger = LoggerFactory.getLogger(ProcessAlertManager.class); /** * alert dao */ @Autowired private AlertDao alertDao; /** * command type convert chinese * * @param commandType command type * @return command name */ private String getCommandCnName(CommandType commandType) { switch (commandType) { case RECOVER_TOLERANCE_FAULT_PROCESS: return "recover tolerance fault process"; case RECOVER_SUSPENDED_PROCESS: return "recover suspended process"; case START_CURRENT_TASK_PROCESS: return "start current task process"; case START_FAILURE_TASK_PROCESS: return "start failure task process"; case START_PROCESS: return "start process"; case REPEAT_RUNNING: return "repeat running"; case SCHEDULER: return "scheduler"; case COMPLEMENT_DATA: return "complement data"; case PAUSE: return "pause"; case STOP: return "stop"; default: return "unknown type"; } } /** * get process instance content * * @param processInstance process instance * @param taskInstances task instance list * @return process instance format content */ public String getContentProcessInstance(ProcessInstance processInstance, List<TaskInstance> taskInstances, ProjectUser projectUser) { String res = ""; if (processInstance.getState().typeIsSuccess()) { List<ProcessAlertContent> successTaskList = new ArrayList<>(1); ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder() .projectId(projectUser.getProjectId()) .projectName(projectUser.getProjectName()) .owner(projectUser.getUserName()) .processId(processInstance.getId()) .processName(processInstance.getName()) .processType(processInstance.getCommandType()) 
.processState(processInstance.getState()) .recovery(processInstance.getRecovery()) .runTimes(processInstance.getRunTimes()) .processStartTime(processInstance.getStartTime()) .processEndTime(processInstance.getEndTime()) .processHost(processInstance.getHost()) .build(); successTaskList.add(processAlertContent); res = JSONUtils.toJsonString(successTaskList); } else if (processInstance.getState().typeIsFailure()) { List<ProcessAlertContent> failedTaskList = new ArrayList<>(); for (TaskInstance task : taskInstances) { if (task.getState().typeIsSuccess()) { continue; } ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder() .projectId(projectUser.getProjectId()) .projectName(projectUser.getProjectName()) .owner(projectUser.getUserName()) .processId(processInstance.getId()) .processName(processInstance.getName()) .taskId(task.getId()) .taskName(task.getName()) .taskType(task.getTaskType()) .taskState(task.getState()) .taskStartTime(task.getStartTime()) .taskEndTime(task.getEndTime()) .taskHost(task.getHost()) .logPath(task.getLogPath()) .build(); failedTaskList.add(processAlertContent); } res = JSONUtils.toJsonString(failedTaskList); } return res; } /** * getting worker fault tolerant content * * @param processInstance process instance * @param toleranceTaskList tolerance task list * @return worker tolerance content */ private String getWorkerToleranceContent(ProcessInstance processInstance, List<TaskInstance> toleranceTaskList) { List<ProcessAlertContent> toleranceTaskInstanceList = new ArrayList<>(); for (TaskInstance taskInstance : toleranceTaskList) { ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder() .processName(processInstance.getName()) .taskName(taskInstance.getName()) .taskHost(taskInstance.getHost()) .retryTimes(taskInstance.getRetryTimes()) .build(); toleranceTaskInstanceList.add(processAlertContent); } return JSONUtils.toJsonString(toleranceTaskInstanceList); } /** * send worker alert fault tolerance * * @param processInstance process instance * @param toleranceTaskList tolerance task list */ public void sendAlertWorkerToleranceFault(ProcessInstance processInstance, List<TaskInstance> toleranceTaskList) { try { Alert alert = new Alert(); alert.setTitle("worker fault tolerance"); String content = getWorkerToleranceContent(processInstance, toleranceTaskList); alert.setContent(content); alert.setCreateTime(new Date()); alert.setAlertGroupId(processInstance.getWarningGroupId() == null ? 1 : processInstance.getWarningGroupId()); alertDao.addAlert(alert); logger.info("add alert to db , alert : {}", alert); } catch (Exception e) { logger.error("send alert failed:{} ", e.getMessage()); } } /** * send process instance alert * * @param processInstance process instance * @param taskInstances task instance list */ public void sendAlertProcessInstance(ProcessInstance processInstance, List<TaskInstance> taskInstances, ProjectUser projectUser) { if (!isNeedToSendWarning(processInstance)) { return; } Alert alert = new Alert(); String cmdName = getCommandCnName(processInstance.getCommandType()); String success = processInstance.getState().typeIsSuccess() ? 
"success" : "failed"; alert.setTitle(cmdName + " " + success); String content = getContentProcessInstance(processInstance, taskInstances,projectUser); alert.setContent(content); alert.setAlertGroupId(processInstance.getWarningGroupId()); alert.setCreateTime(new Date()); alertDao.addAlert(alert); logger.info("add alert to db , alert: {}", alert); } /** * check if need to be send warning * * @param processInstance * @return */ public boolean isNeedToSendWarning(ProcessInstance processInstance) { if (Flag.YES == processInstance.getIsSubProcess()) { return false; } boolean sendWarning = false; WarningType warningType = processInstance.getWarningType(); switch (warningType) { case ALL: if (processInstance.getState().typeIsFinished()) { sendWarning = true; } break; case SUCCESS: if (processInstance.getState().typeIsSuccess()) { sendWarning = true; } break; case FAILURE: if (processInstance.getState().typeIsFailure()) { sendWarning = true; } break; default: } return sendWarning; } /** * send process timeout alert * * @param processInstance process instance * @param processDefinition process definition */ public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition) { alertDao.sendProcessTimeoutAlert(processInstance, processDefinition); } public void sendTaskTimeoutAlert(ProcessInstance processInstance, TaskInstance taskInstance, TaskDefinition taskDefinition) { alertDao.sendTaskTimeoutAlert(processInstance.getWarningGroupId(), processInstance.getId(),processInstance.getName(), taskInstance.getId(), taskInstance.getName()); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,310
[Bug] [Master] TaskResponseProcessor process NullPointerException
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2021-12-10 13:01:03.481 org.apache.dolphinscheduler.remote.handler.NettyClientHandler:[156] - process command Command [type=TASK_EXECUTE_RESPONSE, opaque=15, bodyLen=130] exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor.process(TaskResponseProcessor.java:72) at org.apache.dolphinscheduler.remote.handler.NettyClientHandler.lambda$processByCommandType$0(NettyClientHandler.java:154) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266) at java.util.concurrent.FutureTask.run(FutureTask.java) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ``` ### What you expected to happen Workflow execution ### How to reproduce Manually execute a workflow ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7310
https://github.com/apache/dolphinscheduler/pull/7318
f03b0e8c7b562b77715185cbaeacc598cba87e12
939fa0c3bac50b43a519dfd3f4a6b8db1be92e92
"2021-12-10T05:25:06Z"
java
"2021-12-10T08:55:56Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.config.NettyServerConfig; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.processor.CacheProcessor; import org.apache.dolphinscheduler.server.master.processor.StateEventProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskEventProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskKillResponseProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor; import org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient; import org.apache.dolphinscheduler.server.master.runner.EventExecuteService; import org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.quartz.Scheduler; import org.quartz.SchedulerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.cache.annotation.EnableCaching; import org.springframework.context.annotation.ComponentScan; import org.springframework.transaction.annotation.EnableTransactionManagement; import javax.annotation.PostConstruct; @SpringBootApplication @ComponentScan("org.apache.dolphinscheduler") @EnableTransactionManagement @EnableCaching public class MasterServer implements IStoppable { private static final Logger logger = LoggerFactory.getLogger(MasterServer.class); @Autowired private MasterConfig masterConfig; @Autowired private SpringApplicationContext springApplicationContext; private NettyRemotingServer nettyRemotingServer; @Autowired private MasterRegistryClient masterRegistryClient; @Autowired private MasterSchedulerService masterSchedulerService; @Autowired private EventExecuteService eventExecuteService; @Autowired private Scheduler scheduler; @Autowired private TaskAckProcessor taskAckProcessor; public static void main(String[] args) { Thread.currentThread().setName(Constants.THREAD_NAME_MASTER_SERVER); SpringApplication.run(MasterServer.class); } /** * run master server */ @PostConstruct public void run() throws SchedulerException { // init remoting server 
NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(masterConfig.getListenPort()); this.nettyRemotingServer = new NettyRemotingServer(serverConfig); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE, new TaskResponseProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor); this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_RESPONSE, new TaskKillResponseProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.STATE_EVENT_REQUEST, new StateEventProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_FORCE_STATE_EVENT_REQUEST, new TaskEventProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_WAKEUP_EVENT_REQUEST, new TaskEventProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.CACHE_EXPIRE, new CacheProcessor()); this.nettyRemotingServer.start(); // self tolerant this.masterRegistryClient.init(); this.masterRegistryClient.start(); this.masterRegistryClient.setRegistryStoppable(this); this.eventExecuteService.init(); this.eventExecuteService.start(); this.masterSchedulerService.init(); this.masterSchedulerService.start(); this.scheduler.start(); Runtime.getRuntime().addShutdownHook(new Thread(() -> { if (Stopper.isRunning()) { close("shutdownHook"); } })); } /** * gracefully close * * @param cause close cause */ public void close(String cause) { try { // execute only once if (Stopper.isStopped()) { return; } logger.info("master server is stopping ..., cause : {}", cause); // set stop signal is true Stopper.stop(); try { // thread sleep 3 seconds for thread quietly stop Thread.sleep(3000L); } catch (Exception e) { logger.warn("thread sleep exception ", e); } // close this.masterSchedulerService.close(); this.nettyRemotingServer.close(); this.masterRegistryClient.closeRegistry(); // close spring Context and will invoke method with @PreDestroy annotation to destory beans. like ServerNodeManager,HostManager,TaskResponseService,CuratorZookeeperClient,etc springApplicationContext.close(); } catch (Exception e) { logger.error("master server stop exception ", e); } } @Override public void stop(String cause) { close(cause); } }
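Editor's note: a plausible cause of the NPE at `TaskResponseProcessor.process(...:72)` is visible in this snapshot: `TASK_EXECUTE_ACK` is registered with the Spring-injected `taskAckProcessor`, while `TASK_EXECUTE_RESPONSE` gets a `new TaskResponseProcessor()`, whose own `@Autowired` dependencies are never injected and stay null. A sketch of registering Spring-managed processors throughout — an assumption about the direction of PR #7318, not its literal diff:

```java
// Sketch: inject the processors instead of constructing them with 'new'.
@Autowired
private TaskResponseProcessor taskResponseProcessor;

@Autowired
private TaskKillResponseProcessor taskKillResponseProcessor;

// in run():
this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE, taskResponseProcessor);
this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor);
this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_RESPONSE, taskKillResponseProcessor);
```

The same mixed pattern appears in `NettyExecutorManager#init()` in the record that follows, which would call for the identical treatment.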
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,310
[Bug] [Master] TaskResponseProcessor process NullPointerException
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ``` [ERROR] 2021-12-10 13:01:03.481 org.apache.dolphinscheduler.remote.handler.NettyClientHandler:[156] - process command Command [type=TASK_EXECUTE_RESPONSE, opaque=15, bodyLen=130] exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor.process(TaskResponseProcessor.java:72) at org.apache.dolphinscheduler.remote.handler.NettyClientHandler.lambda$processByCommandType$0(NettyClientHandler.java:154) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266) at java.util.concurrent.FutureTask.run(FutureTask.java) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ``` ### What you expected to happen Workflow execution ### How to reproduce Manually execute a workflow ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7310
https://github.com/apache/dolphinscheduler/pull/7318
f03b0e8c7b562b77715185cbaeacc598cba87e12
939fa0c3bac50b43a519dfd3f4a6b8db1be92e92
"2021-12-10T05:25:06Z"
java
"2021-12-10T08:55:56Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.dispatch.executor;

import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyClientConfig;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskKillResponseProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor;
import org.apache.dolphinscheduler.server.master.registry.ServerNodeManager;

import org.apache.commons.collections.CollectionUtils;

import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * netty executor manager
 */
@Service
public class NettyExecutorManager extends AbstractExecutorManager<Boolean> {

    private final Logger logger = LoggerFactory.getLogger(NettyExecutorManager.class);

    /**
     * server node manager
     */
    @Autowired
    private ServerNodeManager serverNodeManager;

    @Autowired
    private TaskAckProcessor taskAckProcessor;

    /**
     * netty remote client
     */
    private final NettyRemotingClient nettyRemotingClient;

    /**
     * constructor
     */
    public NettyExecutorManager() {
        final NettyClientConfig clientConfig = new NettyClientConfig();
        this.nettyRemotingClient = new NettyRemotingClient(clientConfig);
    }

    @PostConstruct
    public void init() {
        // register EXECUTE_TASK_RESPONSE command type with TaskResponseProcessor
        // register EXECUTE_TASK_ACK command type with TaskAckProcessor
        this.nettyRemotingClient.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE, new TaskResponseProcessor());
        this.nettyRemotingClient.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor);
        this.nettyRemotingClient.registerProcessor(CommandType.TASK_KILL_RESPONSE, new TaskKillResponseProcessor());
    }

    /**
     * execute logic
     *
     * @param context context
     * @return result
     * @throws ExecuteException if error throws ExecuteException
     */
    @Override
    public Boolean execute(ExecutionContext context) throws ExecuteException {
        // all nodes
        Set<String> allNodes = getAllNodes(context);
        // failed nodes
        Set<String> failNodeSet = new HashSet<>();
        // build command according to the execution context
        Command command = context.getCommand();
        // host executing the task
        Host host = context.getHost();
        boolean success = false;
        while (!success) {
            try {
                doExecute(host, command);
                success = true;
                context.setHost(host);
            } catch (ExecuteException ex) {
                logger.error(String.format("execute command : %s error", command), ex);
                try {
                    failNodeSet.add(host.getAddress());
                    Set<String> tmpAllIps = new HashSet<>(allNodes);
                    Collection<String> remained = CollectionUtils.subtract(tmpAllIps, failNodeSet);
                    if (remained != null && remained.size() > 0) {
                        host = Host.of(remained.iterator().next());
                        logger.error("retry execute command : {} host : {}", command, host);
                    } else {
                        throw new ExecuteException("fail after try all nodes");
                    }
                } catch (Throwable t) {
                    throw new ExecuteException("fail after try all nodes");
                }
            }
        }
        return success;
    }

    @Override
    public void executeDirectly(ExecutionContext context) throws ExecuteException {
        Host host = context.getHost();
        doExecute(host, context.getCommand());
    }

    /**
     * execute logic
     *
     * @param host host
     * @param command command
     * @throws ExecuteException if error throws ExecuteException
     */
    public void doExecute(final Host host, final Command command) throws ExecuteException {
        // retry count, default 3
        int retryCount = 3;
        boolean success = false;
        do {
            try {
                nettyRemotingClient.send(host, command);
                success = true;
            } catch (Exception ex) {
                logger.error(String.format("send command : %s to %s error", command, host), ex);
                retryCount--;
                ThreadUtils.sleep(100);
            }
        } while (retryCount >= 0 && !success);

        if (!success) {
            throw new ExecuteException(String.format("send command : %s to %s error", command, host));
        }
    }

    /**
     * get all nodes
     *
     * @param context context
     * @return nodes
     */
    private Set<String> getAllNodes(ExecutionContext context) {
        Set<String> nodes = Collections.emptySet();
        // executor type
        ExecutorType executorType = context.getExecutorType();
        switch (executorType) {
            case WORKER:
                nodes = serverNodeManager.getWorkerGroupNodes(context.getWorkerGroup());
                break;
            case CLIENT:
                break;
            default:
                throw new IllegalArgumentException("invalid executor type : " + executorType);
        }
        return nodes;
    }

    public NettyRemotingClient getNettyRemotingClient() {
        return nettyRemotingClient;
    }
}
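The `execute` loop above implements dispatch failover: try the preferred host, record it as failed on error, and retry against the remaining nodes until one accepts the command or all are exhausted. The following self-contained sketch restates that loop in isolation; `FailoverSender` and `SendFn` are illustrative names (the real code calls `NettyRemotingClient#send`):

```java
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

public final class FailoverSender {

    /** hypothetical send callback standing in for NettyRemotingClient#send */
    @FunctionalInterface
    public interface SendFn {
        void send(String host) throws Exception;
    }

    /**
     * Tries the preferred host first, then every remaining node,
     * until one send succeeds.
     *
     * @return the host that accepted the command
     * @throws Exception when every node has been tried and failed
     */
    public static String sendWithFailover(String preferredHost,
                                          Collection<String> allNodes,
                                          SendFn sendFn) throws Exception {
        // LinkedHashSet keeps the preferred host in front and deduplicates
        Set<String> candidates = new LinkedHashSet<>();
        candidates.add(preferredHost);
        candidates.addAll(allNodes);

        Set<String> failed = new HashSet<>();
        for (String host : candidates) {
            try {
                sendFn.send(host);
                return host;
            } catch (Exception ex) {
                failed.add(host);
            }
        }
        throw new Exception("fail after try all nodes: " + failed);
    }
}
```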
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,310
[Bug] [Master] TaskResponseProcessor process NullPointerException
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

```
[ERROR] 2021-12-10 13:01:03.481 org.apache.dolphinscheduler.remote.handler.NettyClientHandler:[156] - process command Command [type=TASK_EXECUTE_RESPONSE, opaque=15, bodyLen=130] exception
java.lang.NullPointerException: null
    at org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor.process(TaskResponseProcessor.java:72)
    at org.apache.dolphinscheduler.remote.handler.NettyClientHandler.lambda$processByCommandType$0(NettyClientHandler.java:154)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
    at java.util.concurrent.FutureTask.run(FutureTask.java)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
```

### What you expected to happen

Workflow execution

### How to reproduce

Manually execute a workflow

### Anything else

_No response_

### Version

dev

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7310
https://github.com/apache/dolphinscheduler/pull/7318
f03b0e8c7b562b77715185cbaeacc598cba87e12
939fa0c3bac50b43a519dfd3f4a6b8db1be92e92
"2021-12-10T05:25:06Z"
java
"2021-12-10T08:55:56Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/processor/CacheProcessor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.processor;

import org.apache.dolphinscheduler.common.enums.CacheType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.remote.command.CacheExpireCommand;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;

import com.google.common.base.Preconditions;

import io.netty.channel.Channel;

/**
 * processes cache-expire commands received from master/api
 */
public class CacheProcessor implements NettyRequestProcessor {

    private final Logger logger = LoggerFactory.getLogger(CacheProcessor.class);

    private CacheManager cacheManager;

    @Override
    public void process(Channel channel, Command command) {
        Preconditions.checkArgument(CommandType.CACHE_EXPIRE == command.getType(),
                String.format("invalid command type: %s", command.getType()));

        CacheExpireCommand cacheExpireCommand = JSONUtils.parseObject(command.getBody(), CacheExpireCommand.class);

        logger.info("received command : {}", cacheExpireCommand);

        this.cacheExpire(cacheExpireCommand);
    }

    private void cacheExpire(CacheExpireCommand cacheExpireCommand) {
        // the CacheManager bean is looked up lazily on first use
        if (cacheManager == null) {
            cacheManager = SpringApplicationContext.getBean(CacheManager.class);
        }

        if (cacheExpireCommand.getCacheKey().isEmpty()) {
            return;
        }

        CacheType cacheType = cacheExpireCommand.getCacheType();
        Cache cache = cacheManager.getCache(cacheType.getCacheName());
        if (cache != null) {
            cache.evict(cacheExpireCommand.getCacheKey());
            logger.info("cache evict, type:{}, key:{}", cacheType.getCacheName(), cacheExpireCommand.getCacheKey());
        }
    }
}
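For reference, `CacheProcessor.cacheExpire` boils down to a standard Spring `Cache#evict` call on a named cache. A runnable sketch using Spring's in-memory `ConcurrentMapCacheManager` (the cache name `tenant` and the key are just examples; the real deployment wires its own `CacheManager` bean):

```java
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.concurrent.ConcurrentMapCacheManager;

public class CacheEvictExample {

    public static void main(String[] args) {
        // simple in-memory cache manager with one named cache
        CacheManager cacheManager = new ConcurrentMapCacheManager("tenant");

        Cache cache = cacheManager.getCache("tenant");
        cache.put("tenant-1", "some cached value");
        System.out.println(cache.get("tenant-1").get()); // some cached value

        // evict one entry, as CacheProcessor does for the key in CacheExpireCommand
        cache.evict("tenant-1");
        System.out.println(cache.get("tenant-1"));       // null
    }
}
```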
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,310
[Bug] [Master] TaskResponseProcessor process NullPointerException
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

```
[ERROR] 2021-12-10 13:01:03.481 org.apache.dolphinscheduler.remote.handler.NettyClientHandler:[156] - process command Command [type=TASK_EXECUTE_RESPONSE, opaque=15, bodyLen=130] exception
java.lang.NullPointerException: null
    at org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor.process(TaskResponseProcessor.java:72)
    at org.apache.dolphinscheduler.remote.handler.NettyClientHandler.lambda$processByCommandType$0(NettyClientHandler.java:154)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
    at java.util.concurrent.FutureTask.run(FutureTask.java)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
```

### What you expected to happen

Workflow execution

### How to reproduce

Manually execute a workflow

### Anything else

_No response_

### Version

dev

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7310
https://github.com/apache/dolphinscheduler/pull/7318
f03b0e8c7b562b77715185cbaeacc598cba87e12
939fa0c3bac50b43a519dfd3f4a6b8db1be92e92
"2021-12-10T05:25:06Z"
java
"2021-12-10T08:55:56Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/processor/StateEventProcessor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.processor;

import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.master.processor.queue.StateEventResponseService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;

import io.netty.channel.Channel;

/**
 * handle state event received from master/api
 */
public class StateEventProcessor implements NettyRequestProcessor {

    private final Logger logger = LoggerFactory.getLogger(StateEventProcessor.class);

    private StateEventResponseService stateEventResponseService;

    public StateEventProcessor() {
        stateEventResponseService = SpringApplicationContext.getBean(StateEventResponseService.class);
    }

    @Override
    public void process(Channel channel, Command command) {
        Preconditions.checkArgument(CommandType.STATE_EVENT_REQUEST == command.getType(),
                String.format("invalid command type: %s", command.getType()));

        StateEventChangeCommand stateEventChangeCommand = JSONUtils.parseObject(command.getBody(), StateEventChangeCommand.class);

        StateEvent stateEvent = new StateEvent();
        stateEvent.setKey(stateEventChangeCommand.getKey());
        if (stateEventChangeCommand.getSourceProcessInstanceId() != stateEventChangeCommand.getDestProcessInstanceId()) {
            stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
        } else {
            stateEvent.setExecutionStatus(stateEventChangeCommand.getSourceStatus());
        }
        stateEvent.setProcessInstanceId(stateEventChangeCommand.getDestProcessInstanceId());
        stateEvent.setTaskInstanceId(stateEventChangeCommand.getDestTaskInstanceId());

        StateEventType type = stateEvent.getTaskInstanceId() == 0 ? StateEventType.PROCESS_STATE_CHANGE : StateEventType.TASK_STATE_CHANGE;
        stateEvent.setType(type);

        logger.info("received command : {}", stateEvent);
        stateEventResponseService.addResponse(stateEvent);
    }
}
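`StateEventProcessor` decides the event type from the destination task-instance id: `0` means the event targets the whole process instance, anything else targets a single task. A tiny standalone restatement of that rule (the enum values mirror the real `StateEventType` constants; the class itself is illustrative):

```java
public class StateEventTypeRule {

    enum EventType { PROCESS_STATE_CHANGE, TASK_STATE_CHANGE }

    // same ternary rule as StateEventProcessor.process
    static EventType typeFor(int taskInstanceId) {
        return taskInstanceId == 0 ? EventType.PROCESS_STATE_CHANGE : EventType.TASK_STATE_CHANGE;
    }

    public static void main(String[] args) {
        System.out.println(typeFor(0));   // PROCESS_STATE_CHANGE
        System.out.println(typeFor(42));  // TASK_STATE_CHANGE
    }
}
```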
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,310
[Bug] [Master] TaskResponseProcessor process NullPointerException
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

```
[ERROR] 2021-12-10 13:01:03.481 org.apache.dolphinscheduler.remote.handler.NettyClientHandler:[156] - process command Command [type=TASK_EXECUTE_RESPONSE, opaque=15, bodyLen=130] exception
java.lang.NullPointerException: null
    at org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor.process(TaskResponseProcessor.java:72)
    at org.apache.dolphinscheduler.remote.handler.NettyClientHandler.lambda$processByCommandType$0(NettyClientHandler.java:154)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
    at java.util.concurrent.FutureTask.run(FutureTask.java)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
```

### What you expected to happen

Workflow execution

### How to reproduce

Manually execute a workflow

### Anything else

_No response_

### Version

dev

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7310
https://github.com/apache/dolphinscheduler/pull/7318
f03b0e8c7b562b77715185cbaeacc598cba87e12
939fa0c3bac50b43a519dfd3f4a6b8db1be92e92
"2021-12-10T05:25:06Z"
java
"2021-12-10T08:55:56Z"
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/processor/TaskEventProcessor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.processor;

import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.TaskEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.server.master.processor.queue.StateEventResponseService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;

import io.netty.channel.Channel;

/**
 * handle task event received from master/api
 */
public class TaskEventProcessor implements NettyRequestProcessor {

    private final Logger logger = LoggerFactory.getLogger(TaskEventProcessor.class);

    private StateEventResponseService stateEventResponseService;

    public TaskEventProcessor() {
        stateEventResponseService = SpringApplicationContext.getBean(StateEventResponseService.class);
    }

    @Override
    public void process(Channel channel, Command command) {
        Preconditions.checkArgument(CommandType.TASK_FORCE_STATE_EVENT_REQUEST == command.getType()
                        || CommandType.TASK_WAKEUP_EVENT_REQUEST == command.getType(),
                String.format("invalid command type: %s", command.getType()));

        TaskEventChangeCommand taskEventChangeCommand = JSONUtils.parseObject(command.getBody(), TaskEventChangeCommand.class);

        StateEvent stateEvent = new StateEvent();
        stateEvent.setKey(taskEventChangeCommand.getKey());
        stateEvent.setProcessInstanceId(taskEventChangeCommand.getProcessInstanceId());
        stateEvent.setTaskInstanceId(taskEventChangeCommand.getTaskInstanceId());
        stateEvent.setType(StateEventType.WAIT_TASK_GROUP);

        logger.info("received command : {}", stateEvent);
        stateEventResponseService.addEvent2WorkflowExecute(stateEvent);
    }
}
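`TaskEventProcessor`, like the other processors here, front-loads a Guava `Preconditions.checkArgument` guard so a command of the wrong type fails fast with an `IllegalArgumentException` instead of being half-processed. A standalone sketch of the same guard (the local `CommandType` enum is a stand-in for the real one):

```java
import com.google.common.base.Preconditions;

public class CommandTypeGuard {

    // stand-in for org.apache.dolphinscheduler.remote.command.CommandType
    enum CommandType { TASK_FORCE_STATE_EVENT_REQUEST, TASK_WAKEUP_EVENT_REQUEST, TASK_EXECUTE_ACK }

    static void accept(CommandType type) {
        // reject anything but the two accepted event types, as TaskEventProcessor does
        Preconditions.checkArgument(
                type == CommandType.TASK_FORCE_STATE_EVENT_REQUEST
                        || type == CommandType.TASK_WAKEUP_EVENT_REQUEST,
                "invalid command type: %s", type);
        System.out.println("accepted " + type);
    }

    public static void main(String[] args) {
        accept(CommandType.TASK_WAKEUP_EVENT_REQUEST); // ok
        accept(CommandType.TASK_EXECUTE_ACK);          // throws IllegalArgumentException
    }
}
```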